Compare commits


1 commit

Author: z78078 | SHA1: 74b9416174 | Message: nac3core: add fields initialization check for parent class | Date: 2022-08-17 17:44:17 +08:00
92 changed files with 6389 additions and 21835 deletions


@ -1 +0,0 @@
doc-valid-idents = ["NumPy", ".."]

.gitignore vendored

@ -1,3 +1,3 @@
__pycache__ __pycache__
/target /target
nix/windows/msys2 windows/msys2

Cargo.lock generated

File diff suppressed because it is too large.


@ -8,7 +8,6 @@ members = [
"nac3artiq", "nac3artiq",
"runkernel", "runkernel",
] ]
resolver = "2"
[profile.release] [profile.release]
debug = true debug = true


@ -23,19 +23,21 @@ After setting up Nix as above, use ``nix shell git+https://github.com/m-labs/art
### Windows ### Windows
Install [MSYS2](https://www.msys2.org/), and open "MSYS2 CLANG64". Edit ``/etc/pacman.conf`` to add: Install [MSYS2](https://www.msys2.org/), and open "MSYS2 MinGW x64". Edit ``/etc/pacman.conf`` to add:
``` ```
[artiq] [artiq]
SigLevel = Optional TrustAll SigLevel = Optional TrustAll
Server = https://msys2.m-labs.hk/artiq-nac3 Server = https://lab.m-labs.hk/msys2
``` ```
Then run the following commands: Then run the following commands:
``` ```
pacman -Syu pacman -Syu
pacman -S mingw-w64-clang-x86_64-artiq pacman -S mingw-w64-x86_64-artiq
``` ```
Note: This build of NAC3 cannot be used with Anaconda Python nor the python.org binaries for Windows. Those Python versions are compiled with Visual Studio (MSVC) and their ABI is incompatible with the GNU ABI used in this build. We have no plans to support Visual Studio nor the MSVC ABI. If you need a MSVC build, please install the requisite bloated spyware from Microsoft and compile NAC3 yourself.
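A quick way to confirm you are on a compatible interpreter is to ask Python which compiler built it. This is a minimal sketch (run it with the interpreter you intend to use with NAC3):

```python
# Sanity check: MSYS2 builds report a GCC/Clang compiler string,
# while Anaconda/python.org builds report "MSC v..." (MSVC) and will not work here.
import platform
import sys

print(sys.executable)              # should point inside the MSYS2 installation
print(platform.python_compiler())  # e.g. "GCC ..." or "Clang ...", not "MSC v.19xx"
```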
## For developers ## For developers
This repository contains: This repository contains:


@ -2,16 +2,16 @@
"nodes": { "nodes": {
"nixpkgs": { "nixpkgs": {
"locked": { "locked": {
"lastModified": 1708296515, "lastModified": 1659689094,
"narHash": "sha256-FyF489fYNAUy7b6dkYV6rGPyzp+4tThhr80KNAaF/yY=", "narHash": "sha256-cXrWxpPYpV1PeEhtpQf9W++8aCgwzxpx2PzfszPofJE=",
"owner": "NixOS", "owner": "NixOS",
"repo": "nixpkgs", "repo": "nixpkgs",
"rev": "b98a4e1746acceb92c509bc496ef3d0e5ad8d4aa", "rev": "697fc6ae98d077f6448cada3ecd63465c48c6af5",
"type": "github" "type": "github"
}, },
"original": { "original": {
"owner": "NixOS", "owner": "NixOS",
"ref": "nixos-unstable", "ref": "master",
"repo": "nixpkgs", "repo": "nixpkgs",
"type": "github" "type": "github"
} }


@ -1,7 +1,7 @@
{ {
description = "The third-generation ARTIQ compiler"; description = "The third-generation ARTIQ compiler";
inputs.nixpkgs.url = github:NixOS/nixpkgs/nixos-unstable; inputs.nixpkgs.url = github:NixOS/nixpkgs/master;
outputs = { self, nixpkgs }: outputs = { self, nixpkgs }:
let let
@ -9,12 +9,6 @@
in rec { in rec {
packages.x86_64-linux = rec { packages.x86_64-linux = rec {
llvm-nac3 = pkgs.callPackage ./nix/llvm {}; llvm-nac3 = pkgs.callPackage ./nix/llvm {};
llvm-tools-irrt = pkgs.runCommandNoCC "llvm-tools-irrt" {}
''
mkdir -p $out/bin
ln -s ${pkgs.llvmPackages_14.clang-unwrapped}/bin/clang $out/bin/clang-irrt
ln -s ${pkgs.llvmPackages_14.llvm.out}/bin/llvm-as $out/bin/llvm-as-irrt
'';
nac3artiq = pkgs.python3Packages.toPythonModule ( nac3artiq = pkgs.python3Packages.toPythonModule (
pkgs.rustPlatform.buildRustPackage rec { pkgs.rustPlatform.buildRustPackage rec {
name = "nac3artiq"; name = "nac3artiq";
@ -22,11 +16,14 @@
src = self; src = self;
cargoLock = { cargoLock = {
lockFile = ./Cargo.lock; lockFile = ./Cargo.lock;
outputHashes = {
"inkwell-0.1.0" = "sha256-+ih3SO0n6YmZ/mcf+rLDwPAy/1MEZ/A+tI4pM1pUhvU=";
};
}; };
passthru.cargoLock = cargoLock; passthru.cargoLock = cargoLock;
nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang llvm-tools-irrt pkgs.llvmPackages_14.llvm.out llvm-nac3 ]; nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang-unwrapped pkgs.llvmPackages_14.llvm.out llvm-nac3 ];
buildInputs = [ pkgs.python3 llvm-nac3 ]; buildInputs = [ pkgs.python3 llvm-nac3 ];
checkInputs = [ (pkgs.python3.withPackages(ps: [ ps.numpy ps.scipy ])) ]; checkInputs = [ (pkgs.python3.withPackages(ps: [ ps.numpy ])) ];
checkPhase = checkPhase =
'' ''
echo "Checking nac3standalone demos..." echo "Checking nac3standalone demos..."
@ -66,7 +63,7 @@
name = "nac3artiq-instrumented"; name = "nac3artiq-instrumented";
src = self; src = self;
inherit (nac3artiq) cargoLock; inherit (nac3artiq) cargoLock;
nativeBuildInputs = [ pkgs.python3 packages.x86_64-linux.llvm-tools-irrt llvm-nac3-instrumented ]; nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang-unwrapped pkgs.llvmPackages_14.llvm.out llvm-nac3-instrumented ];
buildInputs = [ pkgs.python3 llvm-nac3-instrumented ]; buildInputs = [ pkgs.python3 llvm-nac3-instrumented ];
cargoBuildFlags = [ "--package" "nac3artiq" "--features" "init-llvm-profile" ]; cargoBuildFlags = [ "--package" "nac3artiq" "--features" "init-llvm-profile" ];
doCheck = false; doCheck = false;
@ -94,12 +91,12 @@
(pkgs.fetchFromGitHub { (pkgs.fetchFromGitHub {
owner = "m-labs"; owner = "m-labs";
repo = "artiq"; repo = "artiq";
rev = "923ca3377d42c815f979983134ec549dc39d3ca0"; rev = "dd57fdc530baf926a5f354dc1c2bd90564affd96";
sha256 = "sha256-oJoEeNEeNFSUyh6jXG8Tzp6qHVikeHS0CzfE+mODPgw="; sha256 = "sha256-hcqVcToYWkc3oDFkKr9wZUF65ydiSYVHdmiGiu2Mc1c=";
}) })
]; ];
buildInputs = [ buildInputs = [
(python3-mimalloc.withPackages(ps: [ ps.numpy ps.scipy ps.jsonschema ps.lmdb nac3artiq-instrumented ])) (python3-mimalloc.withPackages(ps: [ ps.numpy ps.jsonschema nac3artiq-instrumented ]))
pkgs.llvmPackages_14.llvm.out pkgs.llvmPackages_14.llvm.out
]; ];
phases = [ "buildPhase" "installPhase" ]; phases = [ "buildPhase" "installPhase" ];
@ -128,7 +125,7 @@
name = "nac3artiq-pgo"; name = "nac3artiq-pgo";
src = self; src = self;
inherit (nac3artiq) cargoLock; inherit (nac3artiq) cargoLock;
nativeBuildInputs = [ pkgs.python3 packages.x86_64-linux.llvm-tools-irrt llvm-nac3-pgo ]; nativeBuildInputs = [ pkgs.python3 pkgs.llvmPackages_14.clang-unwrapped pkgs.llvmPackages_14.llvm.out llvm-nac3-pgo ];
buildInputs = [ pkgs.python3 llvm-nac3-pgo ]; buildInputs = [ pkgs.python3 llvm-nac3-pgo ];
cargoBuildFlags = [ "--package" "nac3artiq" ]; cargoBuildFlags = [ "--package" "nac3artiq" ];
cargoTestFlags = [ "--package" "nac3ast" "--package" "nac3parser" "--package" "nac3core" "--package" "nac3artiq" ]; cargoTestFlags = [ "--package" "nac3ast" "--package" "nac3parser" "--package" "nac3core" "--package" "nac3artiq" ];
@ -144,18 +141,18 @@
packages.x86_64-w64-mingw32 = import ./nix/windows { inherit pkgs; }; packages.x86_64-w64-mingw32 = import ./nix/windows { inherit pkgs; };
devShells.x86_64-linux.default = pkgs.mkShell { devShell.x86_64-linux = pkgs.mkShell {
name = "nac3-dev-shell"; name = "nac3-dev-shell";
buildInputs = with pkgs; [ buildInputs = with pkgs; [
# build dependencies # build dependencies
packages.x86_64-linux.llvm-nac3 packages.x86_64-linux.llvm-nac3
llvmPackages_14.clang # demo llvmPackages_14.clang-unwrapped # IRRT
packages.x86_64-linux.llvm-tools-irrt pkgs.llvmPackages_14.llvm.out # IRRT
cargo cargo
rustc rustc
# runtime dependencies # runtime dependencies
lld_14 # for running kernels on the host lld_14 # for running kernels on the host
(packages.x86_64-linux.python3-mimalloc.withPackages(ps: [ ps.numpy ps.scipy ])) (packages.x86_64-linux.python3-mimalloc.withPackages(ps: [ ps.numpy ]))
# development tools # development tools
cargo-insta cargo-insta
clippy clippy


@ -2,23 +2,22 @@
name = "nac3artiq" name = "nac3artiq"
version = "0.1.0" version = "0.1.0"
authors = ["M-Labs"] authors = ["M-Labs"]
edition = "2021" edition = "2018"
[lib] [lib]
name = "nac3artiq" name = "nac3artiq"
crate-type = ["cdylib"] crate-type = ["cdylib"]
[dependencies] [dependencies]
itertools = "0.12" pyo3 = { version = "0.16", features = ["extension-module"] }
pyo3 = { version = "0.20", features = ["extension-module"] }
parking_lot = "0.12" parking_lot = "0.12"
tempfile = "3.10" tempfile = "3"
nac3parser = { path = "../nac3parser" } nac3parser = { path = "../nac3parser" }
nac3core = { path = "../nac3core" } nac3core = { path = "../nac3core" }
nac3ld = { path = "../nac3ld" } nac3ld = { path = "../nac3ld" }
[dependencies.inkwell] [dependencies.inkwell]
version = "0.4" git = "https://github.com/TheDan64/inkwell.git"
default-features = false default-features = false
features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"] features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]


@ -18,13 +18,6 @@ class EmbeddingMap:
"SPIError", "SPIError",
"0:ZeroDivisionError", "0:ZeroDivisionError",
"0:IndexError", "0:IndexError",
"0:ValueError",
"0:RuntimeError",
"0:AssertionError",
"0:KeyError",
"0:NotImplementedError",
"0:OverflowError",
"0:IOError",
"0:UnwrapNoneError"]) "0:UnwrapNoneError"])
def preallocate_runtime_exception_names(self, names): def preallocate_runtime_exception_names(self, names):


@ -10,7 +10,7 @@ from embedding_map import EmbeddingMap
__all__ = [ __all__ = [
"Kernel", "KernelInvariant", "virtual", "ConstGeneric", "Kernel", "KernelInvariant", "virtual",
"Option", "Some", "none", "UnwrapNoneError", "Option", "Some", "none", "UnwrapNoneError",
"round64", "floor64", "ceil64", "round64", "floor64", "ceil64",
"extern", "kernel", "portable", "nac3", "extern", "kernel", "portable", "nac3",
@ -67,12 +67,6 @@ def Some(v: T) -> Option[T]:
none = Option(None) none = Option(None)
class _ConstGenericMarker:
pass
def ConstGeneric(name, constraint):
return TypeVar(name, _ConstGenericMarker, constraint)
def round64(x): def round64(x):
return round(x) return round(x)
@ -86,13 +80,7 @@ def ceil64(x):
import device_db import device_db
core_arguments = device_db.device_db["core"]["arguments"] core_arguments = device_db.device_db["core"]["arguments"]
artiq_builtins = { compiler = nac3artiq.NAC3(core_arguments["target"])
"none": none,
"virtual": virtual,
"_ConstGenericMarker": _ConstGenericMarker,
"Option": Option,
}
compiler = nac3artiq.NAC3(core_arguments["target"], artiq_builtins)
allow_registration = True allow_registration = True
# Delay NAC3 analysis until all referenced variables are supposed to exist on the CPython side. # Delay NAC3 analysis until all referenced variables are supposed to exist on the CPython side.
registered_functions = set() registered_functions = set()
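For context, the `ConstGeneric` helper added in the left-hand column is nothing more than a `typing.TypeVar` whose first constraint is the `_ConstGenericMarker` sentinel (which, judging by the `artiq_builtins` dict here and the new `const_generic_marker` id in `lib.rs` below, is how the compiler tells const-generic parameters apart from ordinary type variables). A standalone sketch:

```python
from typing import TypeVar

class _ConstGenericMarker:
    pass

def ConstGeneric(name, constraint):
    # A const-generic parameter is an ordinary TypeVar tagged with the marker class.
    return TypeVar(name, _ConstGenericMarker, constraint)

N = ConstGeneric("N", int)
print(N.__constraints__)  # the marker class followed by the user-supplied constraint
```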


@ -1,23 +1,18 @@
use nac3core::{ use nac3core::{
codegen::{ codegen::{
expr::gen_call, expr::gen_call,
llvm_intrinsics::{call_int_smax, call_stackrestore, call_stacksave},
stmt::{gen_block, gen_with}, stmt::{gen_block, gen_with},
CodeGenContext, CodeGenerator, CodeGenContext, CodeGenerator,
}, },
symbol_resolver::ValueEnum, symbol_resolver::ValueEnum,
toplevel::{DefinitionId, GenCall, helper::PRIMITIVE_DEF_IDS}, toplevel::{DefinitionId, GenCall},
typecheck::typedef::{FunSignature, FuncArg, Type, TypeEnum, VarMap} typecheck::typedef::{FunSignature, FuncArg, Type, TypeEnum}
}; };
use nac3parser::ast::{Expr, ExprKind, Located, Stmt, StmtKind, StrRef}; use nac3parser::ast::{Expr, ExprKind, Located, Stmt, StmtKind, StrRef};
use inkwell::{ use inkwell::{
context::Context, context::Context, module::Linkage, types::IntType, values::BasicValueEnum, AddressSpace,
module::Linkage,
types::IntType,
values::BasicValueEnum,
AddressSpace,
}; };
use pyo3::{PyObject, PyResult, Python, types::{PyDict, PyList}}; use pyo3::{PyObject, PyResult, Python, types::{PyDict, PyList}};
@ -31,45 +26,13 @@ use std::{
sync::Arc, sync::Arc,
}; };
/// The parallelism mode within a block.
#[derive(Copy, Clone, Eq, PartialEq)]
enum ParallelMode {
/// No parallelism is currently registered for this context.
None,
/// Legacy (or shallow) parallelism. Default before NAC3.
///
/// Each statement within the `with` block is treated as statements to be executed in parallel.
Legacy,
/// Deep parallelism. Default since NAC3.
///
/// Each function call within the `with` block (except those within a nested `sequential` block)
/// are treated to be executed in parallel.
Deep
}
pub struct ArtiqCodeGenerator<'a> { pub struct ArtiqCodeGenerator<'a> {
name: String, name: String,
/// The size of a `size_t` variable in bits.
size_t: u32, size_t: u32,
/// Monotonic counter for naming `start`/`stop` variables used by `with parallel` blocks.
name_counter: u32, name_counter: u32,
/// Variable for tracking the start of a `with parallel` block.
start: Option<Expr<Option<Type>>>, start: Option<Expr<Option<Type>>>,
/// Variable for tracking the end of a `with parallel` block.
end: Option<Expr<Option<Type>>>, end: Option<Expr<Option<Type>>>,
timeline: &'a (dyn TimeFns + Sync), timeline: &'a (dyn TimeFns + Sync),
/// The [ParallelMode] of the current parallel context.
///
/// The current parallel context refers to the nearest `with parallel` or `with legacy_parallel`
/// statement, which is used to determine when and how the timeline should be updated.
parallel_mode: ParallelMode,
} }
impl<'a> ArtiqCodeGenerator<'a> { impl<'a> ArtiqCodeGenerator<'a> {
@ -79,75 +42,7 @@ impl<'a> ArtiqCodeGenerator<'a> {
timeline: &'a (dyn TimeFns + Sync), timeline: &'a (dyn TimeFns + Sync),
) -> ArtiqCodeGenerator<'a> { ) -> ArtiqCodeGenerator<'a> {
assert!(size_t == 32 || size_t == 64); assert!(size_t == 32 || size_t == 64);
ArtiqCodeGenerator { ArtiqCodeGenerator { name, size_t, name_counter: 0, start: None, end: None, timeline }
name,
size_t,
name_counter: 0,
start: None,
end: None,
timeline,
parallel_mode: ParallelMode::None,
}
}
/// If the generator is currently in a direct-`parallel` block context, emits IR that resets the
/// position of the timeline to the initial timeline position before entering the `parallel`
/// block.
///
/// Direct-`parallel` block context refers to when the generator is generating statements whose
/// closest parent `with` statement is a `with parallel` block.
fn timeline_reset_start(
&mut self,
ctx: &mut CodeGenContext<'_, '_>
) -> Result<(), String> {
if let Some(start) = self.start.clone() {
let start_val = self.gen_expr(ctx, &start)?
.unwrap()
.to_basic_value_enum(ctx, self, start.custom.unwrap())?;
self.timeline.emit_at_mu(ctx, start_val);
}
Ok(())
}
/// If the generator is currently in a `parallel` block context, emits IR that updates the
/// maximum end position of the `parallel` block as specified by the timeline `end` value.
///
/// In general the `end` parameter should be set to `self.end` for updating the maximum end
/// position for the current `parallel` block. Other values can be passed in to update the
/// maximum end position for other `parallel` blocks.
///
/// `parallel`-block context refers to when the generator is generating statements within a
/// (possibly indirect) `parallel` block.
///
/// * `store_name` - The LLVM value name for the pointer to `end`. `.addr` will be appended to
/// the end of the provided value name.
fn timeline_update_end_max(
&mut self,
ctx: &mut CodeGenContext<'_, '_>,
end: Option<Expr<Option<Type>>>,
store_name: Option<&str>,
) -> Result<(), String> {
if let Some(end) = end {
let old_end = self.gen_expr(ctx, &end)?
.unwrap()
.to_basic_value_enum(ctx, self, end.custom.unwrap())?;
let now = self.timeline.emit_now_mu(ctx);
let max = call_int_smax(
ctx,
old_end.into_int_value(),
now.into_int_value(),
Some("smax")
);
let end_store = self.gen_store_target(
ctx,
&end,
store_name.map(|name| format!("{name}.addr")).as_deref())?
.unwrap();
ctx.builder.build_store(end_store, max).unwrap();
}
Ok(())
} }
} }
@ -164,201 +59,183 @@ impl<'b> CodeGenerator for ArtiqCodeGenerator<'b> {
} }
} }
fn gen_block<'ctx, 'a, 'c, I: Iterator<Item=&'c Stmt<Option<Type>>>>( fn gen_call<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, 'a>, ctx: &mut CodeGenContext<'ctx, 'a>,
stmts: I
) -> Result<(), String> where Self: Sized {
// Legacy parallel emits timeline end-update/timeline-reset after each top-level statement
// in the parallel block
if self.parallel_mode == ParallelMode::Legacy {
for stmt in stmts {
self.gen_stmt(ctx, stmt)?;
if ctx.is_terminated() {
break;
}
self.timeline_update_end_max(ctx, self.end.clone(), Some("end"))?;
self.timeline_reset_start(ctx)?;
}
Ok(())
} else {
gen_block(self, ctx, stmts)
}
}
fn gen_call<'ctx>(
&mut self,
ctx: &mut CodeGenContext<'ctx, '_>,
obj: Option<(Type, ValueEnum<'ctx>)>, obj: Option<(Type, ValueEnum<'ctx>)>,
fun: (&FunSignature, DefinitionId), fun: (&FunSignature, DefinitionId),
params: Vec<(Option<StrRef>, ValueEnum<'ctx>)>, params: Vec<(Option<StrRef>, ValueEnum<'ctx>)>,
) -> Result<Option<BasicValueEnum<'ctx>>, String> { ) -> Result<Option<BasicValueEnum<'ctx>>, String> {
let result = gen_call(self, ctx, obj, fun, params)?; let result = gen_call(self, ctx, obj, fun, params)?;
if let Some(end) = self.end.clone() {
// Deep parallel emits timeline end-update/timeline-reset after each function call let old_end = self.gen_expr(ctx, &end)?.unwrap().to_basic_value_enum(ctx, self, end.custom.unwrap())?;
if self.parallel_mode == ParallelMode::Deep { let now = self.timeline.emit_now_mu(ctx);
self.timeline_update_end_max(ctx, self.end.clone(), Some("end"))?; let smax = ctx.module.get_function("llvm.smax.i64").unwrap_or_else(|| {
self.timeline_reset_start(ctx)?; let i64 = ctx.ctx.i64_type();
ctx.module.add_function(
"llvm.smax.i64",
i64.fn_type(&[i64.into(), i64.into()], false),
None,
)
});
let max = ctx
.builder
.build_call(smax, &[old_end.into(), now.into()], "smax")
.try_as_basic_value()
.left()
.unwrap();
let end_store = self.gen_store_target(ctx, &end)?;
ctx.builder.build_store(end_store, max);
}
if let Some(start) = self.start.clone() {
let start_val = self.gen_expr(ctx, &start)?.unwrap().to_basic_value_enum(ctx, self, start.custom.unwrap())?;
self.timeline.emit_at_mu(ctx, start_val);
} }
Ok(result) Ok(result)
} }
fn gen_with( fn gen_with<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
stmt: &Stmt<Option<Type>>, stmt: &Stmt<Option<Type>>,
) -> Result<(), String> { ) -> Result<(), String> {
let StmtKind::With { items, body, .. } = &stmt.node else { if let StmtKind::With { items, body, .. } = &stmt.node {
unreachable!() if items.len() == 1 && items[0].optional_vars.is_none() {
}; let item = &items[0];
// Behavior of parallel and sequential:
if items.len() == 1 && items[0].optional_vars.is_none() { // Each function call (indirectly, can be inside a sequential block) within a parallel
let item = &items[0]; // block will update the end variable to the maximum now_mu in the block.
// Each function call directly inside a parallel block will reset the timeline after
// Behavior of parallel and sequential: // execution. A parallel block within a sequential block (or not within any block) will
// Each function call (indirectly, can be inside a sequential block) within a parallel // set the timeline to the max now_mu within the block (and the outer max now_mu will also
// block will update the end variable to the maximum now_mu in the block. // be updated).
// Each function call directly inside a parallel block will reset the timeline after //
// execution. A parallel block within a sequential block (or not within any block) will // Implementation: We track the start and end separately.
// set the timeline to the max now_mu within the block (and the outer max now_mu will also // - If there is a start variable, it indicates that we are directly inside a
// be updated). // parallel block and we have to reset the timeline after every function call.
// // - If there is a end variable, it indicates that we are (indirectly) inside a
// Implementation: We track the start and end separately. // parallel block, and we should update the max end value.
// - If there is a start variable, it indicates that we are directly inside a if let ExprKind::Name { id, ctx: name_ctx } = &item.context_expr.node {
// parallel block and we have to reset the timeline after every function call. if id == &"parallel".into() {
// - If there is a end variable, it indicates that we are (indirectly) inside a let old_start = self.start.take();
// parallel block, and we should update the max end value. let old_end = self.end.take();
if let ExprKind::Name { id, ctx: name_ctx } = &item.context_expr.node { let now = if let Some(old_start) = &old_start {
if id == &"parallel".into() || id == &"legacy_parallel".into() { self.gen_expr(ctx, old_start)?.unwrap().to_basic_value_enum(ctx, self, old_start.custom.unwrap())?
let old_start = self.start.take(); } else {
let old_end = self.end.take(); self.timeline.emit_now_mu(ctx)
let old_parallel_mode = self.parallel_mode; };
// Emulate variable allocation, as we need to use the CodeGenContext
let now = if let Some(old_start) = &old_start { // HashMap to store our variable due to lifetime limitation
self.gen_expr(ctx, old_start)? // Note: we should be able to store variables directly if generic
// associative type is used by limiting the lifetime of CodeGenerator to
// the LLVM Context.
// The name is guaranteed to be unique as users cannot use this as variable
// name.
self.start = old_start.clone().map_or_else(
|| {
let start = format!("with-{}-start", self.name_counter).into();
let start_expr = Located {
// location does not matter at this point
location: stmt.location,
node: ExprKind::Name { id: start, ctx: name_ctx.clone() },
custom: Some(ctx.primitives.int64),
};
let start = self.gen_store_target(ctx, &start_expr)?;
ctx.builder.build_store(start, now);
Ok(Some(start_expr)) as Result<_, String>
},
|v| Ok(Some(v)),
)?;
let end = format!("with-{}-end", self.name_counter).into();
let end_expr = Located {
// location does not matter at this point
location: stmt.location,
node: ExprKind::Name { id: end, ctx: name_ctx.clone() },
custom: Some(ctx.primitives.int64),
};
let end = self.gen_store_target(ctx, &end_expr)?;
ctx.builder.build_store(end, now);
self.end = Some(end_expr);
self.name_counter += 1;
gen_block(self, ctx, body.iter())?;
let current = ctx.builder.get_insert_block().unwrap();
// if the current block is terminated, move before the terminator
// we want to set the timeline before reaching the terminator
// TODO: This may be unsound if there are multiple exit paths in the
// block... e.g.
// if ...:
// return
// Perhaps we can fix this by using actual with block?
let reset_position = if let Some(terminator) = current.get_terminator() {
ctx.builder.position_before(&terminator);
true
} else {
false
};
// set duration
let end_expr = self.end.take().unwrap();
let end_val = self
.gen_expr(ctx, &end_expr)?
.unwrap() .unwrap()
.to_basic_value_enum(ctx, self, old_start.custom.unwrap())? .to_basic_value_enum(ctx, self, end_expr.custom.unwrap())?;
} else {
self.timeline.emit_now_mu(ctx)
};
// Emulate variable allocation, as we need to use the CodeGenContext // inside a sequential block
// HashMap to store our variable due to lifetime limitation if old_start.is_none() {
// Note: we should be able to store variables directly if generic self.timeline.emit_at_mu(ctx, end_val);
// associative type is used by limiting the lifetime of CodeGenerator to }
// the LLVM Context. // inside a parallel block, should update the outer max now_mu
// The name is guaranteed to be unique as users cannot use this as variable if let Some(old_end) = &old_end {
// name. let outer_end_val = self
self.start = old_start.clone().map_or_else( .gen_expr(ctx, old_end)?
|| { .unwrap()
let start = format!("with-{}-start", self.name_counter).into(); .to_basic_value_enum(ctx, self, old_end.custom.unwrap())?;
let start_expr = Located { let smax =
// location does not matter at this point ctx.module.get_function("llvm.smax.i64").unwrap_or_else(|| {
location: stmt.location, let i64 = ctx.ctx.i64_type();
node: ExprKind::Name { id: start, ctx: name_ctx.clone() }, ctx.module.add_function(
custom: Some(ctx.primitives.int64), "llvm.smax.i64",
}; i64.fn_type(&[i64.into(), i64.into()], false),
let start = self None,
.gen_store_target(ctx, &start_expr, Some("start.addr"))? )
});
let max = ctx
.builder
.build_call(smax, &[end_val.into(), outer_end_val.into()], "smax")
.try_as_basic_value()
.left()
.unwrap(); .unwrap();
ctx.builder.build_store(start, now).unwrap(); let outer_end = self.gen_store_target(ctx, old_end)?;
Ok(Some(start_expr)) as Result<_, String> ctx.builder.build_store(outer_end, max);
}, }
|v| Ok(Some(v)), self.start = old_start;
)?; self.end = old_end;
let end = format!("with-{}-end", self.name_counter).into(); if reset_position {
let end_expr = Located { ctx.builder.position_at_end(current);
// location does not matter at this point }
location: stmt.location, return Ok(());
node: ExprKind::Name { id: end, ctx: name_ctx.clone() }, } else if id == &"sequential".into() {
custom: Some(ctx.primitives.int64), let start = self.start.take();
}; for stmt in body.iter() {
let end = self self.gen_stmt(ctx, stmt)?;
.gen_store_target(ctx, &end_expr, Some("end.addr"))? if ctx.is_terminated() {
.unwrap(); break;
ctx.builder.build_store(end, now).unwrap(); }
self.end = Some(end_expr); }
self.name_counter += 1; self.start = start;
self.parallel_mode = match id.to_string().as_str() { return Ok(());
"parallel" => ParallelMode::Deep,
"legacy_parallel" => ParallelMode::Legacy,
_ => unreachable!(),
};
self.gen_block(ctx, body.iter())?;
let current = ctx.builder.get_insert_block().unwrap();
// if the current block is terminated, move before the terminator
// we want to set the timeline before reaching the terminator
// TODO: This may be unsound if there are multiple exit paths in the
// block... e.g.
// if ...:
// return
// Perhaps we can fix this by using actual with block?
let reset_position = if let Some(terminator) = current.get_terminator() {
ctx.builder.position_before(&terminator);
true
} else {
false
};
// set duration
let end_expr = self.end.take().unwrap();
let end_val = self
.gen_expr(ctx, &end_expr)?
.unwrap()
.to_basic_value_enum(ctx, self, end_expr.custom.unwrap())?;
// inside a sequential block
if old_start.is_none() {
self.timeline.emit_at_mu(ctx, end_val);
} }
// inside a parallel block, should update the outer max now_mu
self.timeline_update_end_max(ctx, old_end.clone(), Some("outer.end"))?;
self.parallel_mode = old_parallel_mode;
self.end = old_end;
self.start = old_start;
if reset_position {
ctx.builder.position_at_end(current);
}
return Ok(());
} else if id == &"sequential".into() {
// For deep parallel, temporarily take away start to avoid function calls in
// the block from resetting the timeline.
// This does not affect legacy parallel, as the timeline will be reset after
// this block finishes execution.
let start = self.start.take();
self.gen_block(ctx, body.iter())?;
self.start = start;
// Reset the timeline when we are exiting the sequential block
// Legacy parallel does not need this, since it will be reset after codegen
// for this statement is completed
if self.parallel_mode == ParallelMode::Deep {
self.timeline_reset_start(ctx)?;
}
return Ok(());
} }
} }
// not parallel/sequential
gen_with(self, ctx, stmt)
} else {
unreachable!()
} }
// not parallel/sequential
gen_with(self, ctx, stmt)
} }
} }
fn gen_rpc_tag( fn gen_rpc_tag<'ctx, 'a>(
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
ty: Type, ty: Type,
buffer: &mut Vec<u8>, buffer: &mut Vec<u8>,
) -> Result<(), String> { ) -> Result<(), String> {
@ -403,14 +280,14 @@ fn gen_rpc_tag(
Ok(()) Ok(())
} }
fn rpc_codegen_callback_fn<'ctx>( fn rpc_codegen_callback_fn<'ctx, 'a>(
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
obj: Option<(Type, ValueEnum<'ctx>)>, obj: Option<(Type, ValueEnum<'ctx>)>,
fun: (&FunSignature, DefinitionId), fun: (&FunSignature, DefinitionId),
args: Vec<(Option<StrRef>, ValueEnum<'ctx>)>, args: Vec<(Option<StrRef>, ValueEnum<'ctx>)>,
generator: &mut dyn CodeGenerator, generator: &mut dyn CodeGenerator,
) -> Result<Option<BasicValueEnum<'ctx>>, String> { ) -> Result<Option<BasicValueEnum<'ctx>>, String> {
let ptr_type = ctx.ctx.i8_type().ptr_type(AddressSpace::default()); let ptr_type = ctx.ctx.i8_type().ptr_type(inkwell::AddressSpace::Generic);
let size_type = generator.get_size_type(ctx.ctx); let size_type = generator.get_size_type(ctx.ctx);
let int8 = ctx.ctx.i8_type(); let int8 = ctx.ctx.i8_type();
let int32 = ctx.ctx.i32_type(); let int32 = ctx.ctx.i32_type();
@ -422,7 +299,7 @@ fn rpc_codegen_callback_fn<'ctx>(
if obj.is_some() { if obj.is_some() {
tag.push(b'O'); tag.push(b'O');
} }
for arg in &fun.0.args { for arg in fun.0.args.iter() {
gen_rpc_tag(ctx, arg.ty, &mut tag)?; gen_rpc_tag(ctx, arg.ty, &mut tag)?;
} }
tag.push(b':'); tag.push(b':');
@ -458,25 +335,34 @@ fn rpc_codegen_callback_fn<'ctx>(
}) })
.as_pointer_value(); .as_pointer_value();
let arg_length = args.len() + usize::from(obj.is_some()); let arg_length = args.len() + if obj.is_some() { 1 } else { 0 };
let stackptr = call_stacksave(ctx, Some("rpc.stack")); let stacksave = ctx.module.get_function("llvm.stacksave").unwrap_or_else(|| {
let args_ptr = ctx.builder ctx.module.add_function("llvm.stacksave", ptr_type.fn_type(&[], false), None)
.build_array_alloca( });
ptr_type, let stackrestore = ctx.module.get_function("llvm.stackrestore").unwrap_or_else(|| {
ctx.ctx.i32_type().const_int(arg_length as u64, false), ctx.module.add_function(
"argptr", "llvm.stackrestore",
ctx.ctx.void_type().fn_type(&[ptr_type.into()], false),
None,
) )
.unwrap(); });
let stackptr = ctx.builder.build_call(stacksave, &[], "rpc.stack");
let args_ptr = ctx.builder.build_array_alloca(
ptr_type,
ctx.ctx.i32_type().const_int(arg_length as u64, false),
"argptr",
);
// -- rpc args handling // -- rpc args handling
let mut keys = fun.0.args.clone(); let mut keys = fun.0.args.clone();
let mut mapping = HashMap::new(); let mut mapping = HashMap::new();
for (key, value) in args { for (key, value) in args.into_iter() {
mapping.insert(key.unwrap_or_else(|| keys.remove(0).name), value); mapping.insert(key.unwrap_or_else(|| keys.remove(0).name), value);
} }
// default value handling // default value handling
for k in keys { for k in keys.into_iter() {
mapping.insert( mapping.insert(
k.name, k.name,
ctx.gen_symbol_val(generator, &k.default_value.unwrap(), k.ty).into() ctx.gen_symbol_val(generator, &k.default_value.unwrap(), k.ty).into()
@ -499,17 +385,17 @@ fn rpc_codegen_callback_fn<'ctx>(
} }
for (i, arg) in real_params.iter().enumerate() { for (i, arg) in real_params.iter().enumerate() {
let arg_slot = generator.gen_var_alloc(ctx, arg.get_type(), Some(&format!("rpc.arg{i}"))).unwrap(); let arg_slot = ctx.builder.build_alloca(arg.get_type(), &format!("rpc.arg{}", i));
ctx.builder.build_store(arg_slot, *arg).unwrap(); ctx.builder.build_store(arg_slot, *arg);
let arg_slot = ctx.builder.build_bitcast(arg_slot, ptr_type, "rpc.arg").unwrap(); let arg_slot = ctx.builder.build_bitcast(arg_slot, ptr_type, "rpc.arg");
let arg_ptr = unsafe { let arg_ptr = unsafe {
ctx.builder.build_gep( ctx.builder.build_gep(
args_ptr, args_ptr,
&[int32.const_int(i as u64, false)], &[int32.const_int(i as u64, false)],
&format!("rpc.arg{i}"), &format!("rpc.arg{}", i),
) )
}.unwrap(); };
ctx.builder.build_store(arg_ptr, arg_slot).unwrap(); ctx.builder.build_store(arg_ptr, arg_slot);
} }
// call // call
@ -519,24 +405,26 @@ fn rpc_codegen_callback_fn<'ctx>(
ctx.ctx.void_type().fn_type( ctx.ctx.void_type().fn_type(
&[ &[
int32.into(), int32.into(),
tag_ptr_type.ptr_type(AddressSpace::default()).into(), tag_ptr_type.ptr_type(AddressSpace::Generic).into(),
ptr_type.ptr_type(AddressSpace::default()).into(), ptr_type.ptr_type(AddressSpace::Generic).into(),
], ],
false, false,
), ),
None, None,
) )
}); });
ctx.builder ctx.builder.build_call(
.build_call( rpc_send,
rpc_send, &[service_id.into(), tag_ptr.into(), args_ptr.into()],
&[service_id.into(), tag_ptr.into(), args_ptr.into()], "rpc.send",
"rpc.send", );
)
.unwrap();
// reclaim stack space used by arguments // reclaim stack space used by arguments
call_stackrestore(ctx, stackptr); ctx.builder.build_call(
stackrestore,
&[stackptr.try_as_basic_value().unwrap_left().into()],
"rpc.stackrestore",
);
// -- receive value: // -- receive value:
// T result = { // T result = {
@ -562,60 +450,62 @@ fn rpc_codegen_callback_fn<'ctx>(
let alloc_bb = ctx.ctx.append_basic_block(current_function, "rpc.continue"); let alloc_bb = ctx.ctx.append_basic_block(current_function, "rpc.continue");
let tail_bb = ctx.ctx.append_basic_block(current_function, "rpc.tail"); let tail_bb = ctx.ctx.append_basic_block(current_function, "rpc.tail");
let ret_ty = ctx.get_llvm_abi_type(generator, fun.0.ret); let ret_ty = ctx.get_llvm_type(generator, fun.0.ret);
let need_load = !ret_ty.is_pointer_type(); let need_load = !ret_ty.is_pointer_type();
let slot = ctx.builder.build_alloca(ret_ty, "rpc.ret.slot").unwrap(); let slot = ctx.builder.build_alloca(ret_ty, "rpc.ret.slot");
let slotgen = ctx.builder.build_bitcast(slot, ptr_type, "rpc.ret.ptr").unwrap(); let slotgen = ctx.builder.build_bitcast(slot, ptr_type, "rpc.ret.ptr");
ctx.builder.build_unconditional_branch(head_bb).unwrap(); ctx.builder.build_unconditional_branch(head_bb);
ctx.builder.position_at_end(head_bb); ctx.builder.position_at_end(head_bb);
let phi = ctx.builder.build_phi(ptr_type, "rpc.ptr").unwrap(); let phi = ctx.builder.build_phi(ptr_type, "rpc.ptr");
phi.add_incoming(&[(&slotgen, prehead_bb)]); phi.add_incoming(&[(&slotgen, prehead_bb)]);
let alloc_size = ctx let alloc_size = ctx
.build_call_or_invoke(rpc_recv, &[phi.as_basic_value()], "rpc.size.next") .build_call_or_invoke(rpc_recv, &[phi.as_basic_value()], "rpc.size.next")
.unwrap() .unwrap()
.into_int_value(); .into_int_value();
let is_done = ctx.builder let is_done = ctx.builder.build_int_compare(
.build_int_compare( inkwell::IntPredicate::EQ,
inkwell::IntPredicate::EQ, int32.const_zero(),
int32.const_zero(), alloc_size,
alloc_size, "rpc.done",
"rpc.done", );
)
.unwrap();
ctx.builder.build_conditional_branch(is_done, tail_bb, alloc_bb).unwrap(); ctx.builder.build_conditional_branch(is_done, tail_bb, alloc_bb);
ctx.builder.position_at_end(alloc_bb); ctx.builder.position_at_end(alloc_bb);
let alloc_ptr = ctx.builder.build_array_alloca(ptr_type, alloc_size, "rpc.alloc").unwrap(); let alloc_ptr = ctx.builder.build_array_alloca(ptr_type, alloc_size, "rpc.alloc");
let alloc_ptr = ctx.builder.build_bitcast(alloc_ptr, ptr_type, "rpc.alloc.ptr").unwrap(); let alloc_ptr = ctx.builder.build_bitcast(alloc_ptr, ptr_type, "rpc.alloc.ptr");
phi.add_incoming(&[(&alloc_ptr, alloc_bb)]); phi.add_incoming(&[(&alloc_ptr, alloc_bb)]);
ctx.builder.build_unconditional_branch(head_bb).unwrap(); ctx.builder.build_unconditional_branch(head_bb);
ctx.builder.position_at_end(tail_bb); ctx.builder.position_at_end(tail_bb);
let result = ctx.builder.build_load(slot, "rpc.result").unwrap(); let result = ctx.builder.build_load(slot, "rpc.result");
if need_load { if need_load {
call_stackrestore(ctx, stackptr); ctx.builder.build_call(
stackrestore,
&[stackptr.try_as_basic_value().unwrap_left().into()],
"rpc.stackrestore",
);
} }
Ok(Some(result)) Ok(Some(result))
} }
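The `rpc.head`/`rpc.continue`/`rpc.tail` blocks built above form a simple receive loop. Roughly, as a Python model (names are illustrative; `rpc_recv` stands in for the runtime call, which fills the buffer it is given and returns the size of the next buffer it needs, or 0 once the value is complete):

```python
def receive_rpc_value(rpc_recv, ret_slot):
    # Model of the phi-driven loop: start by handing the runtime the return slot itself.
    buffer = ret_slot
    while True:
        needed = rpc_recv(buffer)     # "rpc.size.next"
        if needed == 0:               # "rpc.done": branch to rpc.tail
            return ret_slot           # the result is loaded from the original slot
        buffer = bytearray(needed)    # "rpc.alloc": fresh buffer for the next round
```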
pub fn attributes_writeback( pub fn attributes_writeback<'ctx, 'a>(
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
generator: &mut dyn CodeGenerator, generator: &mut dyn CodeGenerator,
inner_resolver: &InnerResolver, inner_resolver: &InnerResolver,
host_attributes: &PyObject, host_attributes: PyObject,
) -> Result<(), String> { ) -> Result<(), String> {
Python::with_gil(|py| -> PyResult<Result<(), String>> { Python::with_gil(|py| -> PyResult<Result<(), String>> {
let host_attributes: &PyList = host_attributes.downcast(py)?; let host_attributes = host_attributes.cast_as::<PyList>(py)?;
let top_levels = ctx.top_level.definitions.read(); let top_levels = ctx.top_level.definitions.read();
let globals = inner_resolver.global_value_ids.read(); let globals = inner_resolver.global_value_ids.read();
let int32 = ctx.ctx.i32_type(); let int32 = ctx.ctx.i32_type();
let zero = int32.const_zero(); let zero = int32.const_zero();
let mut values = Vec::new(); let mut values = Vec::new();
let mut scratch_buffer = Vec::new(); let mut scratch_buffer = Vec::new();
for val in (*globals).values() { for (_, val) in globals.iter() {
let val = val.as_ref(py); let val = val.as_ref(py);
let ty = inner_resolver.get_obj_type(py, val, &mut ctx.unifier, &top_levels, &ctx.primitives)?; let ty = inner_resolver.get_obj_type(py, val, &mut ctx.unifier, &top_levels, &ctx.primitives)?;
if let Err(ty) = ty { if let Err(ty) = ty {
@ -624,13 +514,13 @@ pub fn attributes_writeback(
let ty = ty.unwrap(); let ty = ty.unwrap();
match &*ctx.unifier.get_ty(ty) { match &*ctx.unifier.get_ty(ty) {
TypeEnum::TObj { fields, obj_id, .. } TypeEnum::TObj { fields, obj_id, .. }
if *obj_id != ctx.primitives.option.obj_id(&ctx.unifier).unwrap() => if *obj_id != ctx.primitives.option.get_obj_id(&ctx.unifier) =>
{ {
// we only care about primitive attributes // we only care about primitive attributes
// for non-primitive attributes, they should be in another global // for non-primitive attributes, they should be in another global
let mut attributes = Vec::new(); let mut attributes = Vec::new();
let obj = inner_resolver.get_obj_value(py, val, ctx, generator, ty)?.unwrap(); let obj = inner_resolver.get_obj_value(py, val, ctx, generator, ty)?.unwrap();
for (name, (field_ty, is_mutable)) in fields { for (name, (field_ty, is_mutable)) in fields.iter() {
if !is_mutable { if !is_mutable {
continue continue
} }
@ -639,7 +529,7 @@ pub fn attributes_writeback(
let index = ctx.get_attr_index(ty, *name); let index = ctx.get_attr_index(ty, *name);
values.push((*field_ty, ctx.build_gep_and_load( values.push((*field_ty, ctx.build_gep_and_load(
obj.into_pointer_value(), obj.into_pointer_value(),
&[zero, int32.const_int(index as u64, false)], None))); &[zero, int32.const_int(index as u64, false)])));
} }
} }
if !attributes.is_empty() { if !attributes.is_empty() {
@ -667,10 +557,10 @@ pub fn attributes_writeback(
default_value: None default_value: None
}).collect(), }).collect(),
ret: ctx.primitives.none, ret: ctx.primitives.none,
vars: VarMap::default() vars: Default::default()
}; };
let args: Vec<_> = values.into_iter().map(|(_, val)| (None, ValueEnum::Dynamic(val))).collect(); let args: Vec<_> = values.into_iter().map(|(_, val)| (None, ValueEnum::Dynamic(val))).collect();
if let Err(e) = rpc_codegen_callback_fn(ctx, None, (&fun, PRIMITIVE_DEF_IDS.int32), args, generator) { if let Err(e) = rpc_codegen_callback_fn(ctx, None, (&fun, DefinitionId(0)), args, generator) {
return Ok(Err(e)); return Ok(Err(e));
} }
Ok(Ok(())) Ok(Ok(()))
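Stepping back, the comments in `gen_call`/`gen_with` above describe the timeline bookkeeping for `with parallel`: every call starts from the block's start position, the block's end is the maximum `now_mu` reached by any call, and the cursor is reset after each call. A small pure-Python model of that bookkeeping (illustrative only, not ARTIQ code):

```python
now = 0  # timeline cursor, in machine units

def pulse(duration):
    """Stand-in for any timeline-advancing call inside a kernel."""
    global now
    now += duration

def parallel_block(calls):
    """Model of the start/end tracking ArtiqCodeGenerator emits for `with parallel`."""
    global now
    start = now              # "with-N-start": every call begins here
    end = now                # "with-N-end": running maximum of now_mu
    for call in calls:
        call()               # the call advances `now`
        end = max(end, now)  # the llvm.smax update of the block's end
        now = start          # reset the cursor before the next call
    now = end                # leaving the block, the timeline sits at the latest end

parallel_block([lambda: pulse(10), lambda: pulse(20), lambda: pulse(5)])
print(now)  # 20: the longest branch sets the block's duration
```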


@ -8,18 +8,16 @@ use std::sync::Arc;
use inkwell::{ use inkwell::{
memory_buffer::MemoryBuffer, memory_buffer::MemoryBuffer,
module::{Linkage, Module}, module::{Linkage, Module},
passes::PassBuilderOptions, passes::{PassManager, PassManagerBuilder},
support::is_multithreaded,
targets::*, targets::*,
OptimizationLevel, OptimizationLevel,
}; };
use itertools::Itertools; use nac3core::codegen::gen_func_impl;
use nac3core::codegen::{CodeGenLLVMOptions, CodeGenTargetMachineOptions, gen_func_impl};
use nac3core::toplevel::builtins::get_exn_constructor; use nac3core::toplevel::builtins::get_exn_constructor;
use nac3core::typecheck::typedef::{TypeEnum, Unifier, VarMap}; use nac3core::typecheck::typedef::{TypeEnum, Unifier};
use nac3parser::{ use nac3parser::{
ast::{ExprKind, Stmt, StmtKind, StrRef}, ast::{self, ExprKind, Stmt, StmtKind, StrRef},
parser::parse_program, parser::{self, parse_program},
}; };
use pyo3::prelude::*; use pyo3::prelude::*;
use pyo3::{exceptions, types::PyBytes, types::PyDict, types::PySet}; use pyo3::{exceptions, types::PyBytes, types::PyDict, types::PySet};
@ -63,17 +61,6 @@ enum Isa {
CortexA9, CortexA9,
} }
impl Isa {
/// Returns the number of bits in `size_t` for the [`Isa`].
fn get_size_type(self) -> u32 {
if self == Isa::Host {
64u32
} else {
32u32
}
}
}
#[derive(Clone)] #[derive(Clone)]
pub struct PrimitivePythonId { pub struct PrimitivePythonId {
int: u64, int: u64,
@ -85,10 +72,8 @@ pub struct PrimitivePythonId {
float64: u64, float64: u64,
bool: u64, bool: u64,
list: u64, list: u64,
ndarray: u64,
tuple: u64, tuple: u64,
typevar: u64, typevar: u64,
const_generic_marker: u64,
none: u64, none: u64,
exception: u64, exception: u64,
generic_alias: (u64, u64), generic_alias: (u64, u64),
@ -112,9 +97,7 @@ struct Nac3 {
top_levels: Vec<TopLevelComponent>, top_levels: Vec<TopLevelComponent>,
string_store: Arc<RwLock<HashMap<String, i32>>>, string_store: Arc<RwLock<HashMap<String, i32>>>,
exception_ids: Arc<RwLock<HashMap<usize, usize>>>, exception_ids: Arc<RwLock<HashMap<usize, usize>>>,
deferred_eval_store: DeferredEvaluationStore, deferred_eval_store: DeferredEvaluationStore
/// LLVM-related options for code generation.
llvm_options: CodeGenLLVMOptions,
} }
create_exception!(nac3artiq, CompileError, exceptions::PyException); create_exception!(nac3artiq, CompileError, exceptions::PyException);
@ -122,7 +105,7 @@ create_exception!(nac3artiq, CompileError, exceptions::PyException);
impl Nac3 { impl Nac3 {
fn register_module( fn register_module(
&mut self, &mut self,
module: &PyObject, module: PyObject,
registered_class_ids: &HashSet<u64>, registered_class_ids: &HashSet<u64>,
) -> PyResult<()> { ) -> PyResult<()> {
let (module_name, source_file) = Python::with_gil(|py| -> PyResult<(String, String)> { let (module_name, source_file) = Python::with_gil(|py| -> PyResult<(String, String)> {
@ -131,18 +114,18 @@ impl Nac3 {
})?; })?;
let source = fs::read_to_string(&source_file).map_err(|e| { let source = fs::read_to_string(&source_file).map_err(|e| {
exceptions::PyIOError::new_err(format!("failed to read input file: {e}")) exceptions::PyIOError::new_err(format!("failed to read input file: {}", e))
})?; })?;
let parser_result = parse_program(&source, source_file.into()) let parser_result = parser::parse_program(&source, source_file.into())
.map_err(|e| exceptions::PySyntaxError::new_err(format!("parse error: {e}")))?; .map_err(|e| exceptions::PySyntaxError::new_err(format!("parse error: {}", e)))?;
for mut stmt in parser_result { for mut stmt in parser_result.into_iter() {
let include = match stmt.node { let include = match stmt.node {
StmtKind::ClassDef { ast::StmtKind::ClassDef {
ref decorator_list, ref mut body, ref mut bases, .. ref decorator_list, ref mut body, ref mut bases, ..
} => { } => {
let nac3_class = decorator_list.iter().any(|decorator| { let nac3_class = decorator_list.iter().any(|decorator| {
if let ExprKind::Name { id, .. } = decorator.node { if let ast::ExprKind::Name { id, .. } = decorator.node {
id.to_string() == "nac3" id.to_string() == "nac3"
} else { } else {
false false
@ -156,11 +139,11 @@ impl Nac3 {
Python::with_gil(|py| -> PyResult<bool> { Python::with_gil(|py| -> PyResult<bool> {
let id_fn = PyModule::import(py, "builtins")?.getattr("id")?; let id_fn = PyModule::import(py, "builtins")?.getattr("id")?;
match &base.node { match &base.node {
ExprKind::Name { id, .. } => { ast::ExprKind::Name { id, .. } => {
if *id == "Exception".into() { if *id == "Exception".into() {
Ok(true) Ok(true)
} else { } else {
let base_obj = module.getattr(py, id.to_string().as_str())?; let base_obj = module.getattr(py, id.to_string())?;
let base_id = id_fn.call1((base_obj,))?.extract()?; let base_id = id_fn.call1((base_obj,))?.extract()?;
Ok(registered_class_ids.contains(&base_id)) Ok(registered_class_ids.contains(&base_id))
} }
@ -171,9 +154,9 @@ impl Nac3 {
.unwrap() .unwrap()
}); });
body.retain(|stmt| { body.retain(|stmt| {
if let StmtKind::FunctionDef { ref decorator_list, .. } = stmt.node { if let ast::StmtKind::FunctionDef { ref decorator_list, .. } = stmt.node {
decorator_list.iter().any(|decorator| { decorator_list.iter().any(|decorator| {
if let ExprKind::Name { id, .. } = decorator.node { if let ast::ExprKind::Name { id, .. } = decorator.node {
id.to_string() == "kernel" id.to_string() == "kernel"
|| id.to_string() == "portable" || id.to_string() == "portable"
|| id.to_string() == "rpc" || id.to_string() == "rpc"
@ -187,9 +170,9 @@ impl Nac3 {
}); });
true true
} }
StmtKind::FunctionDef { ref decorator_list, .. } => { ast::StmtKind::FunctionDef { ref decorator_list, .. } => {
decorator_list.iter().any(|decorator| { decorator_list.iter().any(|decorator| {
if let ExprKind::Name { id, .. } = decorator.node { if let ast::ExprKind::Name { id, .. } = decorator.node {
let id = id.to_string(); let id = id.to_string();
id == "extern" || id == "portable" || id == "kernel" || id == "rpc" id == "extern" || id == "portable" || id == "kernel" || id == "rpc"
} else { } else {
@ -210,7 +193,7 @@ impl Nac3 {
fn report_modinit( fn report_modinit(
arg_names: &[String], arg_names: &[String],
method_name: &str, method_name: &str,
resolver: &Arc<dyn SymbolResolver + Send + Sync>, resolver: Arc<dyn SymbolResolver + Send + Sync>,
top_level_defs: &[Arc<RwLock<TopLevelDef>>], top_level_defs: &[Arc<RwLock<TopLevelDef>>],
unifier: &mut Unifier, unifier: &mut Unifier,
primitives: &PrimitiveStore, primitives: &PrimitiveStore,
@ -218,7 +201,7 @@ impl Nac3 {
let base_ty = let base_ty =
match resolver.get_symbol_type(unifier, top_level_defs, primitives, "base".into()) { match resolver.get_symbol_type(unifier, top_level_defs, primitives, "base".into()) {
Ok(ty) => ty, Ok(ty) => ty,
Err(e) => return Some(format!("type error inside object launching kernel: {e}")), Err(e) => return Some(format!("type error inside object launching kernel: {}", e)),
}; };
let fun_ty = if method_name.is_empty() { let fun_ty = if method_name.is_empty() {
@ -228,7 +211,8 @@ impl Nac3 {
Some(t) => t.0, Some(t) => t.0,
None => { None => {
return Some(format!( return Some(format!(
"object launching kernel does not have method `{method_name}`" "object launching kernel does not have method `{}`",
method_name
)) ))
} }
} }
@ -249,7 +233,8 @@ impl Nac3 {
Some(n) => n, Some(n) => n,
None if default_value.is_none() => { None if default_value.is_none() => {
return Some(format!( return Some(format!(
"argument `{name}` not provided when launching kernel function" "argument `{}` not provided when launching kernel function",
name
)) ))
} }
_ => break, _ => break,
@ -263,14 +248,16 @@ impl Nac3 {
Ok(t) => t, Ok(t) => t,
Err(e) => { Err(e) => {
return Some(format!( return Some(format!(
"type error ({e}) at parameter #{i} when calling kernel function" "type error ({}) at parameter #{} when calling kernel function",
e, i
)) ))
} }
}; };
if let Err(e) = unifier.unify(in_ty, *ty) { if let Err(e) = unifier.unify(in_ty, *ty) {
return Some(format!( return Some(format!(
"type error ({}) at parameter #{i} when calling kernel function", "type error ({}) at parameter #{} when calling kernel function",
e.to_display(unifier), e.to_display(unifier).to_string(),
i
)); ));
} }
} }
@ -289,11 +276,9 @@ impl Nac3 {
py: Python, py: Python,
link_fn: &dyn Fn(&Module) -> PyResult<T>, link_fn: &dyn Fn(&Module) -> PyResult<T>,
) -> PyResult<T> { ) -> PyResult<T> {
let size_t = self.isa.get_size_type();
let (mut composer, mut builtins_def, mut builtins_ty) = TopLevelComposer::new( let (mut composer, mut builtins_def, mut builtins_ty) = TopLevelComposer::new(
self.builtins.clone(), self.builtins.clone(),
ComposerConfig { kernel_ann: Some("Kernel"), kernel_invariant_ann: "KernelInvariant" }, ComposerConfig { kernel_ann: Some("Kernel"), kernel_invariant_ann: "KernelInvariant" },
size_t,
); );
let builtins = PyModule::import(py, "builtins")?; let builtins = PyModule::import(py, "builtins")?;
@ -334,13 +319,13 @@ impl Nac3 {
let mut module_to_resolver_cache: HashMap<u64, _> = HashMap::new(); let mut module_to_resolver_cache: HashMap<u64, _> = HashMap::new();
let mut rpc_ids = vec![]; let mut rpc_ids = vec![];
for (stmt, path, module) in &self.top_levels { for (stmt, path, module) in self.top_levels.iter() {
let py_module: &PyAny = module.extract(py)?; let py_module: &PyAny = module.extract(py)?;
let module_id: u64 = id_fn.call1((py_module,))?.extract()?; let module_id: u64 = id_fn.call1((py_module,))?.extract()?;
let helper = helper.clone(); let helper = helper.clone();
let class_obj; let class_obj;
if let StmtKind::ClassDef { name, .. } = &stmt.node { if let StmtKind::ClassDef { name, .. } = &stmt.node {
let class = py_module.getattr(name.to_string().as_str()).unwrap(); let class = py_module.getattr(name.to_string()).unwrap();
if issubclass.call1((class, exn_class)).unwrap().extract().unwrap() && if issubclass.call1((class, exn_class)).unwrap().extract().unwrap() &&
class.getattr("artiq_builtin").is_err() { class.getattr("artiq_builtin").is_err() {
class_obj = Some(class); class_obj = Some(class);
@ -354,8 +339,8 @@ impl Nac3 {
module_to_resolver_cache.get(&module_id).cloned().unwrap_or_else(|| { module_to_resolver_cache.get(&module_id).cloned().unwrap_or_else(|| {
let mut name_to_pyid: HashMap<StrRef, u64> = HashMap::new(); let mut name_to_pyid: HashMap<StrRef, u64> = HashMap::new();
let members: &PyDict = let members: &PyDict =
py_module.getattr("__dict__").unwrap().downcast().unwrap(); py_module.getattr("__dict__").unwrap().cast_as().unwrap();
for (key, val) in members { for (key, val) in members.iter() {
let key: &str = key.extract().unwrap(); let key: &str = key.extract().unwrap();
let val = id_fn.call1((val,)).unwrap().extract().unwrap(); let val = id_fn.call1((val,)).unwrap().extract().unwrap();
name_to_pyid.insert(key.into(), val); name_to_pyid.insert(key.into(), val);
@ -367,12 +352,12 @@ impl Nac3 {
pyid_to_type: pyid_to_type.clone(), pyid_to_type: pyid_to_type.clone(),
primitive_ids: self.primitive_ids.clone(), primitive_ids: self.primitive_ids.clone(),
global_value_ids: global_value_ids.clone(), global_value_ids: global_value_ids.clone(),
class_names: Mutex::default(), class_names: Default::default(),
name_to_pyid: name_to_pyid.clone(), name_to_pyid: name_to_pyid.clone(),
module: module.clone(), module: module.clone(),
id_to_pyval: RwLock::default(), id_to_pyval: Default::default(),
id_to_primitive: RwLock::default(), id_to_primitive: Default::default(),
field_to_val: RwLock::default(), field_to_val: Default::default(),
helper, helper,
string_store: self.string_store.clone(), string_store: self.string_store.clone(),
exception_ids: self.exception_ids.clone(), exception_ids: self.exception_ids.clone(),
@ -386,10 +371,11 @@ impl Nac3 {
}); });
let (name, def_id, ty) = composer let (name, def_id, ty) = composer
.register_top_level(stmt.clone(), Some(resolver.clone()), path, false) .register_top_level(stmt.clone(), Some(resolver.clone()), path.clone(), false)
.map_err(|e| { .map_err(|e| {
CompileError::new_err(format!( CompileError::new_err(format!(
"compilation failed\n----------\n{e}" "compilation failed\n----------\n{}",
e
)) ))
})?; })?;
if let Some(class_obj) = class_obj { if let Some(class_obj) = class_obj {
@ -399,14 +385,14 @@ impl Nac3 {
match &stmt.node { match &stmt.node {
StmtKind::FunctionDef { decorator_list, .. } => { StmtKind::FunctionDef { decorator_list, .. } => {
if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) { if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) {
store_fun.call1(py, (def_id.0.into_py(py), module.getattr(py, name.to_string().as_str()).unwrap())).unwrap(); store_fun.call1(py, (def_id.0.into_py(py), module.getattr(py, name.to_string()).unwrap())).unwrap();
rpc_ids.push((None, def_id)); rpc_ids.push((None, def_id));
} }
} }
StmtKind::ClassDef { name, body, .. } => { StmtKind::ClassDef { name, body, .. } => {
let class_name = name.to_string(); let class_name = name.to_string();
let class_obj = module.getattr(py, class_name.as_str()).unwrap(); let class_obj = module.getattr(py, &class_name).unwrap();
for stmt in body { for stmt in body.iter() {
if let StmtKind::FunctionDef { name, decorator_list, .. } = &stmt.node { if let StmtKind::FunctionDef { name, decorator_list, .. } = &stmt.node {
if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) { if decorator_list.iter().any(|decorator| matches!(decorator.node, ExprKind::Name { id, .. } if id == "rpc".into())) {
if name == &"__init__".into() { if name == &"__init__".into() {
@ -440,7 +426,7 @@ impl Nac3 {
name_to_pyid.insert("base".into(), id_fun.call1((obj,))?.extract()?); name_to_pyid.insert("base".into(), id_fun.call1((obj,))?.extract()?);
let mut arg_names = vec![]; let mut arg_names = vec![];
for (i, arg) in args.into_iter().enumerate() { for (i, arg) in args.into_iter().enumerate() {
let name = format!("tmp{i}"); let name = format!("tmp{}", i);
module.add(&name, arg)?; module.add(&name, arg)?;
name_to_pyid.insert(name.clone().into(), id_fun.call1((arg,))?.extract()?); name_to_pyid.insert(name.clone().into(), id_fun.call1((arg,))?.extract()?);
arg_names.push(name); arg_names.push(name);
@ -459,10 +445,10 @@ impl Nac3 {
pyid_to_type: pyid_to_type.clone(), pyid_to_type: pyid_to_type.clone(),
primitive_ids: self.primitive_ids.clone(), primitive_ids: self.primitive_ids.clone(),
global_value_ids: global_value_ids.clone(), global_value_ids: global_value_ids.clone(),
class_names: Mutex::default(), class_names: Default::default(),
id_to_pyval: RwLock::default(), id_to_pyval: Default::default(),
id_to_primitive: RwLock::default(), id_to_primitive: Default::default(),
field_to_val: RwLock::default(), field_to_val: Default::default(),
name_to_pyid, name_to_pyid,
module: module.to_object(py), module: module.to_object(py),
helper, helper,
@ -472,11 +458,11 @@ impl Nac3 {
}); });
let resolver = Arc::new(Resolver(inner_resolver.clone())) as Arc<dyn SymbolResolver + Send + Sync>; let resolver = Arc::new(Resolver(inner_resolver.clone())) as Arc<dyn SymbolResolver + Send + Sync>;
let (_, def_id, _) = composer let (_, def_id, _) = composer
.register_top_level(synthesized.pop().unwrap(), Some(resolver.clone()), "", false) .register_top_level(synthesized.pop().unwrap(), Some(resolver.clone()), "".into(), false)
.unwrap(); .unwrap();
let fun_signature = let fun_signature =
FunSignature { args: vec![], ret: self.primitive.none, vars: VarMap::new() }; FunSignature { args: vec![], ret: self.primitive.none, vars: HashMap::new() };
let mut store = ConcreteTypeStore::new(); let mut store = ConcreteTypeStore::new();
let mut cache = HashMap::new(); let mut cache = HashMap::new();
let signature = let signature =
@ -485,26 +471,24 @@ impl Nac3 {
if let Err(e) = composer.start_analysis(true) { if let Err(e) = composer.start_analysis(true) {
// report error of __modinit__ separately // report error of __modinit__ separately
return if e.iter().any(|err| err.contains("<nac3_synthesized_modinit>")) { if !e.contains("<nac3_synthesized_modinit>") {
return Err(CompileError::new_err(format!(
"compilation failed\n----------\n{}",
e
)));
} else {
let msg = Self::report_modinit( let msg = Self::report_modinit(
&arg_names, &arg_names,
method_name, method_name,
&resolver, resolver.clone(),
&composer.extract_def_list(), &composer.extract_def_list(),
&mut composer.unifier, &mut composer.unifier,
&self.primitive, &self.primitive,
); );
Err(CompileError::new_err(format!( return Err(CompileError::new_err(format!(
"compilation failed\n----------\n{}", "compilation failed\n----------\n{}",
msg.unwrap_or(e.iter().sorted().join("\n----------\n")) msg.unwrap_or(e)
))) )));
} else {
Err(CompileError::new_err(
format!(
"compilation failed\n----------\n{}",
e.iter().sorted().join("\n----------\n"),
),
))
} }
} }
let top_level = Arc::new(composer.make_top_level_context()); let top_level = Arc::new(composer.make_top_level_context());
@ -512,7 +496,7 @@ impl Nac3 {
{ {
let rpc_codegen = rpc_codegen_callback(); let rpc_codegen = rpc_codegen_callback();
let defs = top_level.definitions.read(); let defs = top_level.definitions.read();
for (class_data, id) in &rpc_ids { for (class_data, id) in rpc_ids.iter() {
let mut def = defs[id.0].write(); let mut def = defs[id.0].write();
match &mut *def { match &mut *def {
TopLevelDef::Function { codegen_callback, .. } => { TopLevelDef::Function { codegen_callback, .. } => {
@ -520,7 +504,7 @@ impl Nac3 {
} }
TopLevelDef::Class { methods, .. } => { TopLevelDef::Class { methods, .. } => {
let (class_def, method_name) = class_data.as_ref().unwrap(); let (class_def, method_name) = class_data.as_ref().unwrap();
for (name, _, id) in &*methods { for (name, _, id) in methods.iter() {
if name != method_name { if name != method_name {
continue; continue;
} }
@ -533,7 +517,7 @@ impl Nac3 {
py, py,
( (
id.0.into_py(py), id.0.into_py(py),
class_def.getattr(py, name.to_string().as_str()).unwrap(), class_def.getattr(py, name.to_string()).unwrap(),
), ),
) )
.unwrap(); .unwrap();
@ -547,17 +531,18 @@ impl Nac3 {
let instance = { let instance = {
let defs = top_level.definitions.read(); let defs = top_level.definitions.read();
let mut definition = defs[def_id.0].write(); let mut definition = defs[def_id.0].write();
let TopLevelDef::Function { instance_to_stmt, instance_to_symbol, .. } = if let TopLevelDef::Function { instance_to_stmt, instance_to_symbol, .. } =
&mut *definition else { &mut *definition
{
instance_to_symbol.insert("".to_string(), "__modinit__".into());
instance_to_stmt[""].clone()
} else {
unreachable!() unreachable!()
}; }
instance_to_symbol.insert(String::new(), "__modinit__".into());
instance_to_stmt[""].clone()
}; };
let task = CodeGenTask { let task = CodeGenTask {
subst: Vec::default(), subst: Default::default(),
symbol_name: "__modinit__".to_string(), symbol_name: "__modinit__".to_string(),
body: instance.body, body: instance.body,
signature, signature,
@ -574,18 +559,18 @@ impl Nac3 {
store.from_signature(&mut composer.unifier, &self.primitive, &fun_signature, &mut cache); store.from_signature(&mut composer.unifier, &self.primitive, &fun_signature, &mut cache);
let signature = store.add_cty(signature); let signature = store.add_cty(signature);
let attributes_writeback_task = CodeGenTask { let attributes_writeback_task = CodeGenTask {
subst: Vec::default(), subst: Default::default(),
symbol_name: "attributes_writeback".to_string(), symbol_name: "attributes_writeback".to_string(),
body: Arc::new(Vec::default()), body: Arc::new(Default::default()),
signature, signature,
resolver, resolver,
store, store,
unifier_index: instance.unifier_id, unifier_index: instance.unifier_id,
calls: Arc::new(HashMap::default()), calls: Arc::new(Default::default()),
id: 0, id: 0,
}; };
let membuffers: Arc<Mutex<Vec<Vec<u8>>>> = Arc::default(); let membuffers: Arc<Mutex<Vec<Vec<u8>>>> = Default::default();
let membuffer = membuffers.clone(); let membuffer = membuffers.clone();
@ -595,8 +580,7 @@ impl Nac3 {
membuffer.lock().push(buffer); membuffer.lock().push(buffer);
}))); })));
let size_t = if self.isa == Isa::Host { 64 } else { 32 }; let size_t = if self.isa == Isa::Host { 64 } else { 32 };
let num_threads = if is_multithreaded() { 4 } else { 1 }; let thread_names: Vec<String> = (0..4).map(|_| "main".to_string()).collect();
let thread_names: Vec<String> = (0..num_threads).map(|_| "main".to_string()).collect();
let threads: Vec<_> = thread_names let threads: Vec<_> = thread_names
.iter() .iter()
.map(|s| Box::new(ArtiqCodeGenerator::new(s.to_string(), size_t, self.time_fns))) .map(|s| Box::new(ArtiqCodeGenerator::new(s.to_string(), size_t, self.time_fns)))
@ -604,12 +588,7 @@ impl Nac3 {
let membuffer = membuffers.clone(); let membuffer = membuffers.clone();
py.allow_threads(|| { py.allow_threads(|| {
let (registry, handles) = WorkerRegistry::create_workers( let (registry, handles) = WorkerRegistry::create_workers(threads, top_level.clone(), f);
threads,
top_level.clone(),
&self.llvm_options,
&f
);
registry.add_task(task); registry.add_task(task);
registry.wait_tasks_complete(handles); registry.wait_tasks_complete(handles);
@ -619,7 +598,7 @@ impl Nac3 {
let builder = context.create_builder(); let builder = context.create_builder();
let (_, module, _) = gen_func_impl(&context, &mut generator, &registry, builder, module, let (_, module, _) = gen_func_impl(&context, &mut generator, &registry, builder, module,
attributes_writeback_task, |generator, ctx| { attributes_writeback_task, |generator, ctx| {
attributes_writeback(ctx, generator, inner_resolver.as_ref(), &host_attributes) attributes_writeback(ctx, generator, inner_resolver.as_ref(), host_attributes)
}).unwrap(); }).unwrap();
let buffer = module.write_bitcode_to_memory(); let buffer = module.write_bitcode_to_memory();
let buffer = buffer.as_slice().into(); let buffer = buffer.as_slice().into();
@ -642,13 +621,7 @@ impl Nac3 {
let builder = context.create_builder(); let builder = context.create_builder();
let modinit_return = main.get_function("__modinit__").unwrap().get_last_basic_block().unwrap().get_terminator().unwrap(); let modinit_return = main.get_function("__modinit__").unwrap().get_last_basic_block().unwrap().get_terminator().unwrap();
builder.position_before(&modinit_return); builder.position_before(&modinit_return);
builder builder.build_call(main.get_function("attributes_writeback").unwrap(), &[], "attributes_writeback");
.build_call(
main.get_function("attributes_writeback").unwrap(),
&[],
"attributes_writeback",
)
.unwrap();
main.link_in_module(load_irrt(&context)) main.link_in_module(load_irrt(&context))
.map_err(|err| CompileError::new_err(err.to_string()))?; .map_err(|err| CompileError::new_err(err.to_string()))?;
@ -656,7 +629,7 @@ impl Nac3 {
let mut function_iter = main.get_first_function(); let mut function_iter = main.get_first_function();
while let Some(func) = function_iter { while let Some(func) = function_iter {
if func.count_basic_blocks() > 0 && func.get_name().to_str().unwrap() != "__modinit__" { if func.count_basic_blocks() > 0 && func.get_name().to_str().unwrap() != "__modinit__" {
func.set_linkage(Linkage::Private); func.set_linkage(inkwell::module::Linkage::Private);
} }
function_iter = func.get_next_function(); function_iter = func.get_next_function();
} }
@ -674,66 +647,44 @@ impl Nac3 {
global_option = global.get_next_global(); global_option = global.get_next_global();
} }
let target_machine = self.llvm_options.target let builder = PassManagerBuilder::create();
.create_target_machine(self.llvm_options.opt_level) builder.set_optimization_level(OptimizationLevel::Aggressive);
.expect("couldn't create target machine"); let passes = PassManager::create(());
builder.set_inliner_with_threshold(255);
let pass_options = PassBuilderOptions::create(); builder.populate_module_pass_manager(&passes);
pass_options.set_merge_functions(true); passes.run_on(&main);
let passes = format!("default<O{}>", self.llvm_options.opt_level as u32);
let result = main.run_passes(passes.as_str(), &target_machine, pass_options);
if let Err(err) = result {
panic!("Failed to run optimization for module `main`: {}", err.to_string());
}
link_fn(&main) link_fn(&main)
} }
/// Returns the [`TargetTriple`] used for compiling to [isa]. fn get_llvm_target_machine(
fn get_llvm_target_triple(isa: Isa) -> TargetTriple { &self,
match isa { ) -> TargetMachine {
Isa::Host => TargetMachine::get_default_triple(), let (triple, features) = match self.isa {
Isa::RiscV32G | Isa::RiscV32IMA => TargetTriple::create("riscv32-unknown-linux"), Isa::Host => (
Isa::CortexA9 => TargetTriple::create("armv7-unknown-linux-gnueabihf"), TargetMachine::get_default_triple(),
} TargetMachine::get_host_cpu_features().to_string(),
} ),
Isa::RiscV32G => {
/// Returns the [`String`] representing the target CPU used for compiling to [isa]. (TargetTriple::create("riscv32-unknown-linux"), "+a,+m,+f,+d".to_string())
fn get_llvm_target_cpu(isa: Isa) -> String { }
match isa { Isa::RiscV32IMA => (TargetTriple::create("riscv32-unknown-linux"), "+a,+m".to_string()),
Isa::Host => TargetMachine::get_host_cpu_name().to_string(), Isa::CortexA9 => (
Isa::RiscV32G | Isa::RiscV32IMA => "generic-rv32".to_string(), TargetTriple::create("armv7-unknown-linux-gnueabihf"),
Isa::CortexA9 => "cortex-a9".to_string(), "+dsp,+fp16,+neon,+vfp3,+long-calls".to_string(),
} ),
} };
let target =
/// Returns the [`String`] representing the target features used for compiling to [isa]. Target::from_triple(&triple).expect("couldn't create target from target triple");
fn get_llvm_target_features(isa: Isa) -> String { target
match isa { .create_target_machine(
Isa::Host => TargetMachine::get_host_cpu_features().to_string(), &triple,
Isa::RiscV32G => "+a,+m,+f,+d".to_string(), "",
Isa::RiscV32IMA => "+a,+m".to_string(), &features,
Isa::CortexA9 => "+dsp,+fp16,+neon,+vfp3,+long-calls".to_string(), OptimizationLevel::Default,
} RelocMode::PIC,
} CodeModel::Default,
)
/// Returns an instance of [`CodeGenTargetMachineOptions`] representing the target machine
/// options used for compiling to [isa].
fn get_llvm_target_options(isa: Isa) -> CodeGenTargetMachineOptions {
CodeGenTargetMachineOptions {
triple: Nac3::get_llvm_target_triple(isa).as_str().to_string_lossy().into_owned(),
cpu: Nac3::get_llvm_target_cpu(isa),
features: Nac3::get_llvm_target_features(isa),
reloc_mode: RelocMode::PIC,
..CodeGenTargetMachineOptions::from_host()
}
}
/// Returns an instance of [`TargetMachine`] used in compiling and linking of a program to the
/// target [isa].
fn get_llvm_target_machine(&self) -> TargetMachine {
Nac3::get_llvm_target_options(self.isa)
.create_target_machine(self.llvm_options.opt_level)
.expect("couldn't create target machine") .expect("couldn't create target machine")
} }
} }
@ -799,7 +750,7 @@ fn add_exceptions(
#[pymethods] #[pymethods]
impl Nac3 { impl Nac3 {
#[new] #[new]
fn new(isa: &str, artiq_builtins: &PyDict, py: Python) -> PyResult<Self> { fn new(isa: &str, py: Python) -> PyResult<Self> {
let isa = match isa { let isa = match isa {
"host" => Isa::Host, "host" => Isa::Host,
"rv32g" => Isa::RiscV32G, "rv32g" => Isa::RiscV32G,
@ -808,15 +759,16 @@ impl Nac3 {
_ => return Err(exceptions::PyValueError::new_err("invalid ISA")), _ => return Err(exceptions::PyValueError::new_err("invalid ISA")),
}; };
let time_fns: &(dyn TimeFns + Sync) = match isa { let time_fns: &(dyn TimeFns + Sync) = match isa {
Isa::Host => &timeline::EXTERN_TIME_FNS,
Isa::RiscV32G => &timeline::NOW_PINNING_TIME_FNS_64, Isa::RiscV32G => &timeline::NOW_PINNING_TIME_FNS_64,
Isa::RiscV32IMA => &timeline::NOW_PINNING_TIME_FNS, Isa::RiscV32IMA => &timeline::NOW_PINNING_TIME_FNS,
Isa::CortexA9 | Isa::Host => &timeline::EXTERN_TIME_FNS, Isa::CortexA9 => &timeline::EXTERN_TIME_FNS,
}; };
let primitive: PrimitiveStore = TopLevelComposer::make_primitives(isa.get_size_type()).0; let primitive: PrimitiveStore = TopLevelComposer::make_primitives().0;
let builtins = vec![ let builtins = vec![
( (
"now_mu".into(), "now_mu".into(),
FunSignature { args: vec![], ret: primitive.int64, vars: VarMap::new() }, FunSignature { args: vec![], ret: primitive.int64, vars: HashMap::new() },
Arc::new(GenCall::new(Box::new(move |ctx, _, _, _, _| { Arc::new(GenCall::new(Box::new(move |ctx, _, _, _, _| {
Ok(Some(time_fns.emit_now_mu(ctx))) Ok(Some(time_fns.emit_now_mu(ctx)))
}))), }))),
@ -830,7 +782,7 @@ impl Nac3 {
default_value: None, default_value: None,
}], }],
ret: primitive.none, ret: primitive.none,
vars: VarMap::new(), vars: HashMap::new(),
}, },
Arc::new(GenCall::new(Box::new(move |ctx, _, fun, args, generator| { Arc::new(GenCall::new(Box::new(move |ctx, _, fun, args, generator| {
let arg_ty = fun.0.args[0].ty; let arg_ty = fun.0.args[0].ty;
@ -848,7 +800,7 @@ impl Nac3 {
default_value: None, default_value: None,
}], }],
ret: primitive.none, ret: primitive.none,
vars: VarMap::new(), vars: HashMap::new(),
}, },
Arc::new(GenCall::new(Box::new(move |ctx, _, fun, args, generator| { Arc::new(GenCall::new(Box::new(move |ctx, _, fun, args, generator| {
let arg_ty = fun.0.args[0].ty; let arg_ty = fun.0.args[0].ty;
@ -862,22 +814,38 @@ impl Nac3 {
let builtins_mod = PyModule::import(py, "builtins").unwrap(); let builtins_mod = PyModule::import(py, "builtins").unwrap();
let id_fn = builtins_mod.getattr("id").unwrap(); let id_fn = builtins_mod.getattr("id").unwrap();
let numpy_mod = PyModule::import(py, "numpy").unwrap(); let numpy_mod = PyModule::import(py, "numpy").unwrap();
let numpy_typing_mod = PyModule::import(py, "numpy.typing").unwrap();
let typing_mod = PyModule::import(py, "typing").unwrap(); let typing_mod = PyModule::import(py, "typing").unwrap();
let types_mod = PyModule::import(py, "types").unwrap(); let types_mod = PyModule::import(py, "types").unwrap();
let get_id = |x: &PyAny| id_fn.call1((x,)).and_then(PyAny::extract).unwrap(); let get_id = |x| id_fn.call1((x,)).unwrap().extract().unwrap();
let get_attr_id = |obj: &PyModule, attr| id_fn.call1((obj.getattr(attr).unwrap(),)) let get_attr_id = |obj: &PyModule, attr| id_fn.call1((obj.getattr(attr).unwrap(),))
.unwrap().extract().unwrap(); .unwrap().extract().unwrap();
let primitive_ids = PrimitivePythonId { let primitive_ids = PrimitivePythonId {
virtual_id: get_id(artiq_builtins.get_item("virtual").ok().flatten().unwrap()), virtual_id: get_id(
builtins_mod
.getattr("globals")
.unwrap()
.call0()
.unwrap()
.get_item("virtual")
.unwrap(
)),
generic_alias: ( generic_alias: (
get_attr_id(typing_mod, "_GenericAlias"), get_attr_id(typing_mod, "_GenericAlias"),
get_attr_id(types_mod, "GenericAlias"), get_attr_id(types_mod, "GenericAlias"),
), ),
none: get_id(artiq_builtins.get_item("none").ok().flatten().unwrap()), none: id_fn
.call1((builtins_mod
.getattr("globals")
.unwrap()
.call0()
.unwrap()
.get_item("none")
.unwrap(),))
.unwrap()
.extract()
.unwrap(),
typevar: get_attr_id(typing_mod, "TypeVar"), typevar: get_attr_id(typing_mod, "TypeVar"),
const_generic_marker: get_id(artiq_builtins.get_item("_ConstGenericMarker").ok().flatten().unwrap()),
int: get_attr_id(builtins_mod, "int"), int: get_attr_id(builtins_mod, "int"),
int32: get_attr_id(numpy_mod, "int32"), int32: get_attr_id(numpy_mod, "int32"),
int64: get_attr_id(numpy_mod, "int64"), int64: get_attr_id(numpy_mod, "int64"),
@ -887,10 +855,19 @@ impl Nac3 {
float: get_attr_id(builtins_mod, "float"), float: get_attr_id(builtins_mod, "float"),
float64: get_attr_id(numpy_mod, "float64"), float64: get_attr_id(numpy_mod, "float64"),
list: get_attr_id(builtins_mod, "list"), list: get_attr_id(builtins_mod, "list"),
ndarray: get_attr_id(numpy_typing_mod, "NDArray"),
tuple: get_attr_id(builtins_mod, "tuple"), tuple: get_attr_id(builtins_mod, "tuple"),
exception: get_attr_id(builtins_mod, "Exception"), exception: get_attr_id(builtins_mod, "Exception"),
option: get_id(artiq_builtins.get_item("Option").ok().flatten().unwrap()), option: id_fn
.call1((builtins_mod
.getattr("globals")
.unwrap()
.call0()
.unwrap()
.get_item("Option")
.unwrap(),))
.unwrap()
.extract()
.unwrap(),
}; };
let working_directory = tempfile::Builder::new().prefix("nac3-").tempdir().unwrap(); let working_directory = tempfile::Builder::new().prefix("nac3-").tempdir().unwrap();
@ -902,16 +879,12 @@ impl Nac3 {
primitive, primitive,
builtins, builtins,
primitive_ids, primitive_ids,
top_levels: Vec::default(), top_levels: Default::default(),
pyid_to_def: Arc::default(), pyid_to_def: Default::default(),
working_directory, working_directory,
string_store: Arc::default(), string_store: Default::default(),
exception_ids: Arc::default(), exception_ids: Default::default(),
deferred_eval_store: DeferredEvaluationStore::new(), deferred_eval_store: DeferredEvaluationStore::new(),
llvm_options: CodeGenLLVMOptions {
opt_level: OptimizationLevel::Default,
target: Nac3::get_llvm_target_options(isa),
}
}) })
} }
@ -924,11 +897,11 @@ impl Nac3 {
let id_fn = PyModule::import(py, "builtins")?.getattr("id")?; let id_fn = PyModule::import(py, "builtins")?.getattr("id")?;
let getmodule_fn = PyModule::import(py, "inspect")?.getattr("getmodule")?; let getmodule_fn = PyModule::import(py, "inspect")?.getattr("getmodule")?;
for function in functions { for function in functions.iter() {
let module = getmodule_fn.call1((function,))?.extract()?; let module = getmodule_fn.call1((function,))?.extract()?;
modules.insert(id_fn.call1((&module,))?.extract()?, module); modules.insert(id_fn.call1((&module,))?.extract()?, module);
} }
for class in classes { for class in classes.iter() {
let module = getmodule_fn.call1((class,))?.extract()?; let module = getmodule_fn.call1((class,))?.extract()?;
modules.insert(id_fn.call1((&module,))?.extract()?, module); modules.insert(id_fn.call1((&module,))?.extract()?, module);
class_ids.insert(id_fn.call1((class,))?.extract()?); class_ids.insert(id_fn.call1((class,))?.extract()?);
@ -937,7 +910,7 @@ impl Nac3 {
})?; })?;
for module in modules.into_values() { for module in modules.into_values() {
self.register_module(&module, &class_ids)?; self.register_module(module, &class_ids)?;
} }
Ok(()) Ok(())
} }
@ -1002,7 +975,7 @@ impl Nac3 {
let link_fn = |module: &Module| { let link_fn = |module: &Module| {
let working_directory = self.working_directory.path().to_owned(); let working_directory = self.working_directory.path().to_owned();
target_machine target_machine
.write_to_file(module, FileType::Object, &working_directory.join("module.o")) .write_to_file(&module, FileType::Object, &working_directory.join("module.o"))
.expect("couldn't write module to file"); .expect("couldn't write module to file");
let filename_path = self.working_directory.path().join("module.elf"); let filename_path = self.working_directory.path().join("module.elf");
@ -1019,7 +992,7 @@ impl Nac3 {
} else { } else {
let link_fn = |module: &Module| { let link_fn = |module: &Module| {
let object_mem = target_machine let object_mem = target_machine
.write_to_memory_buffer(module, FileType::Object) .write_to_memory_buffer(&module, FileType::Object)
.expect("couldn't write module to object file buffer"); .expect("couldn't write module to object file buffer");
if let Ok(dyn_lib) = Linker::ld(object_mem.as_slice()) { if let Ok(dyn_lib) = Linker::ld(object_mem.as_slice()) {
Ok(PyBytes::new(py, &dyn_lib).into()) Ok(PyBytes::new(py, &dyn_lib).into())

File diff suppressed because it is too large

View File

@ -1,18 +1,10 @@
use inkwell::{values::{BasicValueEnum, CallSiteValue}, AddressSpace, AtomicOrdering}; use inkwell::{values::BasicValueEnum, AddressSpace, AtomicOrdering};
use itertools::Either;
use nac3core::codegen::CodeGenContext; use nac3core::codegen::CodeGenContext;
/// Functions for manipulating the timeline.
pub trait TimeFns { pub trait TimeFns {
fn emit_now_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>) -> BasicValueEnum<'ctx>;
/// Emits LLVM IR for `now_mu`. fn emit_at_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>, t: BasicValueEnum<'ctx>);
fn emit_now_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> BasicValueEnum<'ctx>; fn emit_delay_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>, dt: BasicValueEnum<'ctx>);
/// Emits LLVM IR for `at_mu`.
fn emit_at_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, t: BasicValueEnum<'ctx>);
/// Emits LLVM IR for `delay_mu`.
fn emit_delay_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, dt: BasicValueEnum<'ctx>);
} }
pub struct NowPinningTimeFns64 {} pub struct NowPinningTimeFns64 {}
@ -20,79 +12,82 @@ pub struct NowPinningTimeFns64 {}
// For FPGA design reasons, on VexRiscv with 64-bit data bus, the "now" CSR is split into two 32-bit // For FPGA design reasons, on VexRiscv with 64-bit data bus, the "now" CSR is split into two 32-bit
// values that are each padded to 64-bits. // values that are each padded to 64-bits.
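For context, a minimal standalone sketch (not part of this diff) of the split-CSR arithmetic that both versions of this impl generate IR for, written as plain Rust; the helper names are illustrative only:

```
// Reassemble a 64-bit timestamp from the two 32-bit halves of the "now" CSR
// (zero-extend hi, shift left by 32, or with zero-extended lo), and split one
// back apart (shift right by 32 + truncate), mirroring the IR sequences
// emitted in the impl below.
fn now_from_halves(hi: u32, lo: u32) -> u64 {
    (u64::from(hi) << 32) | u64::from(lo)
}

fn halves_from_time(t: u64) -> (u32, u32) {
    ((t >> 32) as u32, t as u32)
}

fn main() {
    let t = 0x0123_4567_89ab_cdefu64;
    let (hi, lo) = halves_from_time(t);
    assert_eq!(now_from_halves(hi, lo), t);
}
```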
impl TimeFns for NowPinningTimeFns64 { impl TimeFns for NowPinningTimeFns64 {
fn emit_now_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> BasicValueEnum<'ctx> { fn emit_now_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>) -> BasicValueEnum<'ctx> {
let i64_type = ctx.ctx.i64_type(); let i64_type = ctx.ctx.i64_type();
let i32_type = ctx.ctx.i32_type(); let i32_type = ctx.ctx.i32_type();
let now = ctx let now = ctx
.module .module
.get_global("now") .get_global("now")
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now")); .unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
let now_hiptr = ctx.builder let now_hiptr =
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr") ctx.builder.build_bitcast(now, i32_type.ptr_type(AddressSpace::Generic), "now_hiptr");
.map(BasicValueEnum::into_pointer_value) if let BasicValueEnum::PointerValue(now_hiptr) = now_hiptr {
.unwrap(); let now_loptr = unsafe {
ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(2, false)], "now_gep")
let now_loptr = unsafe { };
ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(2, false)], "now.lo.addr") if let (BasicValueEnum::IntValue(now_hi), BasicValueEnum::IntValue(now_lo)) = (
}.unwrap(); ctx.builder.build_load(now_hiptr, "now_hi"),
ctx.builder.build_load(now_loptr, "now_lo"),
let now_hi = ctx.builder.build_load(now_hiptr, "now.hi") ) {
.map(BasicValueEnum::into_int_value) let zext_hi = ctx.builder.build_int_z_extend(now_hi, i64_type, "now_zext_hi");
.unwrap(); let shifted_hi = ctx.builder.build_left_shift(
let now_lo = ctx.builder.build_load(now_loptr, "now.lo") zext_hi,
.map(BasicValueEnum::into_int_value) i64_type.const_int(32, false),
.unwrap(); "now_shifted_zext_hi",
);
let zext_hi = ctx.builder.build_int_z_extend(now_hi, i64_type, "").unwrap(); let zext_lo = ctx.builder.build_int_z_extend(now_lo, i64_type, "now_zext_lo");
let shifted_hi = ctx.builder ctx.builder.build_or(shifted_hi, zext_lo, "now_or").into()
.build_left_shift(zext_hi, i64_type.const_int(32, false), "") } else {
.unwrap(); unreachable!();
let zext_lo = ctx.builder.build_int_z_extend(now_lo, i64_type, "").unwrap(); }
ctx.builder.build_or(shifted_hi, zext_lo, "now_mu").map(Into::into).unwrap() } else {
unreachable!();
}
} }
fn emit_at_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, t: BasicValueEnum<'ctx>) { fn emit_at_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>, t: BasicValueEnum<'ctx>) {
let i32_type = ctx.ctx.i32_type(); let i32_type = ctx.ctx.i32_type();
let i64_type = ctx.ctx.i64_type(); let i64_type = ctx.ctx.i64_type();
let i64_32 = i64_type.const_int(32, false); let i64_32 = i64_type.const_int(32, false);
let time = t.into_int_value(); if let BasicValueEnum::IntValue(time) = t {
let time_hi = ctx.builder.build_int_truncate(
let time_hi = ctx.builder ctx.builder.build_right_shift(time, i64_32, false, "now_lshr"),
.build_int_truncate(
ctx.builder.build_right_shift(time, i64_32, false, "time.hi").unwrap(),
i32_type, i32_type,
"", "now_trunc",
) );
.unwrap(); let time_lo = ctx.builder.build_int_truncate(time, i32_type, "now_trunc");
let time_lo = ctx.builder.build_int_truncate(time, i32_type, "time.lo").unwrap(); let now = ctx
let now = ctx .module
.module .get_global("now")
.get_global("now") .unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now")); let now_hiptr = ctx.builder.build_bitcast(
let now_hiptr = ctx.builder now,
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr") i32_type.ptr_type(AddressSpace::Generic),
.map(BasicValueEnum::into_pointer_value) "now_bitcast",
.unwrap(); );
if let BasicValueEnum::PointerValue(now_hiptr) = now_hiptr {
let now_loptr = unsafe { let now_loptr = unsafe {
ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(2, false)], "now.lo.addr") ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(2, false)], "now_gep")
}.unwrap(); };
ctx.builder ctx.builder
.build_store(now_hiptr, time_hi) .build_store(now_hiptr, time_hi)
.unwrap() .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap();
.unwrap(); ctx.builder
ctx.builder .build_store(now_loptr, time_lo)
.build_store(now_loptr, time_lo) .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.unwrap() .unwrap();
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) } else {
.unwrap(); unreachable!();
}
} else {
unreachable!();
}
} }
fn emit_delay_mu<'ctx>( fn emit_delay_mu<'ctx, 'a>(
&self, &self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
dt: BasicValueEnum<'ctx>, dt: BasicValueEnum<'ctx>,
) { ) {
let i64_type = ctx.ctx.i64_type(); let i64_type = ctx.ctx.i64_type();
@ -101,55 +96,57 @@ impl TimeFns for NowPinningTimeFns64 {
.module .module
.get_global("now") .get_global("now")
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now")); .unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
let now_hiptr = ctx.builder let now_hiptr =
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr") ctx.builder.build_bitcast(now, i32_type.ptr_type(AddressSpace::Generic), "now_hiptr");
.map(BasicValueEnum::into_pointer_value) if let BasicValueEnum::PointerValue(now_hiptr) = now_hiptr {
.unwrap(); let now_loptr = unsafe {
ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(2, false)], "now_loptr")
let now_loptr = unsafe { };
ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(2, false)], "now.lo.addr") if let (
}.unwrap(); BasicValueEnum::IntValue(now_hi),
BasicValueEnum::IntValue(now_lo),
let now_hi = ctx.builder.build_load(now_hiptr, "now.hi") BasicValueEnum::IntValue(dt),
.map(BasicValueEnum::into_int_value) ) = (
.unwrap(); ctx.builder.build_load(now_hiptr, "now_hi"),
let now_lo = ctx.builder.build_load(now_loptr, "now.lo") ctx.builder.build_load(now_loptr, "now_lo"),
.map(BasicValueEnum::into_int_value) dt,
.unwrap(); ) {
let dt = dt.into_int_value(); let zext_hi = ctx.builder.build_int_z_extend(now_hi, i64_type, "now_zext_hi");
let shifted_hi = ctx.builder.build_left_shift(
let zext_hi = ctx.builder.build_int_z_extend(now_hi, i64_type, "").unwrap(); zext_hi,
let shifted_hi = ctx.builder
.build_left_shift(zext_hi, i64_type.const_int(32, false), "")
.unwrap();
let zext_lo = ctx.builder.build_int_z_extend(now_lo, i64_type, "").unwrap();
let now_val = ctx.builder.build_or(shifted_hi, zext_lo, "now").unwrap();
let time = ctx.builder.build_int_add(now_val, dt, "time").unwrap();
let time_hi = ctx.builder
.build_int_truncate(
ctx.builder.build_right_shift(
time,
i64_type.const_int(32, false), i64_type.const_int(32, false),
false, "now_shifted_zext_hi",
"", );
).unwrap(), let zext_lo = ctx.builder.build_int_z_extend(now_lo, i64_type, "now_zext_lo");
i32_type, let now_val = ctx.builder.build_or(shifted_hi, zext_lo, "now_or");
"time.hi",
)
.unwrap();
let time_lo = ctx.builder.build_int_truncate(time, i32_type, "time.lo").unwrap();
ctx.builder let time = ctx.builder.build_int_add(now_val, dt, "now_add");
.build_store(now_hiptr, time_hi) let time_hi = ctx.builder.build_int_truncate(
.unwrap() ctx.builder.build_right_shift(
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) time,
.unwrap(); i64_type.const_int(32, false),
ctx.builder false,
.build_store(now_loptr, time_lo) "now_lshr",
.unwrap() ),
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) i32_type,
.unwrap(); "now_trunc",
);
let time_lo = ctx.builder.build_int_truncate(time, i32_type, "now_trunc");
ctx.builder
.build_store(now_hiptr, time_hi)
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.unwrap();
ctx.builder
.build_store(now_loptr, time_lo)
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.unwrap();
} else {
unreachable!();
}
} else {
unreachable!();
};
} }
} }
@ -158,66 +155,66 @@ pub static NOW_PINNING_TIME_FNS_64: NowPinningTimeFns64 = NowPinningTimeFns64 {}
pub struct NowPinningTimeFns {} pub struct NowPinningTimeFns {}
impl TimeFns for NowPinningTimeFns { impl TimeFns for NowPinningTimeFns {
fn emit_now_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> BasicValueEnum<'ctx> { fn emit_now_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>) -> BasicValueEnum<'ctx> {
let i64_type = ctx.ctx.i64_type(); let i64_type = ctx.ctx.i64_type();
let now = ctx let now = ctx
.module .module
.get_global("now") .get_global("now")
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now")); .unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
let now_raw = ctx.builder.build_load(now.as_pointer_value(), "now") let now_raw = ctx.builder.build_load(now.as_pointer_value(), "now");
.map(BasicValueEnum::into_int_value) if let BasicValueEnum::IntValue(now_raw) = now_raw {
.unwrap(); let i64_32 = i64_type.const_int(32, false);
let now_lo = ctx.builder.build_left_shift(now_raw, i64_32, "now_shl");
let i64_32 = i64_type.const_int(32, false); let now_hi = ctx.builder.build_right_shift(now_raw, i64_32, false, "now_lshr");
let now_lo = ctx.builder.build_left_shift(now_raw, i64_32, "now.lo").unwrap(); ctx.builder.build_or(now_lo, now_hi, "now_or").into()
let now_hi = ctx.builder.build_right_shift(now_raw, i64_32, false, "now.hi").unwrap(); } else {
ctx.builder.build_or(now_lo, now_hi, "now_mu") unreachable!();
.map(Into::into) }
.unwrap()
} }
fn emit_at_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, t: BasicValueEnum<'ctx>) { fn emit_at_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>, t: BasicValueEnum<'ctx>) {
let i32_type = ctx.ctx.i32_type(); let i32_type = ctx.ctx.i32_type();
let i64_type = ctx.ctx.i64_type(); let i64_type = ctx.ctx.i64_type();
let i64_32 = i64_type.const_int(32, false); let i64_32 = i64_type.const_int(32, false);
if let BasicValueEnum::IntValue(time) = t {
let time = t.into_int_value(); let time_hi = ctx.builder.build_int_truncate(
ctx.builder.build_right_shift(time, i64_32, false, "now_lshr"),
let time_hi = ctx.builder
.build_int_truncate(
ctx.builder.build_right_shift(time, i64_32, false, "").unwrap(),
i32_type, i32_type,
"time.hi", "now_trunc",
) );
.unwrap(); let time_lo = ctx.builder.build_int_truncate(time, i32_type, "now_trunc");
let time_lo = ctx.builder.build_int_truncate(time, i32_type, "now_trunc").unwrap(); let now = ctx
let now = ctx .module
.module .get_global("now")
.get_global("now") .unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now")); let now_hiptr = ctx.builder.build_bitcast(
let now_hiptr = ctx.builder now,
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr") i32_type.ptr_type(AddressSpace::Generic),
.map(BasicValueEnum::into_pointer_value) "now_bitcast",
.unwrap(); );
if let BasicValueEnum::PointerValue(now_hiptr) = now_hiptr {
let now_loptr = unsafe { let now_loptr = unsafe {
ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(1, false)], "now.lo.addr") ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(1, false)], "now_gep")
}.unwrap(); };
ctx.builder ctx.builder
.build_store(now_hiptr, time_hi) .build_store(now_hiptr, time_hi)
.unwrap() .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap();
.unwrap(); ctx.builder
ctx.builder .build_store(now_loptr, time_lo)
.build_store(now_loptr, time_lo) .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.unwrap() .unwrap();
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) } else {
.unwrap(); unreachable!();
}
} else {
unreachable!();
}
} }
fn emit_delay_mu<'ctx>( fn emit_delay_mu<'ctx, 'a>(
&self, &self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
dt: BasicValueEnum<'ctx>, dt: BasicValueEnum<'ctx>,
) { ) {
let i32_type = ctx.ctx.i32_type(); let i32_type = ctx.ctx.i32_type();
@ -227,43 +224,41 @@ impl TimeFns for NowPinningTimeFns {
.module .module
.get_global("now") .get_global("now")
.unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now")); .unwrap_or_else(|| ctx.module.add_global(i64_type, None, "now"));
let now_raw = ctx.builder let now_raw = ctx.builder.build_load(now.as_pointer_value(), "now");
.build_load(now.as_pointer_value(), "") if let (BasicValueEnum::IntValue(now_raw), BasicValueEnum::IntValue(dt)) = (now_raw, dt) {
.map(BasicValueEnum::into_int_value) let now_lo = ctx.builder.build_left_shift(now_raw, i64_32, "now_shl");
.unwrap(); let now_hi = ctx.builder.build_right_shift(now_raw, i64_32, false, "now_lshr");
let now_val = ctx.builder.build_or(now_lo, now_hi, "now_or");
let dt = dt.into_int_value(); let time = ctx.builder.build_int_add(now_val, dt, "now_add");
let time_hi = ctx.builder.build_int_truncate(
let now_lo = ctx.builder.build_left_shift(now_raw, i64_32, "now.lo").unwrap(); ctx.builder.build_right_shift(time, i64_32, false, "now_lshr"),
let now_hi = ctx.builder.build_right_shift(now_raw, i64_32, false, "now.hi").unwrap();
let now_val = ctx.builder.build_or(now_lo, now_hi, "now_val").unwrap();
let time = ctx.builder.build_int_add(now_val, dt, "time").unwrap();
let time_hi = ctx.builder
.build_int_truncate(
ctx.builder.build_right_shift(time, i64_32, false, "time.hi").unwrap(),
i32_type, i32_type,
"now_trunc", "now_trunc",
) );
.unwrap(); let time_lo = ctx.builder.build_int_truncate(time, i32_type, "now_trunc");
let time_lo = ctx.builder.build_int_truncate(time, i32_type, "time.lo").unwrap(); let now_hiptr = ctx.builder.build_bitcast(
let now_hiptr = ctx.builder now,
.build_bitcast(now, i32_type.ptr_type(AddressSpace::default()), "now.hi.addr") i32_type.ptr_type(AddressSpace::Generic),
.map(BasicValueEnum::into_pointer_value) "now_bitcast",
.unwrap(); );
if let BasicValueEnum::PointerValue(now_hiptr) = now_hiptr {
let now_loptr = unsafe { let now_loptr = unsafe {
ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(1, false)], "now.lo.addr") ctx.builder.build_gep(now_hiptr, &[i32_type.const_int(1, false)], "now_gep")
}.unwrap(); };
ctx.builder ctx.builder
.build_store(now_hiptr, time_hi) .build_store(now_hiptr, time_hi)
.unwrap() .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) .unwrap();
.unwrap(); ctx.builder
ctx.builder .build_store(now_loptr, time_lo)
.build_store(now_loptr, time_lo) .set_atomic_ordering(AtomicOrdering::SequentiallyConsistent)
.unwrap() .unwrap();
.set_atomic_ordering(AtomicOrdering::SequentiallyConsistent) } else {
.unwrap(); unreachable!();
}
} else {
unreachable!();
}
} }
} }
@ -272,17 +267,14 @@ pub static NOW_PINNING_TIME_FNS: NowPinningTimeFns = NowPinningTimeFns {};
pub struct ExternTimeFns {} pub struct ExternTimeFns {}
impl TimeFns for ExternTimeFns { impl TimeFns for ExternTimeFns {
fn emit_now_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>) -> BasicValueEnum<'ctx> { fn emit_now_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>) -> BasicValueEnum<'ctx> {
let now_mu = ctx.module.get_function("now_mu").unwrap_or_else(|| { let now_mu = ctx.module.get_function("now_mu").unwrap_or_else(|| {
ctx.module.add_function("now_mu", ctx.ctx.i64_type().fn_type(&[], false), None) ctx.module.add_function("now_mu", ctx.ctx.i64_type().fn_type(&[], false), None)
}); });
ctx.builder.build_call(now_mu, &[], "now_mu") ctx.builder.build_call(now_mu, &[], "now_mu").try_as_basic_value().left().unwrap()
.map(CallSiteValue::try_as_basic_value)
.map(Either::unwrap_left)
.unwrap()
} }
fn emit_at_mu<'ctx>(&self, ctx: &mut CodeGenContext<'ctx, '_>, t: BasicValueEnum<'ctx>) { fn emit_at_mu<'ctx, 'a>(&self, ctx: &mut CodeGenContext<'ctx, 'a>, t: BasicValueEnum<'ctx>) {
let at_mu = ctx.module.get_function("at_mu").unwrap_or_else(|| { let at_mu = ctx.module.get_function("at_mu").unwrap_or_else(|| {
ctx.module.add_function( ctx.module.add_function(
"at_mu", "at_mu",
@ -290,12 +282,12 @@ impl TimeFns for ExternTimeFns {
None, None,
) )
}); });
ctx.builder.build_call(at_mu, &[t.into()], "at_mu").unwrap(); ctx.builder.build_call(at_mu, &[t.into()], "at_mu");
} }
fn emit_delay_mu<'ctx>( fn emit_delay_mu<'ctx, 'a>(
&self, &self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
dt: BasicValueEnum<'ctx>, dt: BasicValueEnum<'ctx>,
) { ) {
let delay_mu = ctx.module.get_function("delay_mu").unwrap_or_else(|| { let delay_mu = ctx.module.get_function("delay_mu").unwrap_or_else(|| {
@ -305,7 +297,7 @@ impl TimeFns for ExternTimeFns {
None, None,
) )
}); });
ctx.builder.build_call(delay_mu, &[dt.into()], "delay_mu").unwrap(); ctx.builder.build_call(delay_mu, &[dt.into()], "delay_mu");
} }
} }

View File

@ -2,7 +2,7 @@
name = "nac3ast" name = "nac3ast"
version = "0.1.0" version = "0.1.0"
authors = ["RustPython Team", "M-Labs"] authors = ["RustPython Team", "M-Labs"]
edition = "2021" edition = "2018"
[features] [features]
default = ["constant-optimization", "fold"] default = ["constant-optimization", "fold"]
@ -12,5 +12,5 @@ fold = []
[dependencies] [dependencies]
lazy_static = "1.4" lazy_static = "1.4"
parking_lot = "0.12" parking_lot = "0.12"
string-interner = "0.15" string-interner = "0.14"
fxhash = "0.2" fxhash = "0.2"

View File

@ -5,10 +5,10 @@ pub use crate::constant::*;
use std::{fmt, collections::HashMap, cell::RefCell}; use std::{fmt, collections::HashMap, cell::RefCell};
use parking_lot::{Mutex, MutexGuard}; use parking_lot::{Mutex, MutexGuard};
use string_interner::{DefaultBackend, StringInterner, symbol::SymbolU32}; use string_interner::{DefaultBackend, DefaultSymbol, StringInterner, symbol::SymbolU32};
use fxhash::FxBuildHasher; use fxhash::FxBuildHasher;
pub type Interner = StringInterner<DefaultBackend, FxBuildHasher>; pub type Interner = StringInterner<DefaultBackend<DefaultSymbol>, FxBuildHasher>;
lazy_static! { lazy_static! {
static ref INTERNER: Mutex<Interner> = Mutex::new(StringInterner::with_hasher(FxBuildHasher::default())); static ref INTERNER: Mutex<Interner> = Mutex::new(StringInterner::with_hasher(FxBuildHasher::default()));
} }

View File

@ -1,9 +1,8 @@
//! Datatypes to support source location information. //! Datatypes to support source location information.
use std::cmp::Ordering;
use crate::ast_gen::StrRef; use crate::ast_gen::StrRef;
use std::fmt; use std::fmt;
#[derive(Clone, Copy, Debug, Eq, PartialEq)] #[derive(Clone, Copy, Debug, PartialEq)]
pub struct FileName(pub StrRef); pub struct FileName(pub StrRef);
impl Default for FileName { impl Default for FileName {
fn default() -> Self { fn default() -> Self {
@ -18,7 +17,7 @@ impl From<String> for FileName {
} }
/// A location somewhere in the sourcecode. /// A location somewhere in the sourcecode.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] #[derive(Clone, Copy, Debug, Default, PartialEq)]
pub struct Location { pub struct Location {
pub row: usize, pub row: usize,
pub column: usize, pub column: usize,
@ -27,29 +26,7 @@ pub struct Location {
impl fmt::Display for Location { impl fmt::Display for Location {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}:{}:{}", self.file.0, self.row, self.column) write!(f, "{}: line {} column {}", self.file.0, self.row, self.column)
}
}
impl Ord for Location {
fn cmp(&self, other: &Self) -> Ordering {
let file_cmp = self.file.0.to_string().cmp(&other.file.0.to_string());
if file_cmp != Ordering::Equal {
return file_cmp
}
let row_cmp = self.row.cmp(&other.row);
if row_cmp != Ordering::Equal {
return row_cmp
}
self.column.cmp(&other.column)
}
}
impl PartialOrd for Location {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
} }
} }
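As an aside, the left-hand (newer) side here gives `Location` a total ordering by file name, then row, then column. A self-contained sketch of the same ordering, using a simplified stand-in type rather than NAC3's actual `Location` (which wraps `StrRef`):

```
use std::cmp::Ordering;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Loc<'a> {
    file: &'a str,
    row: usize,
    column: usize,
}

impl Ord for Loc<'_> {
    // Compare by file name first, then row, then column, matching the
    // precedence of the Ord impl shown above.
    fn cmp(&self, other: &Self) -> Ordering {
        (self.file, self.row, self.column).cmp(&(other.file, other.row, other.column))
    }
}

impl PartialOrd for Loc<'_> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let a = Loc { file: "kernel.py", row: 3, column: 9 };
    let b = Loc { file: "kernel.py", row: 4, column: 1 };
    assert!(a < b);
}
```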

View File

@ -2,25 +2,25 @@
name = "nac3core" name = "nac3core"
version = "0.1.0" version = "0.1.0"
authors = ["M-Labs"] authors = ["M-Labs"]
edition = "2021" edition = "2018"
[dependencies] [dependencies]
itertools = "0.12" itertools = "0.10"
crossbeam = "0.8" crossbeam = "0.8"
indexmap = "2.2"
parking_lot = "0.12" parking_lot = "0.12"
rayon = "1.8" rayon = "1.5"
nac3parser = { path = "../nac3parser" } nac3parser = { path = "../nac3parser" }
lazy_static = "1.4"
[dependencies.inkwell] [dependencies.inkwell]
version = "0.4" git = "https://github.com/TheDan64/inkwell.git"
default-features = false default-features = false
features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"] features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]
[dev-dependencies] [dev-dependencies]
test-case = "1.2.0" test-case = "1.2.0"
indoc = "2.0" indoc = "1.0"
insta = "=1.11.0" insta = "=1.11.0"
[build-dependencies] [build-dependencies]
regex = "1.10" regex = "1"

View File

@ -9,20 +9,19 @@ use std::{
fn main() { fn main() {
const FILE: &str = "src/codegen/irrt/irrt.c"; const FILE: &str = "src/codegen/irrt/irrt.c";
println!("cargo:rerun-if-changed={}", FILE);
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir);
/* /*
* HACK: Sadly, clang doesn't let us emit generic LLVM bitcode. * HACK: Sadly, clang doesn't let us emit generic LLVM bitcode.
* Compiling for WASM32 and filtering the output with regex is the closest we can get. * Compiling for WASM32 and filtering the output with regex is the closest we can get.
*/ */
let flags: &[&str] = &[
const FLAG: &[&str] = &[
"--target=wasm32", "--target=wasm32",
FILE, FILE,
"-fno-discard-value-names", "-O3",
match env::var("PROFILE").as_deref() {
Ok("debug") => "-O0",
Ok("release") => "-O3",
flavor => panic!("Unknown or missing build flavor {flavor:?}"),
},
"-emit-llvm", "-emit-llvm",
"-S", "-S",
"-Wall", "-Wall",
@ -30,13 +29,8 @@ fn main() {
"-o", "-o",
"-", "-",
]; ];
let output = Command::new("clang")
println!("cargo:rerun-if-changed={FILE}"); .args(FLAG)
let out_dir = env::var("OUT_DIR").unwrap();
let out_path = Path::new(&out_dir);
let output = Command::new("clang-irrt")
.args(flags)
.output() .output()
.map(|o| { .map(|o| {
assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap()); assert!(o.status.success(), "{}", std::str::from_utf8(&o.stderr).unwrap());
@ -48,9 +42,9 @@ fn main() {
let output = std::str::from_utf8(&output.stdout).unwrap().replace("\r\n", "\n"); let output = std::str::from_utf8(&output.stdout).unwrap().replace("\r\n", "\n");
let mut filtered_output = String::with_capacity(output.len()); let mut filtered_output = String::with_capacity(output.len());
let regex_filter = Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)").unwrap(); let regex_filter = regex::Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)").unwrap();
for f in regex_filter.captures_iter(&output) { for f in regex_filter.captures_iter(&output) {
assert_eq!(f.len(), 1); assert!(f.len() == 1);
filtered_output.push_str(&f[0]); filtered_output.push_str(&f[0]);
filtered_output.push('\n'); filtered_output.push('\n');
} }
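For reference, a minimal standalone sketch (not part of this diff) of the filtering step above, which keeps only `define ... }` bodies and `declare` lines from the clang output so the resulting bitcode stays free of target-specific metadata. The sample IR string is invented for illustration, and only the `regex` crate (already a build dependency) is assumed:

```
use regex::Regex;

fn main() {
    // Invented sample of what `clang --target=wasm32 -emit-llvm -S` might print.
    let output = [
        "target triple = \"wasm32\"",
        "define i32 @add(i32 %a, i32 %b) {",
        "  %r = add i32 %a, %b",
        "  ret i32 %r",
        "}",
        "declare i32 @puts(i8*)",
    ]
    .join("\n");

    // Same pattern as build.rs: keep function definitions (multi-line, up to
    // the closing brace) and bare declarations; drop everything else, e.g. the
    // target-specific `target triple` line.
    let regex_filter = Regex::new(r"(?ms:^define.*?\}$)|(?m:^declare.*?$)").unwrap();
    let mut filtered_output = String::with_capacity(output.len());
    for m in regex_filter.find_iter(&output) {
        filtered_output.push_str(m.as_str());
        filtered_output.push('\n');
    }
    print!("{}", filtered_output);
}
```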
@ -67,12 +61,12 @@ fn main() {
file.write_all(filtered_output.as_bytes()).unwrap(); file.write_all(filtered_output.as_bytes()).unwrap();
} }
let mut llvm_as = Command::new("llvm-as-irrt") let mut llvm_as = Command::new("llvm-as")
.stdin(Stdio::piped()) .stdin(Stdio::piped())
.arg("-o") .arg("-o")
.arg(out_path.join("irrt.bc")) .arg(out_path.join("irrt.bc"))
.spawn() .spawn()
.unwrap(); .unwrap();
llvm_as.stdin.as_mut().unwrap().write_all(filtered_output.as_bytes()).unwrap(); llvm_as.stdin.as_mut().unwrap().write_all(filtered_output.as_bytes()).unwrap();
assert!(llvm_as.wait().unwrap().success()); assert!(llvm_as.wait().unwrap().success())
} }

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -3,13 +3,12 @@ use crate::{
toplevel::DefinitionId, toplevel::DefinitionId,
typecheck::{ typecheck::{
type_inferencer::PrimitiveStore, type_inferencer::PrimitiveStore,
typedef::{FunSignature, FuncArg, Type, TypeEnum, Unifier, VarMap}, typedef::{FunSignature, FuncArg, Type, TypeEnum, Unifier},
}, },
}; };
use nac3parser::ast::StrRef; use nac3parser::ast::StrRef;
use std::collections::HashMap; use std::collections::HashMap;
use indexmap::IndexMap;
pub struct ConcreteTypeStore { pub struct ConcreteTypeStore {
store: Vec<ConcreteTypeEnum>, store: Vec<ConcreteTypeEnum>,
@ -51,7 +50,7 @@ pub enum ConcreteTypeEnum {
TObj { TObj {
obj_id: DefinitionId, obj_id: DefinitionId,
fields: HashMap<StrRef, (ConcreteType, bool)>, fields: HashMap<StrRef, (ConcreteType, bool)>,
params: IndexMap<u32, ConcreteType>, params: HashMap<u32, ConcreteType>,
}, },
TVirtual { TVirtual {
ty: ConcreteType, ty: ConcreteType,
@ -61,13 +60,9 @@ pub enum ConcreteTypeEnum {
ret: ConcreteType, ret: ConcreteType,
vars: HashMap<u32, ConcreteType>, vars: HashMap<u32, ConcreteType>,
}, },
TLiteral {
values: Vec<SymbolValue>,
},
} }
impl ConcreteTypeStore { impl ConcreteTypeStore {
#[must_use]
pub fn new() -> ConcreteTypeStore { pub fn new() -> ConcreteTypeStore {
ConcreteTypeStore { ConcreteTypeStore {
store: vec![ store: vec![
@ -85,7 +80,6 @@ impl ConcreteTypeStore {
} }
} }
#[must_use]
pub fn get(&self, cty: ConcreteType) -> &ConcreteTypeEnum { pub fn get(&self, cty: ConcreteType) -> &ConcreteTypeEnum {
&self.store[cty.0] &self.store[cty.0]
} }
@ -200,12 +194,9 @@ impl ConcreteTypeStore {
ty: self.from_unifier_type(unifier, primitives, *ty, cache), ty: self.from_unifier_type(unifier, primitives, *ty, cache),
}, },
TypeEnum::TFunc(signature) => { TypeEnum::TFunc(signature) => {
self.from_signature(unifier, primitives, signature, cache) self.from_signature(unifier, primitives, &*signature, cache)
} }
TypeEnum::TLiteral { values, .. } => ConcreteTypeEnum::TLiteral { _ => unreachable!(),
values: values.clone(),
},
_ => unreachable!("{:?}", ty_enum.get_type_name()),
}; };
let index = if let Some(ConcreteType(index)) = cache.get(&ty).unwrap() { let index = if let Some(ConcreteType(index)) = cache.get(&ty).unwrap() {
self.store[*index] = result; self.store[*index] = result;
@ -275,7 +266,7 @@ impl ConcreteTypeStore {
params: params params: params
.iter() .iter()
.map(|(id, cty)| (*id, self.to_unifier_type(unifier, primitives, *cty, cache))) .map(|(id, cty)| (*id, self.to_unifier_type(unifier, primitives, *cty, cache)))
.collect::<VarMap>(), .collect::<HashMap<_, _>>(),
}, },
ConcreteTypeEnum::TFunc { args, ret, vars } => TypeEnum::TFunc(FunSignature { ConcreteTypeEnum::TFunc { args, ret, vars } => TypeEnum::TFunc(FunSignature {
args: args args: args
@ -290,12 +281,8 @@ impl ConcreteTypeStore {
vars: vars vars: vars
.iter() .iter()
.map(|(id, cty)| (*id, self.to_unifier_type(unifier, primitives, *cty, cache))) .map(|(id, cty)| (*id, self.to_unifier_type(unifier, primitives, *cty, cache)))
.collect::<VarMap>(), .collect::<HashMap<_, _>>(),
}), }),
ConcreteTypeEnum::TLiteral { values, .. } => TypeEnum::TLiteral {
values: values.clone(),
loc: None,
}
}; };
let result = unifier.add_ty(result); let result = unifier.add_ty(result);
if let Some(ty) = cache.get(&cty).unwrap() { if let Some(ty) = cache.get(&cty).unwrap() {

File diff suppressed because it is too large

View File

@ -1,613 +0,0 @@
use inkwell::attributes::{Attribute, AttributeLoc};
use inkwell::values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue};
use itertools::Either;
use crate::codegen::CodeGenContext;
/// Invokes the [`tan`](https://en.cppreference.com/w/c/numeric/math/tan) function.
pub fn call_tan<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "tan";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`asin`](https://en.cppreference.com/w/c/numeric/math/asin) function.
pub fn call_asin<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "asin";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`acos`](https://en.cppreference.com/w/c/numeric/math/acos) function.
pub fn call_acos<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "acos";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`atan`](https://en.cppreference.com/w/c/numeric/math/atan) function.
pub fn call_atan<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "atan";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`sinh`](https://en.cppreference.com/w/c/numeric/math/sinh) function.
pub fn call_sinh<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "sinh";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`cosh`](https://en.cppreference.com/w/c/numeric/math/cosh) function.
pub fn call_cosh<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "cosh";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`tanh`](https://en.cppreference.com/w/c/numeric/math/tanh) function.
pub fn call_tanh<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "tanh";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`asinh`](https://en.cppreference.com/w/c/numeric/math/asinh) function.
pub fn call_asinh<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "asinh";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`acosh`](https://en.cppreference.com/w/c/numeric/math/acosh) function.
pub fn call_acosh<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "acosh";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`atanh`](https://en.cppreference.com/w/c/numeric/math/atanh) function.
pub fn call_atanh<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "atanh";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`expm1`](https://en.cppreference.com/w/c/numeric/math/expm1) function.
pub fn call_expm1<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "expm1";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`cbrt`](https://en.cppreference.com/w/c/numeric/math/cbrt) function.
pub fn call_cbrt<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "cbrt";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nosync", "nounwind", "readonly", "willreturn"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`erf`](https://en.cppreference.com/w/c/numeric/math/erf) function.
pub fn call_erf<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "erf";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id("nounwind"), 0)
);
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`erfc`](https://en.cppreference.com/w/c/numeric/math/erfc) function.
pub fn call_erfc<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "erfc";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id("nounwind"), 0)
);
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`j1`](https://www.gnu.org/software/libc/manual/html_node/Special-Functions.html#index-j1)
/// function.
pub fn call_j1<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "j1";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id("nounwind"), 0)
);
func
});
ctx.builder
.build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`atan2`](https://en.cppreference.com/w/c/numeric/math/atan2) function.
pub fn call_atan2<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
y: FloatValue<'ctx>,
x: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "atan2";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(y.get_type(), llvm_f64);
debug_assert_eq!(x.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into(), llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn", "writeonly"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[y.into(), x.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`ldexp`](https://en.cppreference.com/w/c/numeric/math/ldexp) function.
pub fn call_ldexp<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
arg: FloatValue<'ctx>,
exp: IntValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "ldexp";
let llvm_f64 = ctx.ctx.f64_type();
let llvm_i32 = ctx.ctx.i32_type();
debug_assert_eq!(arg.get_type(), llvm_f64);
debug_assert_eq!(exp.get_type(), llvm_i32);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into(), llvm_i32.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
for attr in ["mustprogress", "nofree", "nounwind", "willreturn"] {
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id(attr), 0)
);
}
func
});
ctx.builder
.build_call(extern_fn, &[arg.into(), exp.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`hypot`](https://en.cppreference.com/w/c/numeric/math/hypot) function.
pub fn call_hypot<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
x: FloatValue<'ctx>,
y: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "hypot";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(x.get_type(), llvm_f64);
debug_assert_eq!(y.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into(), llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id("nounwind"), 0)
);
func
});
ctx.builder
.build_call(extern_fn, &[x.into(), y.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`nextafter`](https://en.cppreference.com/w/c/numeric/math/nextafter) function.
pub fn call_nextafter<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
from: FloatValue<'ctx>,
to: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
const FN_NAME: &str = "nextafter";
let llvm_f64 = ctx.ctx.f64_type();
debug_assert_eq!(from.get_type(), llvm_f64);
debug_assert_eq!(to.get_type(), llvm_f64);
let extern_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into(), llvm_f64.into()], false);
let func = ctx.module.add_function(FN_NAME, fn_type, None);
func.add_attribute(
AttributeLoc::Function,
ctx.ctx.create_enum_attribute(Attribute::get_named_enum_kind_id("nounwind"), 0)
);
func
});
ctx.builder
.build_call(extern_fn, &[from.into(), to.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
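Each wrapper above follows the same idiom: look up the libm symbol on the module, declare it with an `f64` signature (and, where known, its side-effect attributes) if it is not present yet, then emit the call and unwrap the `f64` result. A condensed sketch of that shared pattern, using only the inkwell calls already visible above — the helper name `call_unary_libm` is made up for illustration, and the per-function attribute lists are omitted:

/// Illustrative only: get-or-declare a unary `double -> double` libm symbol and call it.
fn call_unary_libm<'ctx>(
    ctx: &CodeGenContext<'ctx, '_>,
    fn_name: &str,
    arg: FloatValue<'ctx>,
    name: Option<&str>,
) -> FloatValue<'ctx> {
    let llvm_f64 = ctx.ctx.f64_type();
    debug_assert_eq!(arg.get_type(), llvm_f64);

    // Reuse an existing declaration, otherwise declare `double fn_name(double)`.
    let extern_fn = ctx.module.get_function(fn_name).unwrap_or_else(|| {
        let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
        ctx.module.add_function(fn_name, fn_type, None)
    });

    // Emit the call and extract the `f64` return value.
    ctx.builder
        .build_call(extern_fn, &[arg.into()], name.unwrap_or_default())
        .map(CallSiteValue::try_as_basic_value)
        .map(|v| v.map_left(BasicValueEnum::into_float_value))
        .map(Either::unwrap_left)
        .unwrap()
}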

View File

@ -1,5 +1,5 @@
use crate::{ use crate::{
codegen::{classes::ArraySliceValue, expr::*, stmt::*, bool_to_i1, bool_to_i8, CodeGenContext}, codegen::{expr::*, stmt::*, CodeGenContext},
symbol_resolver::ValueEnum, symbol_resolver::ValueEnum,
toplevel::{DefinitionId, TopLevelDef}, toplevel::{DefinitionId, TopLevelDef},
typecheck::typedef::{FunSignature, Type}, typecheck::typedef::{FunSignature, Type},
@ -7,7 +7,7 @@ use crate::{
use inkwell::{ use inkwell::{
context::Context, context::Context,
types::{BasicTypeEnum, IntType}, types::{BasicTypeEnum, IntType},
values::{BasicValueEnum, IntValue, PointerValue}, values::{BasicValueEnum, PointerValue},
}; };
use nac3parser::ast::{Expr, Stmt, StrRef}; use nac3parser::ast::{Expr, Stmt, StrRef};
@ -22,9 +22,9 @@ pub trait CodeGenerator {
/// - fun: Function signature and definition ID. /// - fun: Function signature and definition ID.
/// - params: Function parameters. Note that this does not include the object even if the /// - params: Function parameters. Note that this does not include the object even if the
/// function is a class method. /// function is a class method.
fn gen_call<'ctx>( fn gen_call<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
obj: Option<(Type, ValueEnum<'ctx>)>, obj: Option<(Type, ValueEnum<'ctx>)>,
fun: (&FunSignature, DefinitionId), fun: (&FunSignature, DefinitionId),
params: Vec<(Option<StrRef>, ValueEnum<'ctx>)>, params: Vec<(Option<StrRef>, ValueEnum<'ctx>)>,
@ -39,9 +39,9 @@ pub trait CodeGenerator {
/// - signature: Function signature of the constructor. /// - signature: Function signature of the constructor.
/// - def: Class definition for the constructor class. /// - def: Class definition for the constructor class.
/// - params: Function parameters. /// - params: Function parameters.
fn gen_constructor<'ctx>( fn gen_constructor<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
signature: &FunSignature, signature: &FunSignature,
def: &TopLevelDef, def: &TopLevelDef,
params: Vec<(Option<StrRef>, ValueEnum<'ctx>)>, params: Vec<(Option<StrRef>, ValueEnum<'ctx>)>,
@ -59,20 +59,20 @@ pub trait CodeGenerator {
/// function is a class method. /// function is a class method.
/// Note that this function should check if the function is generated in another thread (due to /// Note that this function should check if the function is generated in another thread (due to
/// possible race condition), see the default implementation for an example. /// possible race condition), see the default implementation for an example.
fn gen_func_instance<'ctx>( fn gen_func_instance<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
obj: Option<(Type, ValueEnum<'ctx>)>, obj: Option<(Type, ValueEnum<'ctx>)>,
fun: (&FunSignature, &mut TopLevelDef, String), fun: (&FunSignature, &mut TopLevelDef, String),
id: usize, id: usize,
) -> Result<String, String> { ) -> Result<String, String> {
gen_func_instance(ctx, &obj, fun, id) gen_func_instance(ctx, obj, fun, id)
} }
/// Generate the code for an expression. /// Generate the code for an expression.
fn gen_expr<'ctx>( fn gen_expr<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
expr: &Expr<Option<Type>>, expr: &Expr<Option<Type>>,
) -> Result<Option<ValueEnum<'ctx>>, String> ) -> Result<Option<ValueEnum<'ctx>>, String>
where where
@ -83,44 +83,30 @@ pub trait CodeGenerator {
/// Allocate memory for a variable and return a pointer pointing to it. /// Allocate memory for a variable and return a pointer pointing to it.
/// The default implementation places the allocations at the start of the function. /// The default implementation places the allocations at the start of the function.
fn gen_var_alloc<'ctx>( fn gen_var_alloc<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
ty: BasicTypeEnum<'ctx>, ty: BasicTypeEnum<'ctx>,
name: Option<&str>,
) -> Result<PointerValue<'ctx>, String> { ) -> Result<PointerValue<'ctx>, String> {
gen_var(ctx, ty, name) gen_var(ctx, ty)
}
/// Allocate memory for a variable and return a pointer pointing to it.
/// The default implementation places the allocations at the start of the function.
fn gen_array_var_alloc<'ctx>(
&mut self,
ctx: &mut CodeGenContext<'ctx, '_>,
ty: BasicTypeEnum<'ctx>,
size: IntValue<'ctx>,
name: Option<&'ctx str>,
) -> Result<ArraySliceValue<'ctx>, String> {
gen_array_var(ctx, ty, size, name)
} }
/// Return a pointer pointing to the target of the expression. /// Return a pointer pointing to the target of the expression.
fn gen_store_target<'ctx>( fn gen_store_target<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
pattern: &Expr<Option<Type>>, pattern: &Expr<Option<Type>>,
name: Option<&str>, ) -> Result<PointerValue<'ctx>, String>
) -> Result<Option<PointerValue<'ctx>>, String>
where where
Self: Sized, Self: Sized,
{ {
gen_store_target(self, ctx, pattern, name) gen_store_target(self, ctx, pattern)
} }
/// Generate code for an assignment expression. /// Generate code for an assignment expression.
fn gen_assign<'ctx>( fn gen_assign<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
target: &Expr<Option<Type>>, target: &Expr<Option<Type>>,
value: ValueEnum<'ctx>, value: ValueEnum<'ctx>,
) -> Result<(), String> ) -> Result<(), String>
@ -132,9 +118,9 @@ pub trait CodeGenerator {
/// Generate code for a while expression. /// Generate code for a while expression.
/// Return true if the while loop must early return /// Return true if the while loop must early return
fn gen_while( fn gen_while<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
stmt: &Stmt<Option<Type>>, stmt: &Stmt<Option<Type>>,
) -> Result<(), String> ) -> Result<(), String>
where where
@ -145,9 +131,9 @@ pub trait CodeGenerator {
/// Generate code for a while expression. /// Generate code for a while expression.
/// Return true if the while loop must early return /// Return true if the while loop must early return
fn gen_for( fn gen_for<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
stmt: &Stmt<Option<Type>>, stmt: &Stmt<Option<Type>>,
) -> Result<(), String> ) -> Result<(), String>
where where
@ -158,9 +144,9 @@ pub trait CodeGenerator {
/// Generate code for an if expression. /// Generate code for an if expression.
/// Return true if the statement must early return /// Return true if the statement must early return
fn gen_if( fn gen_if<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
stmt: &Stmt<Option<Type>>, stmt: &Stmt<Option<Type>>,
) -> Result<(), String> ) -> Result<(), String>
where where
@ -169,9 +155,9 @@ pub trait CodeGenerator {
gen_if(self, ctx, stmt) gen_if(self, ctx, stmt)
} }
fn gen_with( fn gen_with<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
stmt: &Stmt<Option<Type>>, stmt: &Stmt<Option<Type>>,
) -> Result<(), String> ) -> Result<(), String>
where where
@ -181,11 +167,10 @@ pub trait CodeGenerator {
} }
/// Generate code for a statement /// Generate code for a statement
///
/// Return true if the statement must early return /// Return true if the statement must early return
fn gen_stmt( fn gen_stmt<'ctx, 'a>(
&mut self, &mut self,
ctx: &mut CodeGenContext<'_, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
stmt: &Stmt<Option<Type>>, stmt: &Stmt<Option<Type>>,
) -> Result<(), String> ) -> Result<(), String>
where where
@ -193,36 +178,6 @@ pub trait CodeGenerator {
{ {
gen_stmt(self, ctx, stmt) gen_stmt(self, ctx, stmt)
} }
/// Generates code for a block statement.
fn gen_block<'a, I: Iterator<Item = &'a Stmt<Option<Type>>>>(
&mut self,
ctx: &mut CodeGenContext<'_, '_>,
stmts: I,
) -> Result<(), String>
where
Self: Sized,
{
gen_block(self, ctx, stmts)
}
/// See [`bool_to_i1`].
fn bool_to_i1<'ctx>(
&self,
ctx: &CodeGenContext<'ctx, '_>,
bool_value: IntValue<'ctx>
) -> IntValue<'ctx> {
bool_to_i1(&ctx.builder, bool_value)
}
/// See [`bool_to_i8`].
fn bool_to_i8<'ctx>(
&self,
ctx: &CodeGenContext<'ctx, '_>,
bool_value: IntValue<'ctx>
) -> IntValue<'ctx> {
bool_to_i8(&ctx.builder, ctx.ctx, bool_value)
}
} }
pub struct DefaultCodeGenerator { pub struct DefaultCodeGenerator {
@ -231,21 +186,17 @@ pub struct DefaultCodeGenerator {
} }
impl DefaultCodeGenerator { impl DefaultCodeGenerator {
#[must_use]
pub fn new(name: String, size_t: u32) -> DefaultCodeGenerator { pub fn new(name: String, size_t: u32) -> DefaultCodeGenerator {
assert!(matches!(size_t, 32 | 64)); assert!(size_t == 32 || size_t == 64);
DefaultCodeGenerator { name, size_t } DefaultCodeGenerator { name, size_t }
} }
} }
impl CodeGenerator for DefaultCodeGenerator { impl CodeGenerator for DefaultCodeGenerator {
/// Returns the name for this [`CodeGenerator`].
fn get_name(&self) -> &str { fn get_name(&self) -> &str {
&self.name &self.name
} }
/// Returns an LLVM integer type representing `size_t`.
fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx> { fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx> {
// it should be unsigned, but we don't really need unsigned and this could save us from // it should be unsigned, but we don't really need unsigned and this could save us from
// having to do a bit cast... // having to do a bit cast...
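In the excerpt above every trait method shown carries a default that forwards to a free function, and `DefaultCodeGenerator` itself only supplies `get_name` and `get_size_type`. A minimal sketch of a custom generator along the same lines — `MyGenerator` and the fixed 32-bit `size_t` are made-up choices for illustration:

struct MyGenerator {
    name: String,
}

impl CodeGenerator for MyGenerator {
    fn get_name(&self) -> &str {
        &self.name
    }

    // A fixed 32-bit size_t; DefaultCodeGenerator instead picks i32/i64 from its `size_t` field.
    fn get_size_type<'ctx>(&self, ctx: &'ctx Context) -> IntType<'ctx> {
        ctx.i32_type()
    }
}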

View File

@ -1,15 +1,13 @@
typedef _BitInt(8) int8_t; typedef _ExtInt(8) int8_t;
typedef unsigned _BitInt(8) uint8_t; typedef unsigned _ExtInt(8) uint8_t;
typedef _BitInt(32) int32_t; typedef _ExtInt(32) int32_t;
typedef unsigned _BitInt(32) uint32_t; typedef unsigned _ExtInt(32) uint32_t;
typedef _BitInt(64) int64_t; typedef _ExtInt(64) int64_t;
typedef unsigned _BitInt(64) uint64_t; typedef unsigned _ExtInt(64) uint64_t;
# define MAX(a, b) (a > b ? a : b) # define MAX(a, b) (a > b ? a : b)
# define MIN(a, b) (a > b ? b : a) # define MIN(a, b) (a > b ? b : a)
# define NULL ((void *) 0)
// adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c // adapted from GNU Scientific Library: https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
// need to make sure `exp >= 0` before calling this function // need to make sure `exp >= 0` before calling this function
#define DEF_INT_EXP(T) T __nac3_int_exp_##T( \ #define DEF_INT_EXP(T) T __nac3_int_exp_##T( \
@ -139,243 +137,4 @@ int32_t __nac3_list_slice_assign_var_size(
return dest_arr_len - (dest_end - dest_ind) - 1; return dest_arr_len - (dest_end - dest_ind) - 1;
} }
return dest_arr_len; return dest_arr_len;
} }
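The `DEF_INT_EXP` macro near the top of this file instantiates the GSL repeated-squaring scheme per integer type. A Rust rendering of the idea, for illustration only (names, types and the wrapping behaviour here are not the IRRT code):

// Exponentiation by repeated squaring: O(log exp) multiplications.
fn int_exp(mut base: i64, mut exp: u32) -> i64 {
    let mut value: i64 = 1;
    while exp != 0 {
        if exp & 1 == 1 {
            value = value.wrapping_mul(base);
        }
        exp >>= 1;
        base = base.wrapping_mul(base);
    }
    value
}

// e.g. int_exp(3, 5) == 243.

As the comment above the macro notes, callers must guarantee `exp >= 0`, which is why neither the macro nor this sketch handles negative exponents.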
int32_t __nac3_isinf(double x) {
return __builtin_isinf(x);
}
int32_t __nac3_isnan(double x) {
return __builtin_isnan(x);
}
double tgamma(double arg);
double __nac3_gamma(double z) {
// Handling for special values
//     | x                 | Python gamma(x) | C tgamma(x) |
// --- | ----------------- | --------------- | ----------- |
// (1) | nan               | nan             | nan         |
// (2) | -inf              | -inf            | inf         |
// (3) | inf               | inf             | inf         |
// (4) | 0.0               | inf             | inf         |
// (5) | {-1.0, -2.0, ...} | inf             | nan         |
// (1)-(3)
if (__builtin_isinf(z) || __builtin_isnan(z)) {
return z;
}
double v = tgamma(z);
// (4)-(5)
return __builtin_isinf(v) || __builtin_isnan(v) ? __builtin_inf() : v;
}
double lgamma(double arg);
double __nac3_gammaln(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: gammaln(-inf) -> -inf
// - libm : lgamma(-inf) -> inf
if (__builtin_isinf(x)) {
return x;
}
return lgamma(x);
}
double j0(double x);
double __nac3_j0(double x) {
// libm's handling of value overflows differs from scipy:
// - scipy: j0(inf) -> nan
// - libm : j0(inf) -> 0.0
if (__builtin_isinf(x)) {
return __builtin_nan("");
}
return j0(x);
}
uint32_t __nac3_ndarray_calc_size(
const uint64_t *list_data,
uint32_t list_len
) {
uint32_t num_elems = 1;
for (uint32_t i = 0; i < list_len; ++i) {
uint64_t val = list_data[i];
__builtin_assume(val > 0);
num_elems *= val;
}
return num_elems;
}
uint64_t __nac3_ndarray_calc_size64(
const uint64_t *list_data,
uint64_t list_len
) {
uint64_t num_elems = 1;
for (uint64_t i = 0; i < list_len; ++i) {
uint64_t val = list_data[i];
__builtin_assume(val > 0);
num_elems *= val;
}
return num_elems;
}
void __nac3_ndarray_calc_nd_indices(
uint32_t index,
const uint32_t* dims,
uint32_t num_dims,
uint32_t* idxs
) {
uint32_t stride = 1;
for (uint32_t dim = 0; dim < num_dims; dim++) {
uint32_t i = num_dims - dim - 1;
__builtin_assume(dims[i] > 0);
idxs[i] = (index / stride) % dims[i];
stride *= dims[i];
}
}
void __nac3_ndarray_calc_nd_indices64(
uint64_t index,
const uint64_t* dims,
uint64_t num_dims,
uint32_t* idxs
) {
uint64_t stride = 1;
for (uint64_t dim = 0; dim < num_dims; dim++) {
uint64_t i = num_dims - dim - 1;
__builtin_assume(dims[i] > 0);
idxs[i] = (uint32_t) ((index / stride) % dims[i]);
stride *= dims[i];
}
}
uint32_t __nac3_ndarray_flatten_index(
const uint32_t* dims,
uint32_t num_dims,
const uint32_t* indices,
uint32_t num_indices
) {
uint32_t idx = 0;
uint32_t stride = 1;
for (uint32_t i = 0; i < num_dims; ++i) {
uint32_t ri = num_dims - i - 1;
if (ri < num_indices) {
idx += (stride * indices[ri]);
}
__builtin_assume(dims[i] > 0);
stride *= dims[ri];
}
return idx;
}
uint64_t __nac3_ndarray_flatten_index64(
const uint64_t* dims,
uint64_t num_dims,
const uint32_t* indices,
uint64_t num_indices
) {
uint64_t idx = 0;
uint64_t stride = 1;
for (uint64_t i = 0; i < num_dims; ++i) {
uint64_t ri = num_dims - i - 1;
if (ri < num_indices) {
idx += (stride * indices[ri]);
}
__builtin_assume(dims[i] > 0);
stride *= dims[ri];
}
return idx;
}
void __nac3_ndarray_calc_broadcast(
const uint32_t *lhs_dims,
uint32_t lhs_ndims,
const uint32_t *rhs_dims,
uint32_t rhs_ndims,
uint32_t *out_dims
) {
uint32_t max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;
for (uint32_t i = 0; i < max_ndims; ++i) {
const uint32_t *lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : NULL;
const uint32_t *rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : NULL;
uint32_t *out_dim = &out_dims[max_ndims - i - 1];
if (lhs_dim_sz == NULL) {
*out_dim = *rhs_dim_sz;
} else if (rhs_dim_sz == NULL) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == 1) {
*out_dim = *rhs_dim_sz;
} else if (*rhs_dim_sz == 1) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == *rhs_dim_sz) {
*out_dim = *lhs_dim_sz;
} else {
__builtin_unreachable();
}
}
}
void __nac3_ndarray_calc_broadcast64(
const uint64_t *lhs_dims,
uint64_t lhs_ndims,
const uint64_t *rhs_dims,
uint64_t rhs_ndims,
uint64_t *out_dims
) {
uint64_t max_ndims = lhs_ndims > rhs_ndims ? lhs_ndims : rhs_ndims;
for (uint64_t i = 0; i < max_ndims; ++i) {
const uint64_t *lhs_dim_sz = i < lhs_ndims ? &lhs_dims[lhs_ndims - i - 1] : NULL;
const uint64_t *rhs_dim_sz = i < rhs_ndims ? &rhs_dims[rhs_ndims - i - 1] : NULL;
uint64_t *out_dim = &out_dims[max_ndims - i - 1];
if (lhs_dim_sz == NULL) {
*out_dim = *rhs_dim_sz;
} else if (rhs_dim_sz == NULL) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == 1) {
*out_dim = *rhs_dim_sz;
} else if (*rhs_dim_sz == 1) {
*out_dim = *lhs_dim_sz;
} else if (*lhs_dim_sz == *rhs_dim_sz) {
*out_dim = *lhs_dim_sz;
} else {
__builtin_unreachable();
}
}
}
void __nac3_ndarray_calc_broadcast_idx(
const uint32_t *src_dims,
uint32_t src_ndims,
const uint32_t *in_idx,
uint32_t *out_idx
) {
for (uint32_t i = 0; i < src_ndims; ++i) {
uint32_t src_i = src_ndims - i - 1;
out_idx[src_i] = src_dims[src_i] == 1 ? 0 : in_idx[src_i];
}
}
void __nac3_ndarray_calc_broadcast_idx64(
const uint64_t *src_dims,
uint64_t src_ndims,
const uint32_t *in_idx,
uint32_t *out_idx
) {
for (uint64_t i = 0; i < src_ndims; ++i) {
uint64_t src_i = src_ndims - i - 1;
out_idx[src_i] = src_dims[src_i] == 1 ? 0 : (uint32_t) in_idx[src_i];
}
}
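To make the index helpers above concrete: `__nac3_ndarray_calc_nd_indices` decomposes a flat row-major index into per-dimension indices, and `__nac3_ndarray_flatten_index` recombines them. A small Rust sketch of the same arithmetic with a worked round trip — illustration only, not the IRRT code:

// Decompose a flat row-major index into per-dimension indices (same loop as the C helper).
fn calc_nd_indices(index: usize, dims: &[usize]) -> Vec<usize> {
    let mut idxs = vec![0; dims.len()];
    let mut stride = 1;
    for i in (0..dims.len()).rev() {
        idxs[i] = (index / stride) % dims[i];
        stride *= dims[i];
    }
    idxs
}

// Recombine per-dimension indices into the flat index, treating missing trailing indices as 0.
fn flatten_index(dims: &[usize], indices: &[usize]) -> usize {
    let mut idx = 0;
    let mut stride = 1;
    for i in (0..dims.len()).rev() {
        if i < indices.len() {
            idx += stride * indices[i];
        }
        stride *= dims[i];
    }
    idx
}

fn main() {
    let dims = [2, 3, 4];
    // Flat index 17 in a (2, 3, 4) array is element (1, 1, 1): 1*12 + 1*4 + 1*1 = 17.
    assert_eq!(calc_nd_indices(17, &dims), vec![1, 1, 1]);
    assert_eq!(flatten_index(&dims, &[1, 1, 1]), 17);
}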

View File

@ -1,34 +1,17 @@
use crate::typecheck::typedef::Type; use crate::typecheck::typedef::Type;
use super::{ use super::{CodeGenContext, CodeGenerator};
classes::{
ArrayLikeIndexer,
ArrayLikeValue,
ArraySliceValue,
ListValue,
NDArrayValue,
TypedArrayLikeAdapter,
UntypedArrayLikeAccessor,
},
CodeGenContext,
CodeGenerator,
llvm_intrinsics,
};
use inkwell::{ use inkwell::{
attributes::{Attribute, AttributeLoc}, attributes::{Attribute, AttributeLoc},
context::Context, context::Context,
memory_buffer::MemoryBuffer, memory_buffer::MemoryBuffer,
module::Module, module::Module,
types::{BasicTypeEnum, IntType}, types::BasicTypeEnum,
values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue}, values::{IntValue, PointerValue},
AddressSpace, IntPredicate, AddressSpace, IntPredicate,
}; };
use itertools::Either;
use nac3parser::ast::Expr; use nac3parser::ast::Expr;
use crate::codegen::classes::TypedArrayLikeAccessor;
use crate::codegen::stmt::gen_for_callback_incrementing;
#[must_use]
pub fn load_irrt(ctx: &Context) -> Module { pub fn load_irrt(ctx: &Context) -> Module {
let bitcode_buf = MemoryBuffer::create_from_memory_range( let bitcode_buf = MemoryBuffer::create_from_memory_range(
include_bytes!(concat!(env!("OUT_DIR"), "/irrt.bc")), include_bytes!(concat!(env!("OUT_DIR"), "/irrt.bc")),
@ -50,9 +33,9 @@ pub fn load_irrt(ctx: &Context) -> Module {
// repeated squaring method adapted from GNU Scientific Library: // repeated squaring method adapted from GNU Scientific Library:
// https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c // https://git.savannah.gnu.org/cgit/gsl.git/tree/sys/pow_int.c
pub fn integer_power<'ctx, G: CodeGenerator + ?Sized>( pub fn integer_power<'ctx, 'a>(
generator: &mut G, generator: &mut dyn CodeGenerator,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
base: IntValue<'ctx>, base: IntValue<'ctx>,
exp: IntValue<'ctx>, exp: IntValue<'ctx>,
signed: bool, signed: bool,
@ -75,7 +58,7 @@ pub fn integer_power<'ctx, G: CodeGenerator + ?Sized>(
exp, exp,
exp.get_type().const_zero(), exp.get_type().const_zero(),
"assert_int_pow_ge_0", "assert_int_pow_ge_0",
).unwrap(); );
ctx.make_assert( ctx.make_assert(
generator, generator,
ge_zero, ge_zero,
@ -86,15 +69,14 @@ pub fn integer_power<'ctx, G: CodeGenerator + ?Sized>(
); );
ctx.builder ctx.builder
.build_call(pow_fun, &[base.into(), exp.into()], "call_int_pow") .build_call(pow_fun, &[base.into(), exp.into()], "call_int_pow")
.map(CallSiteValue::try_as_basic_value) .try_as_basic_value()
.map(|v| v.map_left(BasicValueEnum::into_int_value)) .unwrap_left()
.map(Either::unwrap_left) .into_int_value()
.unwrap()
} }
pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>( pub fn calculate_len_for_slice_range<'ctx, 'a>(
generator: &mut G, generator: &mut dyn CodeGenerator,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
start: IntValue<'ctx>, start: IntValue<'ctx>,
end: IntValue<'ctx>, end: IntValue<'ctx>,
step: IntValue<'ctx>, step: IntValue<'ctx>,
@ -112,7 +94,7 @@ pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>(
step, step,
step.get_type().const_zero(), step.get_type().const_zero(),
"range_step_ne", "range_step_ne",
).unwrap(); );
ctx.make_assert( ctx.make_assert(
generator, generator,
not_zero, not_zero,
@ -123,10 +105,10 @@ pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>(
); );
ctx.builder ctx.builder
.build_call(len_func, &[start.into(), end.into(), step.into()], "calc_len") .build_call(len_func, &[start.into(), end.into(), step.into()], "calc_len")
.map(CallSiteValue::try_as_basic_value) .try_as_basic_value()
.map(|v| v.map_left(BasicValueEnum::into_int_value)) .left()
.map(Either::unwrap_left)
.unwrap() .unwrap()
.into_int_value()
} }
/// NOTE: the output value of the end index of this function should be compared ***inclusively***, /// NOTE: the output value of the end index of this function should be compared ***inclusively***,
@ -169,55 +151,47 @@ pub fn calculate_len_for_slice_range<'ctx, G: CodeGenerator + ?Sized>(
/// ,step /// ,step
/// ) /// )
/// ``` /// ```
pub fn handle_slice_indices<'ctx, G: CodeGenerator>( pub fn handle_slice_indices<'a, 'ctx, G: CodeGenerator>(
start: &Option<Box<Expr<Option<Type>>>>, start: &Option<Box<Expr<Option<Type>>>>,
end: &Option<Box<Expr<Option<Type>>>>, end: &Option<Box<Expr<Option<Type>>>>,
step: &Option<Box<Expr<Option<Type>>>>, step: &Option<Box<Expr<Option<Type>>>>,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
generator: &mut G, generator: &mut G,
list: ListValue<'ctx>, list: PointerValue<'ctx>,
) -> Result<Option<(IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>)>, String> { ) -> Result<(IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>), String> {
let int32 = ctx.ctx.i32_type(); let int32 = ctx.ctx.i32_type();
let zero = int32.const_zero(); let zero = int32.const_zero();
let one = int32.const_int(1, false); let one = int32.const_int(1, false);
let length = list.load_size(ctx, Some("length")); let length = ctx.build_gep_and_load(list, &[zero, one]).into_int_value();
let length = ctx.builder.build_int_truncate_or_bit_cast(length, int32, "leni32").unwrap(); let length = ctx.builder.build_int_truncate_or_bit_cast(length, int32, "leni32");
Ok(Some(match (start, end, step) { Ok(match (start, end, step) {
(s, e, None) => ( (s, e, None) => (
if let Some(s) = s.as_ref() { s.as_ref().map_or_else(
match handle_slice_index_bound(s, ctx, generator, length)? { || Ok(int32.const_zero()),
Some(v) => v, |s| handle_slice_index_bound(s, ctx, generator, length),
None => return Ok(None), )?,
}
} else {
int32.const_zero()
},
{ {
let e = if let Some(s) = e.as_ref() { let e = e.as_ref().map_or_else(
match handle_slice_index_bound(s, ctx, generator, length)? { || Ok(length),
Some(v) => v, |e| handle_slice_index_bound(e, ctx, generator, length),
None => return Ok(None), )?;
} ctx.builder.build_int_sub(e, one, "final_end")
} else {
length
};
ctx.builder.build_int_sub(e, one, "final_end").unwrap()
}, },
one, one,
), ),
(s, e, Some(step)) => { (s, e, Some(step)) => {
let step = if let Some(v) = generator.gen_expr(ctx, step)? { let step = generator
v.to_basic_value_enum(ctx, generator, ctx.primitives.int32)?.into_int_value() .gen_expr(ctx, step)?
} else { .unwrap()
return Ok(None) .to_basic_value_enum(ctx, generator, ctx.primitives.int32)?
}; .into_int_value();
// assert step != 0, throw exception if not // assert step != 0, throw exception if not
let not_zero = ctx.builder.build_int_compare( let not_zero = ctx.builder.build_int_compare(
IntPredicate::NE, IntPredicate::NE,
step, step,
step.get_type().const_zero(), step.get_type().const_zero(),
"range_step_ne", "range_step_ne",
).unwrap(); );
ctx.make_assert( ctx.make_assert(
generator, generator,
not_zero, not_zero,
@ -226,14 +200,12 @@ pub fn handle_slice_indices<'ctx, G: CodeGenerator>(
[None, None, None], [None, None, None],
ctx.current_loc, ctx.current_loc,
); );
let len_id = ctx.builder.build_int_sub(length, one, "lenmin1").unwrap(); let len_id = ctx.builder.build_int_sub(length, one, "lenmin1");
let neg = ctx.builder.build_int_compare(IntPredicate::SLT, step, zero, "step_is_neg").unwrap(); let neg = ctx.builder.build_int_compare(IntPredicate::SLT, step, zero, "step_is_neg");
( (
match s { match s {
Some(s) => { Some(s) => {
let Some(s) = handle_slice_index_bound(s, ctx, generator, length)? else { let s = handle_slice_index_bound(s, ctx, generator, length)?;
return Ok(None)
};
ctx.builder ctx.builder
.build_select( .build_select(
ctx.builder.build_and( ctx.builder.build_and(
@ -242,54 +214,46 @@ pub fn handle_slice_indices<'ctx, G: CodeGenerator>(
s, s,
length, length,
"s_eq_len", "s_eq_len",
).unwrap(), ),
neg, neg,
"should_minus_one", "should_minus_one",
).unwrap(), ),
ctx.builder.build_int_sub(s, one, "s_min").unwrap(), ctx.builder.build_int_sub(s, one, "s_min"),
s, s,
"final_start", "final_start",
) )
.map(BasicValueEnum::into_int_value) .into_int_value()
.unwrap()
} }
None => ctx.builder.build_select(neg, len_id, zero, "stt") None => ctx.builder.build_select(neg, len_id, zero, "stt").into_int_value(),
.map(BasicValueEnum::into_int_value)
.unwrap(),
}, },
match e { match e {
Some(e) => { Some(e) => {
let Some(e) = handle_slice_index_bound(e, ctx, generator, length)? else { let e = handle_slice_index_bound(e, ctx, generator, length)?;
return Ok(None)
};
ctx.builder ctx.builder
.build_select( .build_select(
neg, neg,
ctx.builder.build_int_add(e, one, "end_add_one").unwrap(), ctx.builder.build_int_add(e, one, "end_add_one"),
ctx.builder.build_int_sub(e, one, "end_sub_one").unwrap(), ctx.builder.build_int_sub(e, one, "end_sub_one"),
"final_end", "final_end",
) )
.map(BasicValueEnum::into_int_value) .into_int_value()
.unwrap()
} }
None => ctx.builder.build_select(neg, zero, len_id, "end") None => ctx.builder.build_select(neg, zero, len_id, "end").into_int_value(),
.map(BasicValueEnum::into_int_value)
.unwrap(),
}, },
step, step,
) )
} }
})) })
} }
/// this function allows index out of range, since python /// this function allows index out of range, since python
/// allows index out of range in slice (`a = [1,2,3]; a[1:10] == [2,3]`). /// allows index out of range in slice (`a = [1,2,3]; a[1:10] == [2,3]`).
pub fn handle_slice_index_bound<'ctx, G: CodeGenerator>( pub fn handle_slice_index_bound<'a, 'ctx, G: CodeGenerator>(
i: &Expr<Option<Type>>, i: &Expr<Option<Type>>,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
generator: &mut G, generator: &mut G,
length: IntValue<'ctx>, length: IntValue<'ctx>,
) -> Result<Option<IntValue<'ctx>>, String> { ) -> Result<IntValue<'ctx>, String> {
const SYMBOL: &str = "__nac3_slice_index_bound"; const SYMBOL: &str = "__nac3_slice_index_bound";
let func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| { let func = ctx.module.get_function(SYMBOL).unwrap_or_else(|| {
let i32_t = ctx.ctx.i32_type(); let i32_t = ctx.ctx.i32_type();
@ -297,34 +261,30 @@ pub fn handle_slice_index_bound<'ctx, G: CodeGenerator>(
ctx.module.add_function(SYMBOL, fn_t, None) ctx.module.add_function(SYMBOL, fn_t, None)
}); });
let i = if let Some(v) = generator.gen_expr(ctx, i)? { let i = generator.gen_expr(ctx, i)?.unwrap().to_basic_value_enum(ctx, generator, i.custom.unwrap())?;
v.to_basic_value_enum(ctx, generator, i.custom.unwrap())? Ok(ctx
} else {
return Ok(None)
};
Ok(Some(ctx
.builder .builder
.build_call(func, &[i.into(), length.into()], "bounded_ind") .build_call(func, &[i.into(), length.into()], "bounded_ind")
.map(CallSiteValue::try_as_basic_value) .try_as_basic_value()
.map(|v| v.map_left(BasicValueEnum::into_int_value)) .left()
.map(Either::unwrap_left) .unwrap()
.unwrap())) .into_int_value())
} }
/// This function handles 'end' **inclusively**. /// This function handles 'end' **inclusively**.
/// Order of tuples `assign_idx` and `value_idx` is ('start', 'end', 'step'). /// Order of tuples assign_idx and value_idx is ('start', 'end', 'step').
/// Negative index should be handled before entering this function /// Negative index should be handled before entering this function
pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>( pub fn list_slice_assignment<'ctx, 'a>(
generator: &mut G, generator: &mut dyn CodeGenerator,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
ty: BasicTypeEnum<'ctx>, ty: BasicTypeEnum<'ctx>,
dest_arr: ListValue<'ctx>, dest_arr: PointerValue<'ctx>,
dest_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>), dest_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
src_arr: ListValue<'ctx>, src_arr: PointerValue<'ctx>,
src_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>), src_idx: (IntValue<'ctx>, IntValue<'ctx>, IntValue<'ctx>),
) { ) {
let size_ty = generator.get_size_type(ctx.ctx); let size_ty = generator.get_size_type(ctx.ctx);
let int8_ptr = ctx.ctx.i8_type().ptr_type(AddressSpace::default()); let int8_ptr = ctx.ctx.i8_type().ptr_type(AddressSpace::Generic);
let int32 = ctx.ctx.i32_type(); let int32 = ctx.ctx.i32_type();
let (fun_symbol, elem_ptr_type) = ("__nac3_list_slice_assign_var_size", int8_ptr); let (fun_symbol, elem_ptr_type) = ("__nac3_list_slice_assign_var_size", int8_ptr);
let slice_assign_fun = { let slice_assign_fun = {
@ -349,22 +309,22 @@ pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
let zero = int32.const_zero(); let zero = int32.const_zero();
let one = int32.const_int(1, false); let one = int32.const_int(1, false);
let dest_arr_ptr = dest_arr.data().base_ptr(ctx, generator); let dest_arr_ptr = ctx.build_gep_and_load(dest_arr, &[zero, zero]);
let dest_arr_ptr = ctx.builder.build_pointer_cast( let dest_arr_ptr = ctx.builder.build_pointer_cast(
dest_arr_ptr, dest_arr_ptr.into_pointer_value(),
elem_ptr_type, elem_ptr_type,
"dest_arr_ptr_cast", "dest_arr_ptr_cast",
).unwrap(); );
let dest_len = dest_arr.load_size(ctx, Some("dest.len")); let dest_len = ctx.build_gep_and_load(dest_arr, &[zero, one]).into_int_value();
let dest_len = ctx.builder.build_int_truncate_or_bit_cast(dest_len, int32, "srclen32").unwrap(); let dest_len = ctx.builder.build_int_truncate_or_bit_cast(dest_len, int32, "srclen32");
let src_arr_ptr = src_arr.data().base_ptr(ctx, generator); let src_arr_ptr = ctx.build_gep_and_load(src_arr, &[zero, zero]);
let src_arr_ptr = ctx.builder.build_pointer_cast( let src_arr_ptr = ctx.builder.build_pointer_cast(
src_arr_ptr, src_arr_ptr.into_pointer_value(),
elem_ptr_type, elem_ptr_type,
"src_arr_ptr_cast", "src_arr_ptr_cast",
).unwrap(); );
let src_len = src_arr.load_size(ctx, Some("src.len")); let src_len = ctx.build_gep_and_load(src_arr, &[zero, one]).into_int_value();
let src_len = ctx.builder.build_int_truncate_or_bit_cast(src_len, int32, "srclen32").unwrap(); let src_len = ctx.builder.build_int_truncate_or_bit_cast(src_len, int32, "srclen32");
// index in bound and positive should be done // index in bound and positive should be done
// assert if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest), and // assert if dest.step == 1 then len(src) <= len(dest) else len(src) == len(dest), and
@ -372,31 +332,29 @@ pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
let src_end = ctx.builder let src_end = ctx.builder
.build_select( .build_select(
ctx.builder.build_int_compare( ctx.builder.build_int_compare(
IntPredicate::SLT, inkwell::IntPredicate::SLT,
src_idx.2, src_idx.2,
zero, zero,
"is_neg", "is_neg",
).unwrap(), ),
ctx.builder.build_int_sub(src_idx.1, one, "e_min_one").unwrap(), ctx.builder.build_int_sub(src_idx.1, one, "e_min_one"),
ctx.builder.build_int_add(src_idx.1, one, "e_add_one").unwrap(), ctx.builder.build_int_add(src_idx.1, one, "e_add_one"),
"final_e", "final_e",
) )
.map(BasicValueEnum::into_int_value) .into_int_value();
.unwrap();
let dest_end = ctx.builder let dest_end = ctx.builder
.build_select( .build_select(
ctx.builder.build_int_compare( ctx.builder.build_int_compare(
IntPredicate::SLT, inkwell::IntPredicate::SLT,
dest_idx.2, dest_idx.2,
zero, zero,
"is_neg", "is_neg",
).unwrap(), ),
ctx.builder.build_int_sub(dest_idx.1, one, "e_min_one").unwrap(), ctx.builder.build_int_sub(dest_idx.1, one, "e_min_one"),
ctx.builder.build_int_add(dest_idx.1, one, "e_add_one").unwrap(), ctx.builder.build_int_add(dest_idx.1, one, "e_add_one"),
"final_e", "final_e",
) )
.map(BasicValueEnum::into_int_value) .into_int_value();
.unwrap();
let src_slice_len = let src_slice_len =
calculate_len_for_slice_range(generator, ctx, src_idx.0, src_end, src_idx.2); calculate_len_for_slice_range(generator, ctx, src_idx.0, src_end, src_idx.2);
let dest_slice_len = let dest_slice_len =
@ -406,21 +364,21 @@ pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
src_slice_len, src_slice_len,
dest_slice_len, dest_slice_len,
"slice_src_eq_dest", "slice_src_eq_dest",
).unwrap(); );
let src_slt_dest = ctx.builder.build_int_compare( let src_slt_dest = ctx.builder.build_int_compare(
IntPredicate::SLT, IntPredicate::SLT,
src_slice_len, src_slice_len,
dest_slice_len, dest_slice_len,
"slice_src_slt_dest", "slice_src_slt_dest",
).unwrap(); );
let dest_step_eq_one = ctx.builder.build_int_compare( let dest_step_eq_one = ctx.builder.build_int_compare(
IntPredicate::EQ, IntPredicate::EQ,
dest_idx.2, dest_idx.2,
dest_idx.2.get_type().const_int(1, false), dest_idx.2.get_type().const_int(1, false),
"slice_dest_step_eq_one", "slice_dest_step_eq_one",
).unwrap(); );
let cond_1 = ctx.builder.build_and(dest_step_eq_one, src_slt_dest, "slice_cond_1").unwrap(); let cond_1 = ctx.builder.build_and(dest_step_eq_one, src_slt_dest, "slice_cond_1");
let cond = ctx.builder.build_or(src_eq_dest, cond_1, "slice_cond").unwrap(); let cond = ctx.builder.build_or(src_eq_dest, cond_1, "slice_cond");
ctx.make_assert( ctx.make_assert(
generator, generator,
cond, cond,
@ -450,532 +408,27 @@ pub fn list_slice_assignment<'ctx, G: CodeGenerator + ?Sized>(
BasicTypeEnum::StructType(t) => t.size_of().unwrap(), BasicTypeEnum::StructType(t) => t.size_of().unwrap(),
_ => unreachable!(), _ => unreachable!(),
}; };
ctx.builder.build_int_truncate_or_bit_cast(s, int32, "size").unwrap() ctx.builder.build_int_truncate_or_bit_cast(s, int32, "size")
} }
.into(), .into(),
]; ];
ctx.builder ctx.builder
.build_call(slice_assign_fun, args.as_slice(), "slice_assign") .build_call(slice_assign_fun, args.as_slice(), "slice_assign")
.map(CallSiteValue::try_as_basic_value) .try_as_basic_value()
.map(|v| v.map_left(BasicValueEnum::into_int_value)) .unwrap_left()
.map(Either::unwrap_left) .into_int_value()
.unwrap()
}; };
// update length // update length
let need_update = ctx.builder let need_update =
.build_int_compare(IntPredicate::NE, new_len, dest_len, "need_update") ctx.builder.build_int_compare(IntPredicate::NE, new_len, dest_len, "need_update");
.unwrap();
let current = ctx.builder.get_insert_block().unwrap().get_parent().unwrap(); let current = ctx.builder.get_insert_block().unwrap().get_parent().unwrap();
let update_bb = ctx.ctx.append_basic_block(current, "update"); let update_bb = ctx.ctx.append_basic_block(current, "update");
let cont_bb = ctx.ctx.append_basic_block(current, "cont"); let cont_bb = ctx.ctx.append_basic_block(current, "cont");
ctx.builder.build_conditional_branch(need_update, update_bb, cont_bb).unwrap(); ctx.builder.build_conditional_branch(need_update, update_bb, cont_bb);
ctx.builder.position_at_end(update_bb); ctx.builder.position_at_end(update_bb);
let new_len = ctx.builder let dest_len_ptr = unsafe { ctx.builder.build_gep(dest_arr, &[zero, one], "dest_len_ptr") };
.build_int_z_extend_or_bit_cast(new_len, size_ty, "new_len") let new_len = ctx.builder.build_int_z_extend_or_bit_cast(new_len, size_ty, "new_len");
.unwrap(); ctx.builder.build_store(dest_len_ptr, new_len);
dest_arr.store_size(ctx, generator, new_len); ctx.builder.build_unconditional_branch(cont_bb);
ctx.builder.build_unconditional_branch(cont_bb).unwrap();
ctx.builder.position_at_end(cont_bb); ctx.builder.position_at_end(cont_bb);
} }
/// Generates a call to `isinf` in IR. Returns an `i1` representing the result.
pub fn call_isinf<'ctx, G: CodeGenerator + ?Sized>(
generator: &mut G,
ctx: &CodeGenContext<'ctx, '_>,
v: FloatValue<'ctx>,
) -> IntValue<'ctx> {
let intrinsic_fn = ctx.module.get_function("__nac3_isinf").unwrap_or_else(|| {
let fn_type = ctx.ctx.i32_type().fn_type(&[ctx.ctx.f64_type().into()], false);
ctx.module.add_function("__nac3_isinf", fn_type, None)
});
let ret = ctx.builder
.build_call(intrinsic_fn, &[v.into()], "isinf")
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap();
generator.bool_to_i1(ctx, ret)
}
/// Generates a call to `isnan` in IR. Returns an `i1` representing the result.
pub fn call_isnan<'ctx, G: CodeGenerator + ?Sized>(
generator: &mut G,
ctx: &CodeGenContext<'ctx, '_>,
v: FloatValue<'ctx>,
) -> IntValue<'ctx> {
let intrinsic_fn = ctx.module.get_function("__nac3_isnan").unwrap_or_else(|| {
let fn_type = ctx.ctx.i32_type().fn_type(&[ctx.ctx.f64_type().into()], false);
ctx.module.add_function("__nac3_isnan", fn_type, None)
});
let ret = ctx.builder
.build_call(intrinsic_fn, &[v.into()], "isnan")
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap();
generator.bool_to_i1(ctx, ret)
}
/// Generates a call to `gamma` in IR. Returns an `f64` representing the result.
pub fn call_gamma<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
v: FloatValue<'ctx>,
) -> FloatValue<'ctx> {
let llvm_f64 = ctx.ctx.f64_type();
let intrinsic_fn = ctx.module.get_function("__nac3_gamma").unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
ctx.module.add_function("__nac3_gamma", fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[v.into()], "gamma")
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Generates a call to `gammaln` in IR. Returns an `f64` representing the result.
pub fn call_gammaln<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
v: FloatValue<'ctx>,
) -> FloatValue<'ctx> {
let llvm_f64 = ctx.ctx.f64_type();
let intrinsic_fn = ctx.module.get_function("__nac3_gammaln").unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
ctx.module.add_function("__nac3_gammaln", fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[v.into()], "gammaln")
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Generates a call to `j0` in IR. Returns an `f64` representing the result.
pub fn call_j0<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
v: FloatValue<'ctx>,
) -> FloatValue<'ctx> {
let llvm_f64 = ctx.ctx.f64_type();
let intrinsic_fn = ctx.module.get_function("__nac3_j0").unwrap_or_else(|| {
let fn_type = llvm_f64.fn_type(&[llvm_f64.into()], false);
ctx.module.add_function("__nac3_j0", fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[v.into()], "j0")
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Generates a call to `__nac3_ndarray_calc_size`. Returns an [`IntValue`] representing the
/// calculated total size.
///
/// * `dims` - An array-like value containing the size of each dimension; its base pointer and
///   number of elements are both passed to the IRRT function.
pub fn call_ndarray_calc_size<'ctx, G, Dims>(
generator: &G,
ctx: &CodeGenContext<'ctx, '_>,
dims: &Dims,
) -> IntValue<'ctx>
where
G: CodeGenerator + ?Sized,
Dims: ArrayLikeIndexer<'ctx>, {
let llvm_i64 = ctx.ctx.i64_type();
let llvm_usize = generator.get_size_type(ctx.ctx);
let llvm_pi64 = llvm_i64.ptr_type(AddressSpace::default());
let ndarray_calc_size_fn_name = match llvm_usize.get_bit_width() {
32 => "__nac3_ndarray_calc_size",
64 => "__nac3_ndarray_calc_size64",
bw => unreachable!("Unsupported size type bit width: {}", bw)
};
let ndarray_calc_size_fn_t = llvm_usize.fn_type(
&[
llvm_pi64.into(),
llvm_usize.into(),
],
false,
);
let ndarray_calc_size_fn = ctx.module.get_function(ndarray_calc_size_fn_name)
.unwrap_or_else(|| {
ctx.module.add_function(ndarray_calc_size_fn_name, ndarray_calc_size_fn_t, None)
});
ctx.builder
.build_call(
ndarray_calc_size_fn,
&[
dims.base_ptr(ctx, generator).into(),
dims.size(ctx, generator).into(),
],
"",
)
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Generates a call to `__nac3_ndarray_calc_nd_indices`. Returns a [`TypedArrayLikeAdapter`]
/// containing the multi-dimensional `i32` indices corresponding to the flattened index `index`.
///
/// * `index` - The index to compute the multidimensional index for.
/// * `ndarray` - The `NDArray` value whose dimension sizes are used to decompose `index`.
pub fn call_ndarray_calc_nd_indices<'ctx, G: CodeGenerator + ?Sized>(
generator: &G,
ctx: &mut CodeGenContext<'ctx, '_>,
index: IntValue<'ctx>,
ndarray: NDArrayValue<'ctx>,
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
let llvm_void = ctx.ctx.void_type();
let llvm_i32 = ctx.ctx.i32_type();
let llvm_usize = generator.get_size_type(ctx.ctx);
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
let ndarray_calc_nd_indices_fn_name = match llvm_usize.get_bit_width() {
32 => "__nac3_ndarray_calc_nd_indices",
64 => "__nac3_ndarray_calc_nd_indices64",
bw => unreachable!("Unsupported size type bit width: {}", bw)
};
let ndarray_calc_nd_indices_fn = ctx.module.get_function(ndarray_calc_nd_indices_fn_name).unwrap_or_else(|| {
let fn_type = llvm_void.fn_type(
&[
llvm_usize.into(),
llvm_pusize.into(),
llvm_usize.into(),
llvm_pi32.into(),
],
false,
);
ctx.module.add_function(ndarray_calc_nd_indices_fn_name, fn_type, None)
});
let ndarray_num_dims = ndarray.load_ndims(ctx);
let ndarray_dims = ndarray.dim_sizes();
let indices = ctx.builder.build_array_alloca(
llvm_i32,
ndarray_num_dims,
"",
).unwrap();
ctx.builder
.build_call(
ndarray_calc_nd_indices_fn,
&[
index.into(),
ndarray_dims.base_ptr(ctx, generator).into(),
ndarray_num_dims.into(),
indices.into(),
],
"",
)
.unwrap();
TypedArrayLikeAdapter::from(
ArraySliceValue::from_ptr_val(indices, ndarray_num_dims, None),
Box::new(|_, v| v.into_int_value()),
Box::new(|_, v| v.into()),
)
}
fn call_ndarray_flatten_index_impl<'ctx, G, Indices>(
generator: &G,
ctx: &CodeGenContext<'ctx, '_>,
ndarray: NDArrayValue<'ctx>,
indices: &Indices,
) -> IntValue<'ctx>
where
G: CodeGenerator + ?Sized,
Indices: ArrayLikeIndexer<'ctx>, {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_usize = generator.get_size_type(ctx.ctx);
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
debug_assert_eq!(
IntType::try_from(indices.element_type(ctx, generator))
.map(IntType::get_bit_width)
.unwrap_or_default(),
llvm_i32.get_bit_width(),
"Expected i32 value for argument `indices` to `call_ndarray_flatten_index_impl`"
);
debug_assert_eq!(
indices.size(ctx, generator).get_type().get_bit_width(),
llvm_usize.get_bit_width(),
"Expected usize integer value for argument `indices_size` to `call_ndarray_flatten_index_impl`"
);
let ndarray_flatten_index_fn_name = match llvm_usize.get_bit_width() {
32 => "__nac3_ndarray_flatten_index",
64 => "__nac3_ndarray_flatten_index64",
bw => unreachable!("Unsupported size type bit width: {}", bw)
};
let ndarray_flatten_index_fn = ctx.module.get_function(ndarray_flatten_index_fn_name).unwrap_or_else(|| {
let fn_type = llvm_usize.fn_type(
&[
llvm_pusize.into(),
llvm_usize.into(),
llvm_pi32.into(),
llvm_usize.into(),
],
false,
);
ctx.module.add_function(ndarray_flatten_index_fn_name, fn_type, None)
});
let ndarray_num_dims = ndarray.load_ndims(ctx);
let ndarray_dims = ndarray.dim_sizes();
let index = ctx.builder
.build_call(
ndarray_flatten_index_fn,
&[
ndarray_dims.base_ptr(ctx, generator).into(),
ndarray_num_dims.into(),
indices.base_ptr(ctx, generator).into(),
indices.size(ctx, generator).into(),
],
"",
)
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap();
index
}
/// Generates a call to `__nac3_ndarray_flatten_index`. Returns the flattened index for the
/// multidimensional index.
///
/// * `ndarray` - The `NDArray` value whose dimension sizes are used to compute the flattened index.
/// * `indices` - The multidimensional index to compute the flattened index for.
pub fn call_ndarray_flatten_index<'ctx, G, Index>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
ndarray: NDArrayValue<'ctx>,
indices: &Index,
) -> IntValue<'ctx>
where
G: CodeGenerator + ?Sized,
Index: ArrayLikeIndexer<'ctx>, {
call_ndarray_flatten_index_impl(
generator,
ctx,
ndarray,
indices,
)
}
/// Generates a call to `__nac3_ndarray_calc_broadcast`. Returns a [`TypedArrayLikeAdapter`]
/// containing the size of each dimension of the broadcast `ndarray`.
pub fn call_ndarray_calc_broadcast<'ctx, G: CodeGenerator + ?Sized>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
lhs: NDArrayValue<'ctx>,
rhs: NDArrayValue<'ctx>,
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
let llvm_usize = generator.get_size_type(ctx.ctx);
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
32 => "__nac3_ndarray_calc_broadcast",
64 => "__nac3_ndarray_calc_broadcast64",
bw => unreachable!("Unsupported size type bit width: {}", bw)
};
let ndarray_calc_broadcast_fn = ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
let fn_type = llvm_usize.fn_type(
&[
llvm_pusize.into(),
llvm_usize.into(),
llvm_pusize.into(),
llvm_usize.into(),
llvm_pusize.into(),
],
false,
);
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
});
let lhs_ndims = lhs.load_ndims(ctx);
let rhs_ndims = rhs.load_ndims(ctx);
let min_ndims = llvm_intrinsics::call_int_umin(ctx, lhs_ndims, rhs_ndims, None);
gen_for_callback_incrementing(
generator,
ctx,
llvm_usize.const_zero(),
(min_ndims, false),
|generator, ctx, idx| {
let idx = ctx.builder.build_int_sub(min_ndims, idx, "").unwrap();
let (lhs_dim_sz, rhs_dim_sz) = unsafe {
(
lhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
rhs.dim_sizes().get_typed_unchecked(ctx, generator, &idx, None),
)
};
let llvm_usize_const_one = llvm_usize.const_int(1, false);
let lhs_eqz = ctx.builder.build_int_compare(
IntPredicate::EQ,
lhs_dim_sz,
llvm_usize_const_one,
"",
).unwrap();
let rhs_eqz = ctx.builder.build_int_compare(
IntPredicate::EQ,
rhs_dim_sz,
llvm_usize_const_one,
"",
).unwrap();
let lhs_or_rhs_eqz = ctx.builder.build_or(
lhs_eqz,
rhs_eqz,
""
).unwrap();
let lhs_eq_rhs = ctx.builder.build_int_compare(
IntPredicate::EQ,
lhs_dim_sz,
rhs_dim_sz,
""
).unwrap();
let is_compatible = ctx.builder.build_or(
lhs_or_rhs_eqz,
lhs_eq_rhs,
""
).unwrap();
ctx.make_assert(
generator,
is_compatible,
"0:ValueError",
"operands could not be broadcast together",
[None, None, None],
ctx.current_loc,
);
Ok(())
},
llvm_usize.const_int(1, false),
).unwrap();
let max_ndims = llvm_intrinsics::call_int_umax(ctx, lhs_ndims, rhs_ndims, None);
let lhs_dims = lhs.dim_sizes().base_ptr(ctx, generator);
let lhs_ndims = lhs.load_ndims(ctx);
let rhs_dims = rhs.dim_sizes().base_ptr(ctx, generator);
let rhs_ndims = rhs.load_ndims(ctx);
let out_dims = ctx.builder.build_array_alloca(llvm_usize, max_ndims, "").unwrap();
let out_dims = ArraySliceValue::from_ptr_val(out_dims, max_ndims, None);
ctx.builder
.build_call(
ndarray_calc_broadcast_fn,
&[
lhs_dims.into(),
lhs_ndims.into(),
rhs_dims.into(),
rhs_ndims.into(),
out_dims.base_ptr(ctx, generator).into(),
],
"",
)
.unwrap();
TypedArrayLikeAdapter::from(
out_dims,
Box::new(|_, v| v.into_int_value()),
Box::new(|_, v| v.into()),
)
}
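The per-dimension compatibility asserted above is the standard NumPy broadcasting rule: aligning shapes from the trailing dimension, two sizes are compatible when they are equal or one of them is 1, and the output takes the larger of the two. A Rust sketch of just that shape computation — illustration only, not the generated IR:

// Compute the broadcast shape of two dimension lists, aligned from the trailing axis.
fn broadcast_dims(lhs: &[u64], rhs: &[u64]) -> Option<Vec<u64>> {
    let max_ndims = lhs.len().max(rhs.len());
    let mut out = vec![0; max_ndims];
    for i in 0..max_ndims {
        let l = (i < lhs.len()).then(|| lhs[lhs.len() - i - 1]);
        let r = (i < rhs.len()).then(|| rhs[rhs.len() - i - 1]);
        out[max_ndims - i - 1] = match (l, r) {
            (Some(d), None) | (None, Some(d)) => d,
            (Some(1), Some(d)) | (Some(d), Some(1)) => d,
            (Some(a), Some(b)) if a == b => a,
            _ => return None, // "operands could not be broadcast together"
        };
    }
    Some(out)
}

fn main() {
    assert_eq!(broadcast_dims(&[3, 1, 5], &[4, 1]), Some(vec![3, 4, 5]));
    assert_eq!(broadcast_dims(&[3], &[4]), None);
}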
/// Generates a call to `__nac3_ndarray_calc_broadcast_idx`. Returns a [`TypedArrayLikeAdapter`]
/// containing the indices used for accessing `array`, corresponding to the index of the
/// broadcast array `broadcast_idx`.
pub fn call_ndarray_calc_broadcast_index<'ctx, G: CodeGenerator + ?Sized, BroadcastIdx: UntypedArrayLikeAccessor<'ctx>>(
generator: &mut G,
ctx: &mut CodeGenContext<'ctx, '_>,
array: NDArrayValue<'ctx>,
broadcast_idx: &BroadcastIdx,
) -> TypedArrayLikeAdapter<'ctx, IntValue<'ctx>> {
let llvm_i32 = ctx.ctx.i32_type();
let llvm_usize = generator.get_size_type(ctx.ctx);
let llvm_pi32 = llvm_i32.ptr_type(AddressSpace::default());
let llvm_pusize = llvm_usize.ptr_type(AddressSpace::default());
let ndarray_calc_broadcast_fn_name = match llvm_usize.get_bit_width() {
32 => "__nac3_ndarray_calc_broadcast_idx",
64 => "__nac3_ndarray_calc_broadcast_idx64",
bw => unreachable!("Unsupported size type bit width: {}", bw)
};
let ndarray_calc_broadcast_fn = ctx.module.get_function(ndarray_calc_broadcast_fn_name).unwrap_or_else(|| {
let fn_type = llvm_usize.fn_type(
&[
llvm_pusize.into(),
llvm_usize.into(),
llvm_pi32.into(),
llvm_pi32.into(),
],
false,
);
ctx.module.add_function(ndarray_calc_broadcast_fn_name, fn_type, None)
});
let broadcast_size = broadcast_idx.size(ctx, generator);
let out_idx = ctx.builder.build_array_alloca(llvm_i32, broadcast_size, "").unwrap();
let array_dims = array.dim_sizes().base_ptr(ctx, generator);
let array_ndims = array.load_ndims(ctx);
let broadcast_idx_ptr = unsafe {
broadcast_idx.ptr_offset_unchecked(
ctx,
generator,
&llvm_usize.const_zero(),
None
)
};
ctx.builder
.build_call(
ndarray_calc_broadcast_fn,
&[
array_dims.into(),
array_ndims.into(),
broadcast_idx_ptr.into(),
out_idx.into(),
],
"",
)
.unwrap();
TypedArrayLikeAdapter::from(
ArraySliceValue::from_ptr_val(out_idx, broadcast_size, None),
Box::new(|_, v| v.into_int_value()),
Box::new(|_, v| v.into()),
)
}

View File

@ -1,773 +0,0 @@
use inkwell::AddressSpace;
use inkwell::context::Context;
use inkwell::types::AnyTypeEnum::IntType;
use inkwell::types::FloatType;
use inkwell::values::{BasicValueEnum, CallSiteValue, FloatValue, IntValue, PointerValue};
use itertools::Either;
use crate::codegen::CodeGenContext;
/// Returns the string representation for the floating-point type `ft` when used in intrinsic
/// functions.
fn get_float_intrinsic_repr(ctx: &Context, ft: FloatType) -> &'static str {
// Standard LLVM floating-point types
if ft == ctx.f16_type() {
return "f16"
}
if ft == ctx.f32_type() {
return "f32"
}
if ft == ctx.f64_type() {
return "f64"
}
if ft == ctx.f128_type() {
return "f128"
}
// Non-standard floating-point types
if ft == ctx.x86_f80_type() {
return "f80"
}
if ft == ctx.ppc_f128_type() {
return "ppcf128"
}
unreachable!()
}
/// Invokes the [`llvm.stacksave`](https://llvm.org/docs/LangRef.html#llvm-stacksave-intrinsic)
/// intrinsic.
pub fn call_stacksave<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
name: Option<&str>,
) -> PointerValue<'ctx> {
const FN_NAME: &str = "llvm.stacksave";
let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let llvm_i8 = ctx.ctx.i8_type();
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
let fn_type = llvm_p0i8.fn_type(&[], false);
ctx.module.add_function(FN_NAME, fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_pointer_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the
/// [`llvm.stackrestore`](https://llvm.org/docs/LangRef.html#llvm-stackrestore-intrinsic) intrinsic.
pub fn call_stackrestore<'ctx>(ctx: &CodeGenContext<'ctx, '_>, ptr: PointerValue<'ctx>) {
const FN_NAME: &str = "llvm.stackrestore";
let intrinsic_fn = ctx.module.get_function(FN_NAME).unwrap_or_else(|| {
let llvm_void = ctx.ctx.void_type();
let llvm_i8 = ctx.ctx.i8_type();
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
let fn_type = llvm_void.fn_type(&[llvm_p0i8.into()], false);
ctx.module.add_function(FN_NAME, fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[ptr.into()], "")
.unwrap();
}
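// A minimal usage sketch (assuming a `ctx: &CodeGenContext` is in scope): when a
// temporary alloca is emitted inside a loop body, the stack pointer is typically saved
// before the allocation and restored afterwards so the stack frame does not grow on
// every iteration.
//
//     let sp = call_stacksave(ctx, Some("sp"));
//     // ... build_array_alloca(...) and use of the temporary buffer ...
//     call_stackrestore(ctx, sp);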
/// Invokes the [`llvm.abs`](https://llvm.org/docs/LangRef.html#llvm-abs-intrinsic) intrinsic.
///
/// * `src` - The value for which the absolute value is to be returned.
/// * `is_int_min_poison` - Whether `poison` is to be returned if `src` is `INT_MIN`.
pub fn call_int_abs<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
src: IntValue<'ctx>,
is_int_min_poison: IntValue<'ctx>,
name: Option<&str>,
) -> IntValue<'ctx> {
debug_assert_eq!(is_int_min_poison.get_type().get_bit_width(), 1);
debug_assert!(is_int_min_poison.is_const());
let llvm_src_t = src.get_type();
let fn_name = format!("llvm.abs.i{}", llvm_src_t.get_bit_width());
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let llvm_i1 = ctx.ctx.bool_type();
let fn_type = llvm_src_t.fn_type(&[llvm_src_t.into(), llvm_i1.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[src.into(), is_int_min_poison.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap()
}
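// A minimal usage sketch (assuming `ctx: &CodeGenContext` and an `IntValue` named `src`
// are in scope). Passing a constant `false` for `is_int_min_poison` selects the wrapping
// behaviour, i.e. `INT_MIN` is returned unchanged rather than producing `poison`:
//
//     let not_poison = ctx.ctx.bool_type().const_zero();
//     let abs = call_int_abs(ctx, src, not_poison, Some("abs"));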
/// Invokes the [`llvm.smax`](https://llvm.org/docs/LangRef.html#llvm-smax-intrinsic) intrinsic.
pub fn call_int_smax<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
a: IntValue<'ctx>,
b: IntValue<'ctx>,
name: Option<&str>,
) -> IntValue<'ctx> {
debug_assert_eq!(a.get_type().get_bit_width(), b.get_type().get_bit_width());
let llvm_int_t = a.get_type();
let fn_name = format!("llvm.smax.i{}", llvm_int_t.get_bit_width());
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_int_t.fn_type(&[llvm_int_t.into(), llvm_int_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[a.into(), b.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.smin`](https://llvm.org/docs/LangRef.html#llvm-smin-intrinsic) intrinsic.
pub fn call_int_smin<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
a: IntValue<'ctx>,
b: IntValue<'ctx>,
name: Option<&str>,
) -> IntValue<'ctx> {
debug_assert_eq!(a.get_type().get_bit_width(), b.get_type().get_bit_width());
let llvm_int_t = a.get_type();
let fn_name = format!("llvm.smin.i{}", llvm_int_t.get_bit_width());
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_int_t.fn_type(&[llvm_int_t.into(), llvm_int_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[a.into(), b.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.umax`](https://llvm.org/docs/LangRef.html#llvm-umax-intrinsic) intrinsic.
pub fn call_int_umax<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
a: IntValue<'ctx>,
b: IntValue<'ctx>,
name: Option<&str>,
) -> IntValue<'ctx> {
debug_assert_eq!(a.get_type().get_bit_width(), b.get_type().get_bit_width());
let llvm_int_t = a.get_type();
let fn_name = format!("llvm.umax.i{}", llvm_int_t.get_bit_width());
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_int_t.fn_type(&[llvm_int_t.into(), llvm_int_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[a.into(), b.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.umin`](https://llvm.org/docs/LangRef.html#llvm-umin-intrinsic) intrinsic.
pub fn call_int_umin<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
a: IntValue<'ctx>,
b: IntValue<'ctx>,
name: Option<&str>,
) -> IntValue<'ctx> {
debug_assert_eq!(a.get_type().get_bit_width(), b.get_type().get_bit_width());
let llvm_int_t = a.get_type();
let fn_name = format!("llvm.umin.i{}", llvm_int_t.get_bit_width());
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_int_t.fn_type(&[llvm_int_t.into(), llvm_int_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[a.into(), b.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.memcpy`](https://llvm.org/docs/LangRef.html#llvm-memcpy-intrinsic) intrinsic.
///
/// * `dest` - The pointer to the destination. Must be a pointer to an integer type.
/// * `src` - The pointer to the source. Must be a pointer to an integer type.
/// * `len` - The number of bytes to copy.
/// * `is_volatile` - Whether the `memcpy` operation should be `volatile`.
pub fn call_memcpy<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
dest: PointerValue<'ctx>,
src: PointerValue<'ctx>,
len: IntValue<'ctx>,
is_volatile: IntValue<'ctx>,
) {
debug_assert!(dest.get_type().get_element_type().is_int_type());
debug_assert!(src.get_type().get_element_type().is_int_type());
debug_assert_eq!(
dest.get_type().get_element_type().into_int_type().get_bit_width(),
src.get_type().get_element_type().into_int_type().get_bit_width(),
);
debug_assert!(matches!(len.get_type().get_bit_width(), 32 | 64));
debug_assert_eq!(is_volatile.get_type().get_bit_width(), 1);
let llvm_dest_t = dest.get_type();
let llvm_src_t = src.get_type();
let llvm_len_t = len.get_type();
let fn_name = format!(
"llvm.memcpy.p0i{}.p0i{}.i{}",
llvm_dest_t.get_element_type().into_int_type().get_bit_width(),
llvm_src_t.get_element_type().into_int_type().get_bit_width(),
llvm_len_t.get_bit_width(),
);
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let llvm_void = ctx.ctx.void_type();
let fn_type = llvm_void.fn_type(
&[
llvm_dest_t.into(),
llvm_src_t.into(),
llvm_len_t.into(),
is_volatile.get_type().into(),
],
false,
);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[dest.into(), src.into(), len.into(), is_volatile.into()], "")
.unwrap();
}
/// Invokes the `llvm.memcpy` intrinsic.
///
/// Unlike [`call_memcpy`], this function accepts any type of pointer value. If `dest` or `src` is
/// not a pointer to an integer, the pointer(s) will be cast to `i8*` before invoking `memcpy`.
pub fn call_memcpy_generic<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
dest: PointerValue<'ctx>,
src: PointerValue<'ctx>,
len: IntValue<'ctx>,
is_volatile: IntValue<'ctx>,
) {
let llvm_i8 = ctx.ctx.i8_type();
let llvm_p0i8 = llvm_i8.ptr_type(AddressSpace::default());
let dest_elem_t = dest.get_type().get_element_type();
let src_elem_t = src.get_type().get_element_type();
let dest = if matches!(dest_elem_t, IntType(t) if t.get_bit_width() == 8) {
dest
} else {
ctx.builder
.build_bitcast(dest, llvm_p0i8, "")
.map(BasicValueEnum::into_pointer_value)
.unwrap()
};
let src = if matches!(src_elem_t, IntType(t) if t.get_bit_width() == 8) {
src
} else {
ctx.builder
.build_bitcast(src, llvm_p0i8, "")
.map(BasicValueEnum::into_pointer_value)
.unwrap()
};
call_memcpy(ctx, dest, src, len, is_volatile);
}
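// A minimal usage sketch (assuming `ctx`, two `PointerValue`s `dst` and `src` with
// arbitrary pointee types, and a byte count `len_bytes: IntValue` of i32/i64 width are
// in scope). The wrapper bitcasts both pointers to `i8*` if necessary, and the copy is
// marked non-volatile here:
//
//     let not_volatile = ctx.ctx.bool_type().const_zero();
//     call_memcpy_generic(ctx, dst, src, len_bytes, not_volatile);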
/// Invokes the [`llvm.sqrt`](https://llvm.org/docs/LangRef.html#llvm-sqrt-intrinsic) intrinsic.
pub fn call_float_sqrt<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.sqrt.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.powi`](https://llvm.org/docs/LangRef.html#llvm-powi-intrinsic) intrinsic.
pub fn call_float_powi<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
power: IntValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_val_t = val.get_type();
let llvm_power_t = power.get_type();
let fn_name = format!(
"llvm.powi.{}.i{}",
get_float_intrinsic_repr(ctx.ctx, llvm_val_t),
llvm_power_t.get_bit_width(),
);
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_val_t.fn_type(&[llvm_val_t.into(), llvm_power_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into(), power.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.sin`](https://llvm.org/docs/LangRef.html#llvm-sin-intrinsic) intrinsic.
pub fn call_float_sin<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.sin.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.cos`](https://llvm.org/docs/LangRef.html#llvm-cos-intrinsic) intrinsic.
pub fn call_float_cos<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.cos.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.pow`](https://llvm.org/docs/LangRef.html#llvm-pow-intrinsic) intrinsic.
pub fn call_float_pow<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
power: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
debug_assert_eq!(val.get_type(), power.get_type());
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.pow.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into(), llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into(), power.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.exp`](https://llvm.org/docs/LangRef.html#llvm-exp-intrinsic) intrinsic.
pub fn call_float_exp<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.exp.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.exp2`](https://llvm.org/docs/LangRef.html#llvm-exp2-intrinsic) intrinsic.
pub fn call_float_exp2<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.exp2.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.log`](https://llvm.org/docs/LangRef.html#llvm-log-intrinsic) intrinsic.
pub fn call_float_log<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.log.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.log10`](https://llvm.org/docs/LangRef.html#llvm-log10-intrinsic) intrinsic.
pub fn call_float_log10<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.log10.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.log2`](https://llvm.org/docs/LangRef.html#llvm-log2-intrinsic) intrinsic.
pub fn call_float_log2<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.log2.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.fabs`](https://llvm.org/docs/LangRef.html#llvm-fabs-intrinsic) intrinsic.
pub fn call_float_fabs<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
src: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_src_t = src.get_type();
let fn_name = format!("llvm.fabs.{}", get_float_intrinsic_repr(ctx.ctx, llvm_src_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_src_t.fn_type(&[llvm_src_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[src.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.minnum`](https://llvm.org/docs/LangRef.html#llvm-minnum-intrinsic) intrinsic.
pub fn call_float_minnum<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val1: FloatValue<'ctx>,
val2: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
debug_assert_eq!(val1.get_type(), val2.get_type());
let llvm_float_t = val1.get_type();
let fn_name = format!("llvm.minnum.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into(), llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val1.into(), val2.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.maxnum`](https://llvm.org/docs/LangRef.html#llvm-maxnum-intrinsic) intrinsic.
pub fn call_float_maxnum<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val1: FloatValue<'ctx>,
val2: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
debug_assert_eq!(val1.get_type(), val2.get_type());
let llvm_float_t = val1.get_type();
let fn_name = format!("llvm.maxnum.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into(), llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val1.into(), val2.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.copysign`](https://llvm.org/docs/LangRef.html#llvm-copysign-intrinsic) intrinsic.
pub fn call_float_copysign<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
mag: FloatValue<'ctx>,
sgn: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
debug_assert_eq!(mag.get_type(), sgn.get_type());
let llvm_float_t = mag.get_type();
let fn_name = format!("llvm.copysign.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into(), llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[mag.into(), sgn.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.floor`](https://llvm.org/docs/LangRef.html#llvm-floor-intrinsic) intrinsic.
pub fn call_float_floor<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.floor.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.ceil`](https://llvm.org/docs/LangRef.html#llvm-ceil-intrinsic) intrinsic.
pub fn call_float_ceil<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.ceil.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.round`](https://llvm.org/docs/LangRef.html#llvm-round-intrinsic) intrinsic.
pub fn call_float_round<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.round.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the
/// [`llvm.roundeven`](https://llvm.org/docs/LangRef.html#llvm-roundeven-intrinsic) intrinsic.
pub fn call_float_roundeven<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: FloatValue<'ctx>,
name: Option<&str>,
) -> FloatValue<'ctx> {
let llvm_float_t = val.get_type();
let fn_name = format!("llvm.roundeven.{}", get_float_intrinsic_repr(ctx.ctx, llvm_float_t));
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_float_t.fn_type(&[llvm_float_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_float_value))
.map(Either::unwrap_left)
.unwrap()
}
/// Invokes the [`llvm.expect`](https://llvm.org/docs/LangRef.html#llvm-expect-intrinsic) intrinsic.
pub fn call_expect<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
val: IntValue<'ctx>,
expected_val: IntValue<'ctx>,
name: Option<&str>,
) -> IntValue<'ctx> {
debug_assert_eq!(val.get_type().get_bit_width(), expected_val.get_type().get_bit_width());
let llvm_int_t = val.get_type();
let fn_name = format!("llvm.expect.i{}", llvm_int_t.get_bit_width());
let intrinsic_fn = ctx.module.get_function(fn_name.as_str()).unwrap_or_else(|| {
let fn_type = llvm_int_t.fn_type(&[llvm_int_t.into(), llvm_int_t.into()], false);
ctx.module.add_function(fn_name.as_str(), fn_type, None)
});
ctx.builder
.build_call(intrinsic_fn, &[val.into(), expected_val.into()], name.unwrap_or_default())
.map(CallSiteValue::try_as_basic_value)
.map(|v| v.map_left(BasicValueEnum::into_int_value))
.map(Either::unwrap_left)
.unwrap()
}
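// A minimal usage sketch (assuming `ctx` and an `i1` condition `cond` are in scope).
// Hinting that `cond` is expected to be true lets later optimization passes lay out the
// unlikely branch out of line:
//
//     let expected_true = ctx.ctx.bool_type().const_int(1, false);
//     let hinted = call_expect(ctx, cond, expected_true, Some("expect"));
//     // `hinted` is then used as the condition of the subsequent conditional branch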

View File

@ -1,11 +1,6 @@
use crate::{ use crate::{
symbol_resolver::{StaticValue, SymbolResolver}, symbol_resolver::{StaticValue, SymbolResolver},
toplevel::{ toplevel::{TopLevelContext, TopLevelDef},
helper::PRIMITIVE_DEF_IDS,
numpy::unpack_ndarray_var_tys,
TopLevelContext,
TopLevelDef,
},
typecheck::{ typecheck::{
type_inferencer::{CodeLocation, PrimitiveStore}, type_inferencer::{CodeLocation, PrimitiveStore},
typedef::{CallId, FuncArg, Type, TypeEnum, Unifier}, typedef::{CallId, FuncArg, Type, TypeEnum, Unifier},
@ -14,17 +9,15 @@ use crate::{
use crossbeam::channel::{unbounded, Receiver, Sender}; use crossbeam::channel::{unbounded, Receiver, Sender};
use inkwell::{ use inkwell::{
AddressSpace, AddressSpace,
IntPredicate,
OptimizationLevel, OptimizationLevel,
attributes::{Attribute, AttributeLoc}, attributes::{Attribute, AttributeLoc},
basic_block::BasicBlock, basic_block::BasicBlock,
builder::Builder, builder::Builder,
context::Context, context::Context,
module::Module, module::Module,
passes::PassBuilderOptions, passes::{PassManager, PassManagerBuilder},
targets::{CodeModel, RelocMode, Target, TargetMachine, TargetTriple},
types::{AnyType, BasicType, BasicTypeEnum}, types::{AnyType, BasicType, BasicTypeEnum},
values::{BasicValueEnum, FunctionValue, IntValue, PhiValue, PointerValue}, values::{BasicValueEnum, FunctionValue, PhiValue, PointerValue},
debug_info::{ debug_info::{
DebugInfoBuilder, DICompileUnit, DISubprogram, AsDIScope, DIFlagsConstants, DIScope DebugInfoBuilder, DICompileUnit, DISubprogram, AsDIScope, DIFlagsConstants, DIScope
}, },
@ -38,16 +31,12 @@ use std::sync::{
Arc, Arc,
}; };
use std::thread; use std::thread;
use lazy_static::lazy_static;
pub mod builtin_fns;
pub mod classes;
pub mod concrete_type; pub mod concrete_type;
pub mod expr; pub mod expr;
pub mod extern_fns;
mod generator; mod generator;
pub mod irrt; pub mod irrt;
pub mod llvm_intrinsics;
pub mod numpy;
pub mod stmt; pub mod stmt;
#[cfg(test)] #[cfg(test)]
@ -64,151 +53,47 @@ pub struct StaticValueStore {
pub type VarValue<'ctx> = (PointerValue<'ctx>, Option<Arc<dyn StaticValue + Send + Sync>>, i64); pub type VarValue<'ctx> = (PointerValue<'ctx>, Option<Arc<dyn StaticValue + Send + Sync>>, i64);
/// Additional options for LLVM during codegen. lazy_static!(
#[derive(Clone, Debug, Eq, PartialEq)] // HACK: The Mutex is a work-around for issue
pub struct CodeGenLLVMOptions { // https://git.m-labs.hk/M-Labs/nac3/issues/275
/// The optimization level to apply on the generated LLVM IR. static ref PASSES_INIT_LOCK: Mutex<AtomicBool> = Mutex::new(AtomicBool::new(true));
pub opt_level: OptimizationLevel, );
/// Options related to the target machine.
pub target: CodeGenTargetMachineOptions,
}
/// Additional options for code generation for the target machine.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CodeGenTargetMachineOptions {
/// The target machine triple.
pub triple: String,
/// The target machine CPU.
pub cpu: String,
/// Additional target machine features.
pub features: String,
/// Relocation mode for code generation.
pub reloc_mode: RelocMode,
/// Code model for code generation.
pub code_model: CodeModel,
}
impl CodeGenTargetMachineOptions {
/// Creates an instance of [`CodeGenTargetMachineOptions`] using the triple of the host machine.
/// Other options are set to defaults.
#[must_use]
pub fn from_host_triple() -> CodeGenTargetMachineOptions {
CodeGenTargetMachineOptions {
triple: TargetMachine::get_default_triple().as_str().to_string_lossy().into_owned(),
cpu: String::default(),
features: String::default(),
reloc_mode: RelocMode::Default,
code_model: CodeModel::Default,
}
}
/// Creates an instance of [`CodeGenTargetMachineOptions`] using the properties of the host
/// machine. Other options are set to defaults.
#[must_use]
pub fn from_host() -> CodeGenTargetMachineOptions {
CodeGenTargetMachineOptions {
cpu: TargetMachine::get_host_cpu_name().to_string(),
features: TargetMachine::get_host_cpu_features().to_string(),
..CodeGenTargetMachineOptions::from_host_triple()
}
}
/// Creates a [`TargetMachine`] using the target options specified by this struct.
///
/// See [`Target::create_target_machine`].
#[must_use]
pub fn create_target_machine(
&self,
level: OptimizationLevel,
) -> Option<TargetMachine> {
let triple = TargetTriple::create(self.triple.as_str());
let target = Target::from_triple(&triple)
.unwrap_or_else(|_| panic!("could not create target from target triple {}", self.triple));
target.create_target_machine(
&triple,
self.cpu.as_str(),
self.features.as_str(),
level,
self.reloc_mode,
self.code_model
)
}
}
pub struct CodeGenContext<'ctx, 'a> { pub struct CodeGenContext<'ctx, 'a> {
/// The LLVM context associated with [this context][CodeGenContext].
pub ctx: &'ctx Context, pub ctx: &'ctx Context,
/// The [Builder] instance for creating LLVM IR statements.
pub builder: Builder<'ctx>, pub builder: Builder<'ctx>,
/// The [DebugInfoBuilder], [compilation unit information][DICompileUnit], and
/// [scope information][DIScope] of this context.
pub debug_info: (DebugInfoBuilder<'ctx>, DICompileUnit<'ctx>, DIScope<'ctx>), pub debug_info: (DebugInfoBuilder<'ctx>, DICompileUnit<'ctx>, DIScope<'ctx>),
/// The module for which [this context][CodeGenContext] is generating into.
pub module: Module<'ctx>, pub module: Module<'ctx>,
/// The [TopLevelContext] associated with [this context][CodeGenContext].
pub top_level: &'a TopLevelContext, pub top_level: &'a TopLevelContext,
pub unifier: Unifier, pub unifier: Unifier,
pub resolver: Arc<dyn SymbolResolver + Send + Sync>, pub resolver: Arc<dyn SymbolResolver + Send + Sync>,
pub static_value_store: Arc<Mutex<StaticValueStore>>, pub static_value_store: Arc<Mutex<StaticValueStore>>,
/// A [HashMap] containing the mapping between the names of variables currently in-scope and
/// its value information.
pub var_assignment: HashMap<StrRef, VarValue<'ctx>>, pub var_assignment: HashMap<StrRef, VarValue<'ctx>>,
/// A [HashMap] mapping a [Type] to its corresponding LLVM type.
pub type_cache: HashMap<Type, BasicTypeEnum<'ctx>>, pub type_cache: HashMap<Type, BasicTypeEnum<'ctx>>,
pub primitives: PrimitiveStore, pub primitives: PrimitiveStore,
pub calls: Arc<HashMap<CodeLocation, CallId>>, pub calls: Arc<HashMap<CodeLocation, CallId>>,
pub registry: &'a WorkerRegistry, pub registry: &'a WorkerRegistry,
// const string cache
/// Cache for constant strings.
pub const_strings: HashMap<String, BasicValueEnum<'ctx>>, pub const_strings: HashMap<String, BasicValueEnum<'ctx>>,
// stores the alloca for variables
/// [BasicBlock] containing all `alloca` statements for the current function.
pub init_bb: BasicBlock<'ctx>, pub init_bb: BasicBlock<'ctx>,
pub exception_val: Option<PointerValue<'ctx>>, // the first one is the test_bb, and the second one is bb after the loop
/// The header and exit basic blocks of a loop in this context. See
/// <https://llvm.org/docs/LoopTerminology.html> for an explanation of this terminology.
pub loop_target: Option<(BasicBlock<'ctx>, BasicBlock<'ctx>)>, pub loop_target: Option<(BasicBlock<'ctx>, BasicBlock<'ctx>)>,
// unwind target bb
/// The target [BasicBlock] to jump to when performing stack unwind.
pub unwind_target: Option<BasicBlock<'ctx>>, pub unwind_target: Option<BasicBlock<'ctx>>,
// return target bb, just emit ret if no such target
/// The target [BasicBlock] to jump to before returning from the function.
///
/// If this field is [None] when generating a return from a function, `ret` with no argument can
/// be emitted.
pub return_target: Option<BasicBlock<'ctx>>, pub return_target: Option<BasicBlock<'ctx>>,
/// The [PointerValue] containing the return value of the function.
pub return_buffer: Option<PointerValue<'ctx>>, pub return_buffer: Option<PointerValue<'ctx>>,
// outer catch clauses // outer catch clauses
pub outer_catch_clauses: pub outer_catch_clauses:
Option<(Vec<Option<BasicValueEnum<'ctx>>>, BasicBlock<'ctx>, PhiValue<'ctx>)>, Option<(Vec<Option<BasicValueEnum<'ctx>>>, BasicBlock<'ctx>, PhiValue<'ctx>)>,
/// Whether `sret` is needed for the first parameter of the function.
///
/// See [need_sret].
pub need_sret: bool, pub need_sret: bool,
/// The current source location.
pub current_loc: Location, pub current_loc: Location,
} }
impl<'ctx, 'a> CodeGenContext<'ctx, 'a> { impl<'ctx, 'a> CodeGenContext<'ctx, 'a> {
/// Whether the [current basic block][Builder::get_insert_block] referenced by `builder`
/// contains a [terminator statement][BasicBlock::get_terminator].
pub fn is_terminated(&self) -> bool { pub fn is_terminated(&self) -> bool {
self.builder.get_insert_block().and_then(BasicBlock::get_terminator).is_some() self.builder.get_insert_block().unwrap().get_terminator().is_some()
} }
} }
@ -219,54 +104,40 @@ pub struct WithCall {
} }
impl WithCall { impl WithCall {
#[must_use]
pub fn new(fp: Fp) -> WithCall { pub fn new(fp: Fp) -> WithCall {
WithCall { fp } WithCall { fp }
} }
pub fn run(&self, m: &Module) { pub fn run<'ctx>(&self, m: &Module<'ctx>) {
(self.fp)(m); (self.fp)(m)
} }
} }
pub struct WorkerRegistry { pub struct WorkerRegistry {
sender: Arc<Sender<Option<CodeGenTask>>>, sender: Arc<Sender<Option<CodeGenTask>>>,
receiver: Arc<Receiver<Option<CodeGenTask>>>, receiver: Arc<Receiver<Option<CodeGenTask>>>,
/// Whether any thread in this registry has panicked.
panicked: AtomicBool, panicked: AtomicBool,
/// The total number of tasks queued or completed in the registry.
task_count: Mutex<usize>, task_count: Mutex<usize>,
/// The number of threads available for this registry.
thread_count: usize, thread_count: usize,
wait_condvar: Condvar, wait_condvar: Condvar,
top_level_ctx: Arc<TopLevelContext>, top_level_ctx: Arc<TopLevelContext>,
static_value_store: Arc<Mutex<StaticValueStore>>, static_value_store: Arc<Mutex<StaticValueStore>>,
/// LLVM-related options for code generation.
pub llvm_options: CodeGenLLVMOptions,
} }
impl WorkerRegistry { impl WorkerRegistry {
/// Creates workers for this registry.
#[must_use]
pub fn create_workers<G: CodeGenerator + Send + 'static>( pub fn create_workers<G: CodeGenerator + Send + 'static>(
generators: Vec<Box<G>>, generators: Vec<Box<G>>,
top_level_ctx: Arc<TopLevelContext>, top_level_ctx: Arc<TopLevelContext>,
llvm_options: &CodeGenLLVMOptions, f: Arc<WithCall>,
f: &Arc<WithCall>,
) -> (Arc<WorkerRegistry>, Vec<thread::JoinHandle<()>>) { ) -> (Arc<WorkerRegistry>, Vec<thread::JoinHandle<()>>) {
let (sender, receiver) = unbounded(); let (sender, receiver) = unbounded();
let task_count = Mutex::new(0); let task_count = Mutex::new(0);
let wait_condvar = Condvar::new(); let wait_condvar = Condvar::new();
// init: 0 to be empty // init: 0 to be empty
let mut static_value_store = StaticValueStore::default(); let mut static_value_store: StaticValueStore = Default::default();
static_value_store.lookup.insert(Vec::default(), 0); static_value_store.lookup.insert(Default::default(), 0);
static_value_store.store.push(HashMap::default()); static_value_store.store.push(Default::default());
let registry = Arc::new(WorkerRegistry { let registry = Arc::new(WorkerRegistry {
sender: Arc::new(sender), sender: Arc::new(sender),
@ -277,23 +148,22 @@ impl WorkerRegistry {
task_count, task_count,
wait_condvar, wait_condvar,
top_level_ctx, top_level_ctx,
llvm_options: llvm_options.clone(),
}); });
let mut handles = Vec::new(); let mut handles = Vec::new();
for mut generator in generators { for mut generator in generators.into_iter() {
let registry = registry.clone(); let registry = registry.clone();
let registry2 = registry.clone(); let registry2 = registry.clone();
let f = f.clone(); let f = f.clone();
let handle = thread::spawn(move || { let handle = thread::spawn(move || {
registry.worker_thread(generator.as_mut(), &f); registry.worker_thread(generator.as_mut(), f);
}); });
let handle = thread::spawn(move || { let handle = thread::spawn(move || {
if let Err(e) = handle.join() { if let Err(e) = handle.join() {
if let Some(e) = e.downcast_ref::<&'static str>() { if let Some(e) = e.downcast_ref::<&'static str>() {
eprintln!("Got an error: {e}"); eprintln!("Got an error: {}", e);
} else { } else {
eprintln!("Got an unknown error: {e:?}"); eprintln!("Got an unknown error: {:?}", e);
} }
registry2.panicked.store(true, Ordering::SeqCst); registry2.panicked.store(true, Ordering::SeqCst);
registry2.wait_condvar.notify_all(); registry2.wait_condvar.notify_all();
@ -329,17 +199,17 @@ impl WorkerRegistry {
for handle in handles { for handle in handles {
handle.join().unwrap(); handle.join().unwrap();
} }
assert!(!self.panicked.load(Ordering::SeqCst), "tasks panicked"); if self.panicked.load(Ordering::SeqCst) {
panic!("tasks panicked");
}
} }
/// Adds a task to this [`WorkerRegistry`].
pub fn add_task(&self, task: CodeGenTask) { pub fn add_task(&self, task: CodeGenTask) {
*self.task_count.lock() += 1; *self.task_count.lock() += 1;
self.sender.send(Some(task)).unwrap(); self.sender.send(Some(task)).unwrap();
} }
/// Function executed by worker thread for generating IR for each function. fn worker_thread<G: CodeGenerator>(&self, generator: &mut G, f: Arc<WithCall>) {
fn worker_thread<G: CodeGenerator>(&self, generator: &mut G, f: &Arc<WithCall>) {
let context = Context::create(); let context = Context::create();
let mut builder = context.create_builder(); let mut builder = context.create_builder();
let mut module = context.create_module(generator.get_name()); let mut module = context.create_module(generator.get_name());
@ -355,11 +225,23 @@ impl WorkerRegistry {
context.i32_type().const_int(4, false), context.i32_type().const_int(4, false),
); );
let passes = PassManager::create(&module);
// HACK: This critical section is a work-around for issue
// https://git.m-labs.hk/M-Labs/nac3/issues/275
{
let _data = PASSES_INIT_LOCK.lock();
let pass_builder = PassManagerBuilder::create();
pass_builder.set_optimization_level(OptimizationLevel::Default);
pass_builder.populate_function_pass_manager(&passes);
}
let mut errors = HashSet::new(); let mut errors = HashSet::new();
while let Some(task) = self.receiver.recv().unwrap() { while let Some(task) = self.receiver.recv().unwrap() {
match gen_func(&context, generator, self, builder, module, task) { match gen_func(&context, generator, self, builder, module, task) {
Ok(result) => { Ok(result) => {
builder = result.0; builder = result.0;
passes.run_on(&result.2);
module = result.1; module = result.1;
} }
Err((old_builder, e)) => { Err((old_builder, e)) => {
@ -372,28 +254,16 @@ impl WorkerRegistry {
*self.task_count.lock() -= 1; *self.task_count.lock() -= 1;
self.wait_condvar.notify_all(); self.wait_condvar.notify_all();
} }
assert!(errors.is_empty(), "Codegen error: {}", errors.into_iter().sorted().join("\n----------\n")); if !errors.is_empty() {
panic!("Codegen error: {}", errors.into_iter().sorted().join("\n----------\n"));
}
let result = module.verify(); let result = module.verify();
if let Err(err) = result { if let Err(err) = result {
println!("{}", module.print_to_string().to_str().unwrap()); println!("{}", module.print_to_string().to_str().unwrap());
panic!("{}", err.to_string()) println!("{}", err.to_string());
panic!()
} }
let pass_options = PassBuilderOptions::create();
let target_machine = self
.llvm_options
.target
.create_target_machine(self.llvm_options.opt_level)
.unwrap_or_else(|| panic!("could not create target machine from properties {:?}", self.llvm_options.target));
let passes = format!("default<O{}>", self.llvm_options.opt_level as u32);
let result = module.run_passes(passes.as_str(), &target_machine, pass_options);
if let Err(err) = result {
panic!("Failed to run optimization for module `{}`: {}",
module.get_name().to_str().unwrap(),
err.to_string());
}
f.run(&module); f.run(&module);
let mut lock = self.task_count.lock(); let mut lock = self.task_count.lock();
*lock += 1; *lock += 1;
@ -413,107 +283,85 @@ pub struct CodeGenTask {
pub id: usize, pub id: usize,
} }
/// Retrieves the [LLVM type][BasicTypeEnum] corresponding to the [Type]. fn get_llvm_type<'ctx>(
///
/// This function is used to obtain the in-memory representation of `ty`, e.g. a `bool` variable
/// would be represented by an `i8`.
#[allow(clippy::too_many_arguments)]
fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
ctx: &'ctx Context, ctx: &'ctx Context,
module: &Module<'ctx>, module: &Module<'ctx>,
generator: &mut G, generator: &mut dyn CodeGenerator,
unifier: &mut Unifier, unifier: &mut Unifier,
top_level: &TopLevelContext, top_level: &TopLevelContext,
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>, type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
primitives: &PrimitiveStore,
ty: Type, ty: Type,
) -> BasicTypeEnum<'ctx> { ) -> BasicTypeEnum<'ctx> {
use TypeEnum::*; use TypeEnum::*;
// we assume the type cache should already contain primitive types, // we assume the type cache should already contain primitive types,
// and they should be passed by value instead of passing as pointer. // and they should be passed by value instead of passing as pointer.
type_cache.get(&unifier.get_representative(ty)).copied().unwrap_or_else(|| { type_cache.get(&unifier.get_representative(ty)).cloned().unwrap_or_else(|| {
let ty_enum = unifier.get_ty(ty); let ty_enum = unifier.get_ty(ty);
let result = match &*ty_enum { let result = match &*ty_enum {
TObj { obj_id, fields, .. } => { TObj { obj_id, fields, .. } => {
// check to avoid treating non-class primitives as classes // check to avoid treating primitives other than Option as classes
if obj_id.0 <= PRIMITIVE_DEF_IDS.max_id().0 { if obj_id.0 <= 10 {
return match &*unifier.get_ty_immutable(ty) { match (unifier.get_ty(ty).as_ref(), unifier.get_ty(primitives.option).as_ref())
TObj { obj_id, params, .. } if *obj_id == PRIMITIVE_DEF_IDS.option => { {
get_llvm_type( (
TypeEnum::TObj { obj_id, params, .. },
TypeEnum::TObj { obj_id: opt_id, .. },
) if *obj_id == *opt_id => {
return get_llvm_type(
ctx, ctx,
module, module,
generator, generator,
unifier, unifier,
top_level, top_level,
type_cache, type_cache,
primitives,
*params.iter().next().unwrap().1, *params.iter().next().unwrap().1,
) )
.ptr_type(AddressSpace::default()) .ptr_type(AddressSpace::Generic)
.into() .into();
} }
_ => unreachable!("must be option type"),
TObj { obj_id, .. } if *obj_id == PRIMITIVE_DEF_IDS.ndarray => {
let llvm_usize = generator.get_size_type(ctx);
let (dtype, _) = unpack_ndarray_var_tys(unifier, ty);
let element_type = get_llvm_type(
ctx,
module,
generator,
unifier,
top_level,
type_cache,
dtype,
);
// struct NDArray { num_dims: size_t, dims: size_t*, data: T* }
//
// * num_dims: Number of dimensions in the array
// * dims: Pointer to an array containing the size of each dimension
// * data: Pointer to an array containing the array data
let fields = [
llvm_usize.into(),
llvm_usize.ptr_type(AddressSpace::default()).into(),
element_type.ptr_type(AddressSpace::default()).into(),
];
ctx.struct_type(&fields, false).ptr_type(AddressSpace::default()).into()
}
_ => unreachable!("LLVM type for primitive {} is missing", unifier.stringify(ty)),
} }
} }
// a struct with fields in the order of declaration // a struct with fields in the order of declaration
let top_level_defs = top_level.definitions.read(); let top_level_defs = top_level.definitions.read();
let definition = top_level_defs.get(obj_id.0).unwrap(); let definition = top_level_defs.get(obj_id.0).unwrap();
let TopLevelDef::Class { fields: fields_list, .. } = &*definition.read() else { let ty = if let TopLevelDef::Class { fields: fields_list, .. } =
&*definition.read()
{
let name = unifier.stringify(ty);
match module.get_struct_type(&name) {
Some(t) => t.ptr_type(AddressSpace::Generic).into(),
None => {
let struct_type = ctx.opaque_struct_type(&name);
type_cache.insert(
unifier.get_representative(ty),
struct_type.ptr_type(AddressSpace::Generic).into()
);
let fields = fields_list
.iter()
.map(|f| {
get_llvm_type(
ctx,
module,
generator,
unifier,
top_level,
type_cache,
primitives,
fields[&f.0].0,
)
})
.collect_vec();
struct_type.set_body(&fields, false);
struct_type.ptr_type(AddressSpace::Generic).into()
}
}
} else {
unreachable!() unreachable!()
}; };
return ty;
let name = unifier.stringify(ty);
let ty = if let Some(t) = module.get_struct_type(&name) {
t.ptr_type(AddressSpace::default()).into()
} else {
let struct_type = ctx.opaque_struct_type(&name);
type_cache.insert(
unifier.get_representative(ty),
struct_type.ptr_type(AddressSpace::default()).into()
);
let fields = fields_list
.iter()
.map(|f| {
get_llvm_type(
ctx,
module,
generator,
unifier,
top_level,
type_cache,
fields[&f.0].0,
)
})
.collect_vec();
struct_type.set_body(&fields, false);
struct_type.ptr_type(AddressSpace::default()).into()
};
return ty
} }
TTuple { ty } => { TTuple { ty } => {
// a struct with fields in the order present in the tuple // a struct with fields in the order present in the tuple
@ -521,7 +369,7 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
.iter() .iter()
.map(|ty| { .map(|ty| {
get_llvm_type( get_llvm_type(
ctx, module, generator, unifier, top_level, type_cache, *ty, ctx, module, generator, unifier, top_level, type_cache, primitives, *ty,
) )
}) })
.collect_vec(); .collect_vec();
@ -530,13 +378,13 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
TList { ty } => { TList { ty } => {
// a struct with an integer and a pointer to an array // a struct with an integer and a pointer to an array
let element_type = get_llvm_type( let element_type = get_llvm_type(
ctx, module, generator, unifier, top_level, type_cache, *ty, ctx, module, generator, unifier, top_level, type_cache, primitives, *ty,
); );
let fields = [ let fields = [
element_type.ptr_type(AddressSpace::default()).into(), element_type.ptr_type(AddressSpace::Generic).into(),
generator.get_size_type(ctx).into(), generator.get_size_type(ctx).into(),
]; ];
ctx.struct_type(&fields, false).ptr_type(AddressSpace::default()).into() ctx.struct_type(&fields, false).ptr_type(AddressSpace::Generic).into()
} }
TVirtual { .. } => unimplemented!(), TVirtual { .. } => unimplemented!(),
_ => unreachable!("{}", ty_enum.get_type_name()), _ => unreachable!("{}", ty_enum.get_type_name()),
@ -546,57 +394,19 @@ fn get_llvm_type<'ctx, G: CodeGenerator + ?Sized>(
}) })
} }
/// Retrieves the [LLVM type][`BasicTypeEnum`] corresponding to the [`Type`]. fn need_sret<'ctx>(ctx: &'ctx Context, ty: BasicTypeEnum<'ctx>) -> bool {
/// fn need_sret_impl<'ctx>(ctx: &'ctx Context, ty: BasicTypeEnum<'ctx>, maybe_large: bool) -> bool {
/// This function is used mainly to obtain the ABI representation of `ty`, e.g. a `bool`
/// would be represented by an `i1`.
///
/// The difference between the in-memory representation (as returned by [`get_llvm_type`]) and the
/// ABI representation is that the in-memory representation must be at least byte-sized and must
/// be byte-aligned for the variable to be addressable in memory, whereas there is no such
/// restriction for ABI representations.
#[allow(clippy::too_many_arguments)]
fn get_llvm_abi_type<'ctx, G: CodeGenerator + ?Sized>(
ctx: &'ctx Context,
module: &Module<'ctx>,
generator: &mut G,
unifier: &mut Unifier,
top_level: &TopLevelContext,
type_cache: &mut HashMap<Type, BasicTypeEnum<'ctx>>,
primitives: &PrimitiveStore,
ty: Type,
) -> BasicTypeEnum<'ctx> {
// If the type is used in the definition of a function, return `i1` instead of `i8` for ABI
// consistency.
return if unifier.unioned(ty, primitives.bool) {
ctx.bool_type().into()
} else {
get_llvm_type(ctx, module, generator, unifier, top_level, type_cache, ty)
}
}
/// Whether `sret` is needed for a return value with type `ty`.
///
/// When returning a large data structure (e.g. structures that do not fit in 1-2 native words of
/// the target processor) by value, a synthetic parameter with a pointer type will be passed in the
/// slot of the first parameter to act as the location into which the return value is written.
///
/// See <https://releases.llvm.org/14.0.0/docs/LangRef.html#parameter-attributes> for more
/// information.
fn need_sret(ty: BasicTypeEnum) -> bool {
fn need_sret_impl(ty: BasicTypeEnum, maybe_large: bool) -> bool {
match ty { match ty {
BasicTypeEnum::IntType(_) | BasicTypeEnum::PointerType(_) => false, BasicTypeEnum::IntType(_) | BasicTypeEnum::PointerType(_) => false,
BasicTypeEnum::FloatType(_) if maybe_large => false, BasicTypeEnum::FloatType(_) if maybe_large => false,
BasicTypeEnum::StructType(ty) if maybe_large && ty.count_fields() <= 2 => BasicTypeEnum::StructType(ty) if maybe_large && ty.count_fields() <= 2 =>
ty.get_field_types().iter().any(|ty| need_sret_impl(*ty, false)), ty.get_field_types().iter().any(|ty| need_sret_impl(ctx, *ty, false)),
_ => true, _ => true,
} }
} }
need_sret_impl(ty, true) need_sret_impl(ctx, ty, true)
} }
/// Implementation for generating LLVM IR for a function.
pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenContext) -> Result<(), String>> ( pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenContext) -> Result<(), String>> (
context: &'ctx Context, context: &'ctx Context,
generator: &mut G, generator: &mut G,
@ -612,11 +422,10 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
let (unifier, primitives) = &top_level_ctx.unifiers.read()[task.unifier_index]; let (unifier, primitives) = &top_level_ctx.unifiers.read()[task.unifier_index];
(Unifier::from_shared_unifier(unifier), *primitives) (Unifier::from_shared_unifier(unifier), *primitives)
}; };
unifier.put_primitive_store(&primitives);
unifier.top_level = Some(top_level_ctx.clone()); unifier.top_level = Some(top_level_ctx.clone());
let mut cache = HashMap::new(); let mut cache = HashMap::new();
for (a, b) in &task.subst { for (a, b) in task.subst.iter() {
// this should be unification between variables and concrete types // this should be unification between variables and concrete types
// and should not cause any problem... // and should not cause any problem...
let b = task.store.to_unifier_type(&mut unifier, &primitives, *b, &mut cache); let b = task.store.to_unifier_type(&mut unifier, &primitives, *b, &mut cache);
@ -630,7 +439,7 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
Err(err) Err(err)
} }
}) })
.unwrap(); .unwrap()
} }
// rebuild primitive store with unique representatives // rebuild primitive store with unique representatives
@ -646,7 +455,6 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
str: unifier.get_representative(primitives.str), str: unifier.get_representative(primitives.str),
exception: unifier.get_representative(primitives.exception), exception: unifier.get_representative(primitives.exception),
option: unifier.get_representative(primitives.option), option: unifier.get_representative(primitives.option),
..primitives
}; };
let mut type_cache: HashMap<_, _> = [ let mut type_cache: HashMap<_, _> = [
@ -655,14 +463,14 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
(primitives.uint32, context.i32_type().into()), (primitives.uint32, context.i32_type().into()),
(primitives.uint64, context.i64_type().into()), (primitives.uint64, context.i64_type().into()),
(primitives.float, context.f64_type().into()), (primitives.float, context.f64_type().into()),
(primitives.bool, context.i8_type().into()), (primitives.bool, context.bool_type().into()),
(primitives.str, { (primitives.str, {
let name = "str"; let name = "str";
match module.get_struct_type(name) { match module.get_struct_type(name) {
None => { None => {
let str_type = context.opaque_struct_type("str"); let str_type = context.opaque_struct_type("str");
let fields = [ let fields = [
context.i8_type().ptr_type(AddressSpace::default()).into(), context.i8_type().ptr_type(AddressSpace::Generic).into(),
generator.get_size_type(context).into(), generator.get_size_type(context).into(),
]; ];
str_type.set_body(&fields, false); str_type.set_body(&fields, false);
@ -671,54 +479,56 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
Some(t) => t.as_basic_type_enum() Some(t) => t.as_basic_type_enum()
} }
}), }),
(primitives.range, context.i32_type().array_type(3).ptr_type(AddressSpace::default()).into()), (primitives.range, context.i32_type().array_type(3).ptr_type(AddressSpace::Generic).into()),
(primitives.exception, { (primitives.exception, {
let name = "Exception"; let name = "Exception";
if let Some(t) = module.get_struct_type(name) { match module.get_struct_type(name) {
t.ptr_type(AddressSpace::default()).as_basic_type_enum() Some(t) => t.ptr_type(AddressSpace::Generic).as_basic_type_enum(),
} else { None => {
let exception = context.opaque_struct_type("Exception"); let exception = context.opaque_struct_type("Exception");
let int32 = context.i32_type().into(); let int32 = context.i32_type().into();
let int64 = context.i64_type().into(); let int64 = context.i64_type().into();
let str_ty = module.get_struct_type("str").unwrap().as_basic_type_enum(); let str_ty = module.get_struct_type("str").unwrap().as_basic_type_enum();
let fields = [int32, str_ty, int32, int32, str_ty, str_ty, int64, int64, int64]; let fields = [int32, str_ty, int32, int32, str_ty, str_ty, int64, int64, int64];
exception.set_body(&fields, false); exception.set_body(&fields, false);
exception.ptr_type(AddressSpace::default()).as_basic_type_enum() exception.ptr_type(AddressSpace::Generic).as_basic_type_enum()
}
} }
}) })
] ]
.iter() .iter()
.copied() .cloned()
.collect(); .collect();
// NOTE: special handling of option cannot use this type cache since it contains type var, // NOTE: special handling of option cannot use this type cache since it contains type var,
// handled inside get_llvm_type instead // handled inside get_llvm_type instead
let ConcreteTypeEnum::TFunc { args, ret, .. } = let (args, ret) = if let ConcreteTypeEnum::TFunc { args, ret, .. } =
task.store.get(task.signature) else { task.store.get(task.signature)
{
(
args.iter()
.map(|arg| FuncArg {
name: arg.name,
ty: task.store.to_unifier_type(&mut unifier, &primitives, arg.ty, &mut cache),
default_value: arg.default_value.clone(),
})
.collect_vec(),
task.store.to_unifier_type(&mut unifier, &primitives, *ret, &mut cache),
)
} else {
unreachable!() unreachable!()
}; };
let (args, ret) = (
args.iter()
.map(|arg| FuncArg {
name: arg.name,
ty: task.store.to_unifier_type(&mut unifier, &primitives, arg.ty, &mut cache),
default_value: arg.default_value.clone(),
})
.collect_vec(),
task.store.to_unifier_type(&mut unifier, &primitives, *ret, &mut cache),
);
let ret_type = if unifier.unioned(ret, primitives.none) { let ret_type = if unifier.unioned(ret, primitives.none) {
None None
} else { } else {
Some(get_llvm_abi_type(context, &module, generator, &mut unifier, top_level_ctx.as_ref(), &mut type_cache, &primitives, ret)) Some(get_llvm_type(context, &module, generator, &mut unifier, top_level_ctx.as_ref(), &mut type_cache, &primitives, ret))
}; };
let has_sret = ret_type.map_or(false, |ty| need_sret(ty)); let has_sret = ret_type.map_or(false, |ty| need_sret(context, ty));
let mut params = args let mut params = args
.iter() .iter()
.map(|arg| { .map(|arg| {
get_llvm_abi_type( get_llvm_type(
context, context,
&module, &module,
generator, generator,
@ -733,7 +543,7 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
.collect_vec(); .collect_vec();
if has_sret { if has_sret {
params.insert(0, ret_type.unwrap().ptr_type(AddressSpace::default()).into()); params.insert(0, ret_type.unwrap().ptr_type(AddressSpace::Generic).into());
} }
let fn_type = match ret_type { let fn_type = match ret_type {
@ -763,56 +573,42 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
let body_bb = context.append_basic_block(fn_val, "body"); let body_bb = context.append_basic_block(fn_val, "body");
let mut var_assignment = HashMap::new(); let mut var_assignment = HashMap::new();
let offset = u32::from(has_sret); let offset = if has_sret { 1 } else { 0 };
for (n, arg) in args.iter().enumerate() { for (n, arg) in args.iter().enumerate() {
let param = fn_val.get_nth_param((n as u32) + offset).unwrap(); let param = fn_val.get_nth_param((n as u32) + offset).unwrap();
let local_type = get_llvm_type( let alloca = builder.build_alloca(
context, get_llvm_type(
&module, context,
generator, &module,
&mut unifier, generator,
top_level_ctx.as_ref(), &mut unifier,
&mut type_cache, top_level_ctx.as_ref(),
arg.ty, &mut type_cache,
&primitives,
arg.ty,
),
&arg.name.to_string(),
); );
let alloca = builder builder.build_store(alloca, param);
.build_alloca(local_type, &format!("{}.addr", &arg.name.to_string()))
.unwrap();
// Remap boolean parameters into i8
let param = if local_type.is_int_type() && param.is_int_value() {
let expected_ty = local_type.into_int_type();
let param_val = param.into_int_value();
if expected_ty.get_bit_width() == 8 && param_val.get_type().get_bit_width() == 1 {
bool_to_i8(&builder, context, param_val)
} else {
param_val
}.into()
} else {
param
};
builder.build_store(alloca, param).unwrap();
var_assignment.insert(arg.name, (alloca, None, 0)); var_assignment.insert(arg.name, (alloca, None, 0));
} }
let return_buffer = if has_sret { let return_buffer = if has_sret {
Some(fn_val.get_nth_param(0).unwrap().into_pointer_value()) Some(fn_val.get_nth_param(0).unwrap().into_pointer_value())
} else { } else {
fn_type.get_return_type().map(|v| builder.build_alloca(v, "$ret").unwrap()) fn_type.get_return_type().map(|v| builder.build_alloca(v, "$ret"))
}; };
let static_values = { let static_values = {
let store = registry.static_value_store.lock(); let store = registry.static_value_store.lock();
store.store[task.id].clone() store.store[task.id].clone()
}; };
for (k, v) in static_values { for (k, v) in static_values.into_iter() {
let (_, static_val, _) = var_assignment.get_mut(&args[k].name).unwrap(); let (_, static_val, _) = var_assignment.get_mut(&args[k].name).unwrap();
*static_val = Some(v); *static_val = Some(v);
} }
builder.build_unconditional_branch(body_bb).unwrap(); builder.build_unconditional_branch(body_bb);
builder.position_at_end(body_bb); builder.position_at_end(body_bb);
let (dibuilder, compile_unit) = module.create_debug_info_builder( let (dibuilder, compile_unit) = module.create_debug_info_builder(
@ -821,14 +617,14 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
/* filename */ /* filename */
&task &task
.body .body
.first() .get(0)
.map_or_else( .map_or_else(
|| "<nac3_internal>".to_string(), || "<nac3_internal>".to_string(),
|f| f.location.file.0.to_string(), |f| f.location.file.0.to_string(),
), ),
/* directory */ "", /* directory */ "",
/* producer */ "NAC3", /* producer */ "NAC3",
/* is_optimized */ registry.llvm_options.opt_level != OptimizationLevel::None, /* is_optimized */ true,
/* compiler command line flags */ "", /* compiler command line flags */ "",
/* runtime_ver */ 0, /* runtime_ver */ 0,
/* split_name */ "", /* split_name */ "",
@ -851,7 +647,7 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
inkwell::debug_info::DIFlags::PUBLIC, inkwell::debug_info::DIFlags::PUBLIC,
); );
let (row, col) = let (row, col) =
task.body.first().map_or_else(|| (0, 0), |b| (b.location.row, b.location.column)); task.body.get(0).map_or_else(|| (0, 0), |b| (b.location.row, b.location.column));
let func_scope: DISubprogram<'_> = dibuilder.create_function( let func_scope: DISubprogram<'_> = dibuilder.create_function(
/* scope */ compile_unit.as_debug_info_scope(), /* scope */ compile_unit.as_debug_info_scope(),
/* func name */ symbol, /* func name */ symbol,
@ -863,7 +659,7 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
/* is_definition */ true, /* is_definition */ true,
/* scope_line */ row as u32, /* scope_line */ row as u32,
/* flags */ inkwell::debug_info::DIFlags::PUBLIC, /* flags */ inkwell::debug_info::DIFlags::PUBLIC,
/* is_optimized */ registry.llvm_options.opt_level != OptimizationLevel::None, /* is_optimized */ true,
); );
fn_val.set_subprogram(func_scope); fn_val.set_subprogram(func_scope);
@ -877,19 +673,18 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
return_buffer, return_buffer,
unwind_target: None, unwind_target: None,
outer_catch_clauses: None, outer_catch_clauses: None,
const_strings: HashMap::default(), const_strings: Default::default(),
registry, registry,
var_assignment, var_assignment,
type_cache, type_cache,
primitives, primitives,
init_bb, init_bb,
exception_val: Option::default(),
builder, builder,
module, module,
unifier, unifier,
static_value_store, static_value_store,
need_sret: has_sret, need_sret: has_sret,
current_loc: Location::default(), current_loc: Default::default(),
debug_info: (dibuilder, compile_unit, func_scope.as_debug_info_scope()), debug_info: (dibuilder, compile_unit, func_scope.as_debug_info_scope()),
}; };
@ -900,13 +695,13 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
func_scope.as_debug_info_scope(), func_scope.as_debug_info_scope(),
None None
); );
code_gen_context.builder.set_current_debug_location(loc); code_gen_context.builder.set_current_debug_location(context, loc);
let result = codegen_function(generator, &mut code_gen_context); let result = codegen_function(generator, &mut code_gen_context);
// after static analysis, only void functions can have no return at the end. // after static analysis, only void functions can have no return at the end.
if !code_gen_context.is_terminated() { if !code_gen_context.is_terminated() {
code_gen_context.builder.build_return(None).unwrap(); code_gen_context.builder.build_return(None);
} }
code_gen_context.builder.unset_current_debug_location(); code_gen_context.builder.unset_current_debug_location();
@ -920,15 +715,6 @@ pub fn gen_func_impl<'ctx, G: CodeGenerator, F: FnOnce(&mut G, &mut CodeGenConte
Ok((builder, module, fn_val)) Ok((builder, module, fn_val))
} }
/// Generates LLVM IR for a function.
///
/// * `context` - The [LLVM Context][`Context`] used in generating the function body.
/// * `generator` - The [`CodeGenerator`] for generating various program constructs.
/// * `registry` - The [`WorkerRegistry`] responsible for monitoring this function generation task.
/// * `builder` - The [`Builder`] used for generating LLVM IR.
/// * `module` - The [`Module`] of which the generated LLVM function will be inserted into.
/// * `task` - The [`CodeGenTask`] associated with this function generation task.
///
pub fn gen_func<'ctx, G: CodeGenerator>( pub fn gen_func<'ctx, G: CodeGenerator>(
context: &'ctx Context, context: &'ctx Context,
generator: &mut G, generator: &mut G,
@ -939,79 +725,9 @@ pub fn gen_func<'ctx, G: CodeGenerator>(
) -> Result<(Builder<'ctx>, Module<'ctx>, FunctionValue<'ctx>), (Builder<'ctx>, String)> { ) -> Result<(Builder<'ctx>, Module<'ctx>, FunctionValue<'ctx>), (Builder<'ctx>, String)> {
let body = task.body.clone(); let body = task.body.clone();
gen_func_impl(context, generator, registry, builder, module, task, |generator, ctx| { gen_func_impl(context, generator, registry, builder, module, task, |generator, ctx| {
generator.gen_block(ctx, body.iter()) for stmt in body.iter() {
generator.gen_stmt(ctx, stmt)?;
}
Ok(())
}) })
} }
/// Converts the value of a boolean-like value `bool_value` into an `i1`.
fn bool_to_i1<'ctx>(builder: &Builder<'ctx>, bool_value: IntValue<'ctx>) -> IntValue<'ctx> {
if bool_value.get_type().get_bit_width() == 1 {
bool_value
} else {
builder
.build_int_compare(
IntPredicate::NE,
bool_value,
bool_value.get_type().const_zero(),
"tobool",
)
.unwrap()
}
}
/// Converts the value of a boolean-like value `bool_value` into an `i8`.
fn bool_to_i8<'ctx>(
builder: &Builder<'ctx>,
ctx: &'ctx Context,
bool_value: IntValue<'ctx>
) -> IntValue<'ctx> {
let value_bits = bool_value.get_type().get_bit_width();
match value_bits {
8 => bool_value,
1 => builder.build_int_z_extend(bool_value, ctx.i8_type(), "frombool").unwrap(),
_ => bool_to_i8(
builder,
ctx,
builder
.build_int_compare(
IntPredicate::NE,
bool_value,
bool_value.get_type().const_zero(),
"",
)
.unwrap()
),
}
}
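// A plain-Rust sketch of the boolean convention the two helpers above implement
// (an illustration of the scalar logic, not LLVM IR): booleans are stored as i8
// in memory but branched on as i1, so any nonzero byte normalises to 1.
fn bool_repr_sketch(byte: u8) -> u8 {
    let as_i1 = byte != 0; // bool_to_i1: i8 -> i1 via `icmp ne 0`
    u8::from(as_i1)        // bool_to_i8: i1 -> i8 via zero-extension
}
// bool_repr_sketch(0xFF) == 1, bool_repr_sketch(0) == 0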
/// Generates a sequence of IR which checks whether `value` does not exceed the upper bound of the
/// range as defined by `stop` and `step`.
///
/// Note that the generated IR will **not** check whether value is part of the range or whether
/// value exceeds the lower bound of the range (as evident by the missing `start` argument).
///
/// The generated IR is equivalent to the following Rust code:
///
/// ```rust,ignore
/// let sign = step > 0;
/// let (lo, hi) = if sign { (value, stop) } else { (stop, value) };
/// let cmp = lo < hi;
/// ```
///
/// Returns an `i1` [`IntValue`] representing the result of whether the `value` is in the range.
fn gen_in_range_check<'ctx>(
ctx: &CodeGenContext<'ctx, '_>,
value: IntValue<'ctx>,
stop: IntValue<'ctx>,
step: IntValue<'ctx>,
) -> IntValue<'ctx> {
let sign = ctx.builder.build_int_compare(IntPredicate::SGT, step, ctx.ctx.i32_type().const_zero(), "").unwrap();
let lo = ctx.builder.build_select(sign, value, stop, "")
.map(BasicValueEnum::into_int_value)
.unwrap();
let hi = ctx.builder.build_select(sign, stop, value, "")
.map(BasicValueEnum::into_int_value)
.unwrap();
ctx.builder.build_int_compare(IntPredicate::SLT, lo, hi, "cmp").unwrap()
}
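// A worked example of the range check above, using the equivalent Rust from the
// doc comment (a sketch, not generated IR): only the upper bound is checked.
fn in_range_check_sketch(value: i32, stop: i32, step: i32) -> bool {
    let sign = step > 0;
    let (lo, hi) = if sign { (value, stop) } else { (stop, value) };
    lo < hi
}
// With stop = 10, step = 2: value = 8 passes (8 < 10), value = 10 does not.
// With stop = 0, step = -1: value = 1 passes (0 < 1), value = 0 does not.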

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,23 +1,18 @@
use crate::{ use crate::{
codegen::{ codegen::{
concrete_type::ConcreteTypeStore, CodeGenContext, CodeGenLLVMOptions, concrete_type::ConcreteTypeStore, CodeGenContext, CodeGenTask, DefaultCodeGenerator,
CodeGenTargetMachineOptions, CodeGenTask, DefaultCodeGenerator, WithCall, WorkerRegistry, WithCall, WorkerRegistry,
}, },
symbol_resolver::{SymbolResolver, ValueEnum}, symbol_resolver::{SymbolResolver, ValueEnum},
toplevel::{ toplevel::{
composer::{ComposerConfig, TopLevelComposer}, DefinitionId, FunInstance, TopLevelContext, composer::TopLevelComposer, DefinitionId, FunInstance, TopLevelContext, TopLevelDef,
TopLevelDef,
}, },
typecheck::{ typecheck::{
type_inferencer::{FunctionData, Inferencer, PrimitiveStore}, type_inferencer::{FunctionData, Inferencer, PrimitiveStore},
typedef::{FunSignature, FuncArg, Type, TypeEnum, Unifier, VarMap}, typedef::{FunSignature, FuncArg, Type, TypeEnum, Unifier},
}, },
}; };
use indoc::indoc; use indoc::indoc;
use inkwell::{
targets::{InitializationConfig, Target},
OptimizationLevel
};
use nac3parser::{ use nac3parser::{
ast::{fold::Fold, StrRef}, ast::{fold::Fold, StrRef},
parser::parse_program, parser::parse_program,
@ -64,14 +59,12 @@ impl SymbolResolver for Resolver {
unimplemented!() unimplemented!()
} }
fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, HashSet<String>> { fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, String> {
self.id_to_def self.id_to_def
.read() .read()
.get(&id) .get(&id)
.cloned() .cloned()
.ok_or_else(|| HashSet::from([ .ok_or_else(|| format!("cannot find symbol `{}`", id))
format!("cannot find symbol `{}`", id),
]))
} }
fn get_string_id(&self, _: &str) -> i32 { fn get_string_id(&self, _: &str) -> i32 {
@ -92,7 +85,7 @@ fn test_primitives() {
"}; "};
let statements = parse_program(source, Default::default()).unwrap(); let statements = parse_program(source, Default::default()).unwrap();
let composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 32).0; let composer: TopLevelComposer = Default::default();
let mut unifier = composer.unifier.clone(); let mut unifier = composer.unifier.clone();
let primitives = composer.primitives_ty; let primitives = composer.primitives_ty;
let top_level = Arc::new(composer.make_top_level_context()); let top_level = Arc::new(composer.make_top_level_context());
@ -111,7 +104,7 @@ fn test_primitives() {
FuncArg { name: "b".into(), ty: primitives.int32, default_value: None }, FuncArg { name: "b".into(), ty: primitives.int32, default_value: None },
], ],
ret: primitives.int32, ret: primitives.int32,
vars: VarMap::new(), vars: HashMap::new(),
}; };
let mut store = ConcreteTypeStore::new(); let mut store = ConcreteTypeStore::new();
@ -187,18 +180,24 @@ fn test_primitives() {
let expected = indoc! {" let expected = indoc! {"
; ModuleID = 'test' ; ModuleID = 'test'
source_filename = \"test\" source_filename = \"test\"
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn define i32 @testing(i32 %0, i32 %1) !dbg !4 {
define i32 @testing(i32 %0, i32 %1) local_unnamed_addr #0 !dbg !4 {
init: init:
%add = add i32 %1, %0, !dbg !9 %add = add i32 %0, %1, !dbg !9
%cmp = icmp eq i32 %add, 1, !dbg !10 %cmp = icmp eq i32 %add, 1, !dbg !10
%. = select i1 %cmp, i32 %0, i32 0, !dbg !11 br i1 %cmp, label %then, label %else, !dbg !10
ret i32 %., !dbg !12
then: ; preds = %init
br label %cont, !dbg !11
else: ; preds = %init
br label %cont, !dbg !12
cont: ; preds = %else, %then
%if_exp_result.0 = phi i32 [ %0, %then ], [ 0, %else ], !dbg !13
ret i32 %if_exp_result.0, !dbg !14
} }
attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone willreturn }
!llvm.module.flags = !{!0, !1} !llvm.module.flags = !{!0, !1}
!llvm.dbg.cu = !{!2} !llvm.dbg.cu = !{!2}
@ -213,25 +212,15 @@ fn test_primitives() {
!8 = !{} !8 = !{}
!9 = !DILocation(line: 1, column: 9, scope: !4) !9 = !DILocation(line: 1, column: 9, scope: !4)
!10 = !DILocation(line: 2, column: 15, scope: !4) !10 = !DILocation(line: 2, column: 15, scope: !4)
!11 = !DILocation(line: 0, scope: !4) !11 = !DILocation(line: 2, column: 5, scope: !4)
!12 = !DILocation(line: 3, column: 8, scope: !4) !12 = !DILocation(line: 2, column: 22, scope: !4)
!13 = !DILocation(line: 0, scope: !4)
!14 = !DILocation(line: 3, column: 8, scope: !4)
"} "}
.trim(); .trim();
assert_eq!(expected, module.print_to_string().to_str().unwrap().trim()); assert_eq!(expected, module.print_to_string().to_str().unwrap().trim());
}))); })));
let (registry, handles) = WorkerRegistry::create_workers(threads, top_level, f);
Target::initialize_all(&InitializationConfig::default());
let llvm_options = CodeGenLLVMOptions {
opt_level: OptimizationLevel::Default,
target: CodeGenTargetMachineOptions::from_host_triple(),
};
let (registry, handles) = WorkerRegistry::create_workers(
threads,
top_level,
&llvm_options,
&f
);
registry.add_task(task); registry.add_task(task);
registry.wait_tasks_complete(handles); registry.wait_tasks_complete(handles);
} }
@ -249,7 +238,7 @@ fn test_simple_call() {
"}; "};
let statements_2 = parse_program(source_2, Default::default()).unwrap(); let statements_2 = parse_program(source_2, Default::default()).unwrap();
let composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 32).0; let composer: TopLevelComposer = Default::default();
let mut unifier = composer.unifier.clone(); let mut unifier = composer.unifier.clone();
let primitives = composer.primitives_ty; let primitives = composer.primitives_ty;
let top_level = Arc::new(composer.make_top_level_context()); let top_level = Arc::new(composer.make_top_level_context());
@ -258,7 +247,7 @@ fn test_simple_call() {
let signature = FunSignature { let signature = FunSignature {
args: vec![FuncArg { name: "a".into(), ty: primitives.int32, default_value: None }], args: vec![FuncArg { name: "a".into(), ty: primitives.int32, default_value: None }],
ret: primitives.int32, ret: primitives.int32,
vars: VarMap::new(), vars: HashMap::new(),
}; };
let fun_ty = unifier.add_ty(TypeEnum::TFunc(signature.clone())); let fun_ty = unifier.add_ty(TypeEnum::TFunc(signature.clone()));
let mut store = ConcreteTypeStore::new(); let mut store = ConcreteTypeStore::new();
@ -372,26 +361,22 @@ fn test_simple_call() {
; ModuleID = 'test' ; ModuleID = 'test'
source_filename = \"test\" source_filename = \"test\"
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn define i32 @testing(i32 %0) !dbg !5 {
define i32 @testing(i32 %0) local_unnamed_addr #0 !dbg !5 {
init: init:
%add.i = shl i32 %0, 1, !dbg !10 %call = call i32 @foo.0(i32 %0), !dbg !10
%mul = add i32 %add.i, 2, !dbg !10 %mul = mul i32 %call, 2, !dbg !11
ret i32 %mul, !dbg !10 ret i32 %mul, !dbg !11
} }
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn define i32 @foo.0(i32 %0) !dbg !12 {
define i32 @foo.0(i32 %0) local_unnamed_addr #0 !dbg !11 {
init: init:
%add = add i32 %0, 1, !dbg !12 %add = add i32 %0, 1, !dbg !13
ret i32 %add, !dbg !12 ret i32 %add, !dbg !13
} }
attributes #0 = { mustprogress nofree norecurse nosync nounwind readnone willreturn }
!llvm.module.flags = !{!0, !1} !llvm.module.flags = !{!0, !1}
!llvm.dbg.cu = !{!2, !4} !llvm.dbg.cu = !{!2, !4}
!0 = !{i32 2, !\"Debug Info Version\", i32 3} !0 = !{i32 2, !\"Debug Info Version\", i32 3}
!1 = !{i32 2, !\"Dwarf Version\", i32 4} !1 = !{i32 2, !\"Dwarf Version\", i32 4}
!2 = distinct !DICompileUnit(language: DW_LANG_Python, file: !3, producer: \"NAC3\", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug) !2 = distinct !DICompileUnit(language: DW_LANG_Python, file: !3, producer: \"NAC3\", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
@ -402,26 +387,15 @@ fn test_simple_call() {
!7 = !{!8} !7 = !{!8}
!8 = !DIBasicType(name: \"_\", flags: DIFlagPublic) !8 = !DIBasicType(name: \"_\", flags: DIFlagPublic)
!9 = !{} !9 = !{}
!10 = !DILocation(line: 2, column: 12, scope: !5) !10 = !DILocation(line: 1, column: 9, scope: !5)
!11 = distinct !DISubprogram(name: \"foo.0\", linkageName: \"foo.0\", scope: null, file: !3, line: 1, type: !6, scopeLine: 1, flags: DIFlagPublic, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !4, retainedNodes: !9) !11 = !DILocation(line: 2, column: 12, scope: !5)
!12 = !DILocation(line: 1, column: 12, scope: !11) !12 = distinct !DISubprogram(name: \"foo.0\", linkageName: \"foo.0\", scope: null, file: !3, line: 1, type: !6, scopeLine: 1, flags: DIFlagPublic, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !4, retainedNodes: !9)
!13 = !DILocation(line: 1, column: 12, scope: !12)
"} "}
.trim(); .trim();
assert_eq!(expected, module.print_to_string().to_str().unwrap().trim()); assert_eq!(expected, module.print_to_string().to_str().unwrap().trim());
}))); })));
let (registry, handles) = WorkerRegistry::create_workers(threads, top_level, f);
Target::initialize_all(&InitializationConfig::default());
let llvm_options = CodeGenLLVMOptions {
opt_level: OptimizationLevel::Default,
target: CodeGenTargetMachineOptions::from_host_triple(),
};
let (registry, handles) = WorkerRegistry::create_workers(
threads,
top_level,
&llvm_options,
&f
);
registry.add_task(task); registry.add_task(task);
registry.wait_tasks_complete(handles); registry.wait_tasks_complete(handles);
} }


@ -1,19 +1,22 @@
use std::fmt::Debug; use std::fmt::Debug;
use std::sync::Arc; use std::sync::Arc;
use std::{collections::HashMap, collections::HashSet, fmt::Display}; use std::{collections::HashMap, fmt::Display};
use std::rc::Rc;
use crate::typecheck::typedef::TypeEnum;
use crate::{ use crate::{
codegen::{CodeGenContext, CodeGenerator}, codegen::CodeGenContext,
toplevel::{DefinitionId, TopLevelDef, type_annotation::TypeAnnotation}, toplevel::{DefinitionId, TopLevelDef},
};
use crate::{
codegen::CodeGenerator,
typecheck::{ typecheck::{
type_inferencer::PrimitiveStore, type_inferencer::PrimitiveStore,
typedef::{Type, TypeEnum, Unifier, VarMap}, typedef::{Type, Unifier},
}, },
}; };
use inkwell::values::{BasicValueEnum, FloatValue, IntValue, PointerValue, StructValue}; use inkwell::values::{BasicValueEnum, FloatValue, IntValue, PointerValue, StructValue};
use itertools::{chain, Itertools, izip}; use itertools::{chain, izip};
use nac3parser::ast::{Constant, Expr, Location, StrRef}; use nac3parser::ast::{Expr, Location, StrRef};
use parking_lot::RwLock; use parking_lot::RwLock;
#[derive(Clone, PartialEq, Debug)] #[derive(Clone, PartialEq, Debug)]
@ -30,190 +33,15 @@ pub enum SymbolValue {
OptionNone, OptionNone,
} }
impl SymbolValue {
/// Creates a [`SymbolValue`] from a [`Constant`].
///
/// * `constant` - The constant to create the value from.
/// * `expected_ty` - The expected type of the [`SymbolValue`].
pub fn from_constant(
constant: &Constant,
expected_ty: Type,
primitives: &PrimitiveStore,
unifier: &mut Unifier
) -> Result<Self, String> {
match constant {
Constant::None => {
if unifier.unioned(expected_ty, primitives.option) {
Ok(SymbolValue::OptionNone)
} else {
Err(format!("Expected {expected_ty:?}, but got Option"))
}
}
Constant::Bool(b) => {
if unifier.unioned(expected_ty, primitives.bool) {
Ok(SymbolValue::Bool(*b))
} else {
Err(format!("Expected {expected_ty:?}, but got bool"))
}
}
Constant::Str(s) => {
if unifier.unioned(expected_ty, primitives.str) {
Ok(SymbolValue::Str(s.to_string()))
} else {
Err(format!("Expected {expected_ty:?}, but got str"))
}
},
Constant::Int(i) => {
if unifier.unioned(expected_ty, primitives.int32) {
i32::try_from(*i)
.map(SymbolValue::I32)
.map_err(|e| e.to_string())
} else if unifier.unioned(expected_ty, primitives.int64) {
i64::try_from(*i)
.map(SymbolValue::I64)
.map_err(|e| e.to_string())
} else if unifier.unioned(expected_ty, primitives.uint32) {
u32::try_from(*i)
.map(SymbolValue::U32)
.map_err(|e| e.to_string())
} else if unifier.unioned(expected_ty, primitives.uint64) {
u64::try_from(*i)
.map(SymbolValue::U64)
.map_err(|e| e.to_string())
} else {
Err(format!("Expected {}, but got int", unifier.stringify(expected_ty)))
}
}
Constant::Tuple(t) => {
let expected_ty = unifier.get_ty(expected_ty);
let TypeEnum::TTuple { ty } = expected_ty.as_ref() else {
return Err(format!("Expected {:?}, but got Tuple", expected_ty.get_type_name()))
};
assert_eq!(ty.len(), t.len());
let elems = t
.iter()
.zip(ty)
.map(|(constant, ty)| Self::from_constant(constant, *ty, primitives, unifier))
.collect::<Result<Vec<SymbolValue>, _>>()?;
Ok(SymbolValue::Tuple(elems))
}
Constant::Float(f) => {
if unifier.unioned(expected_ty, primitives.float) {
Ok(SymbolValue::Double(*f))
} else {
Err(format!("Expected {expected_ty:?}, but got float"))
}
},
_ => Err(format!("Unsupported value type {constant:?}")),
}
}
/// Creates a [`SymbolValue`] from a [`Constant`], with its type being inferred from the constant value.
///
/// * `constant` - The constant to create the value from.
pub fn from_constant_inferred(
constant: &Constant,
) -> Result<Self, String> {
match constant {
Constant::None => Ok(SymbolValue::OptionNone),
Constant::Bool(b) => Ok(SymbolValue::Bool(*b)),
Constant::Str(s) => Ok(SymbolValue::Str(s.to_string())),
Constant::Int(i) => {
let i = *i;
if i >= 0 {
i32::try_from(i).map(SymbolValue::I32)
.or_else(|_| i64::try_from(i).map(SymbolValue::I64))
.map_err(|_| format!("Literal cannot be expressed as any integral type: {i}"))
} else {
u32::try_from(i).map(SymbolValue::U32)
.or_else(|_| u64::try_from(i).map(SymbolValue::U64))
.map_err(|_| format!("Literal cannot be expressed as any integral type: {i}"))
}
}
Constant::Tuple(t) => {
let elems = t
.iter()
.map(Self::from_constant_inferred)
.collect::<Result<Vec<SymbolValue>, _>>()?;
Ok(SymbolValue::Tuple(elems))
}
Constant::Float(f) => Ok(SymbolValue::Double(*f)),
_ => Err(format!("Unsupported value type {constant:?}")),
}
}
/// Returns the [`Type`] representing the data type of this value.
pub fn get_type(&self, primitives: &PrimitiveStore, unifier: &mut Unifier) -> Type {
match self {
SymbolValue::I32(_) => primitives.int32,
SymbolValue::I64(_) => primitives.int64,
SymbolValue::U32(_) => primitives.uint32,
SymbolValue::U64(_) => primitives.uint64,
SymbolValue::Str(_) => primitives.str,
SymbolValue::Double(_) => primitives.float,
SymbolValue::Bool(_) => primitives.bool,
SymbolValue::Tuple(vs) => {
let vs_tys = vs
.iter()
.map(|v| v.get_type(primitives, unifier))
.collect::<Vec<_>>();
unifier.add_ty(TypeEnum::TTuple {
ty: vs_tys,
})
}
SymbolValue::OptionSome(_) | SymbolValue::OptionNone => primitives.option,
}
}
/// Returns the [`TypeAnnotation`] representing the data type of this value.
pub fn get_type_annotation(&self, primitives: &PrimitiveStore, unifier: &mut Unifier) -> TypeAnnotation {
match self {
SymbolValue::Bool(..)
| SymbolValue::Double(..)
| SymbolValue::I32(..)
| SymbolValue::I64(..)
| SymbolValue::U32(..)
| SymbolValue::U64(..)
| SymbolValue::Str(..) => TypeAnnotation::Primitive(self.get_type(primitives, unifier)),
SymbolValue::Tuple(vs) => {
let vs_tys = vs
.iter()
.map(|v| v.get_type_annotation(primitives, unifier))
.collect::<Vec<_>>();
TypeAnnotation::Tuple(vs_tys)
}
SymbolValue::OptionNone => TypeAnnotation::CustomClass {
id: primitives.option.obj_id(unifier).unwrap(),
params: Vec::default(),
},
SymbolValue::OptionSome(v) => {
let ty = v.get_type_annotation(primitives, unifier);
TypeAnnotation::CustomClass {
id: primitives.option.obj_id(unifier).unwrap(),
params: vec![ty],
}
}
}
}
/// Returns the [`TypeEnum`] representing the data type of this value.
pub fn get_type_enum(&self, primitives: &PrimitiveStore, unifier: &mut Unifier) -> Rc<TypeEnum> {
let ty = self.get_type(primitives, unifier);
unifier.get_ty(ty)
}
}
impl Display for SymbolValue { impl Display for SymbolValue {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self { match self {
SymbolValue::I32(i) => write!(f, "{i}"), SymbolValue::I32(i) => write!(f, "{}", i),
SymbolValue::I64(i) => write!(f, "int64({i})"), SymbolValue::I64(i) => write!(f, "int64({})", i),
SymbolValue::U32(i) => write!(f, "uint32({i})"), SymbolValue::U32(i) => write!(f, "uint32({})", i),
SymbolValue::U64(i) => write!(f, "uint64({i})"), SymbolValue::U64(i) => write!(f, "uint64({})", i),
SymbolValue::Str(s) => write!(f, "\"{s}\""), SymbolValue::Str(s) => write!(f, "\"{}\"", s),
SymbolValue::Double(d) => write!(f, "{d}"), SymbolValue::Double(d) => write!(f, "{}", d),
SymbolValue::Bool(b) => { SymbolValue::Bool(b) => {
if *b { if *b {
write!(f, "True") write!(f, "True")
@ -222,82 +50,42 @@ impl Display for SymbolValue {
} }
} }
SymbolValue::Tuple(t) => { SymbolValue::Tuple(t) => {
write!(f, "({})", t.iter().map(|v| format!("{v}")).collect::<Vec<_>>().join(", ")) write!(f, "({})", t.iter().map(|v| format!("{}", v)).collect::<Vec<_>>().join(", "))
} }
SymbolValue::OptionSome(v) => write!(f, "Some({v})"), SymbolValue::OptionSome(v) => write!(f, "Some({})", v),
SymbolValue::OptionNone => write!(f, "none"), SymbolValue::OptionNone => write!(f, "none"),
} }
} }
} }
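// A quick sketch of the formatting behaviour above, assuming this crate's
// `SymbolValue` is in scope (hypothetical test):
#[test]
fn symbol_value_display_sketch() {
    let v = SymbolValue::Tuple(vec![SymbolValue::I32(1), SymbolValue::Bool(true)]);
    assert_eq!(v.to_string(), "(1, True)");
    assert_eq!(SymbolValue::I64(-5).to_string(), "int64(-5)");
}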
impl TryFrom<SymbolValue> for u64 {
type Error = ();
/// Tries to convert a [`SymbolValue`] into a [`u64`], returning [`Err`] if the value is not
/// numeric or if the value cannot be converted into a `u64` without overflow.
fn try_from(value: SymbolValue) -> Result<Self, Self::Error> {
match value {
SymbolValue::I32(v) => u64::try_from(v).map_err(|_| ()),
SymbolValue::I64(v) => u64::try_from(v).map_err(|_| ()),
SymbolValue::U32(v) => Ok(v as u64),
SymbolValue::U64(v) => Ok(v),
_ => Err(()),
}
}
}
impl TryFrom<SymbolValue> for i128 {
type Error = ();
/// Tries to convert a [`SymbolValue`] into a [`i128`], returning [`Err`] if the value is not
/// numeric.
fn try_from(value: SymbolValue) -> Result<Self, Self::Error> {
match value {
SymbolValue::I32(v) => Ok(v as i128),
SymbolValue::I64(v) => Ok(v as i128),
SymbolValue::U32(v) => Ok(v as i128),
SymbolValue::U64(v) => Ok(v as i128),
_ => Err(()),
}
}
}
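// Conversion sketch for the two `TryFrom` impls above (hypothetical usage):
#[test]
fn symbol_value_tryfrom_sketch() {
    assert_eq!(u64::try_from(SymbolValue::U32(7)), Ok(7u64));
    assert!(u64::try_from(SymbolValue::I32(-1)).is_err());
    assert_eq!(i128::try_from(SymbolValue::U64(u64::MAX)), Ok(u64::MAX as i128));
}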
pub trait StaticValue { pub trait StaticValue {
/// Returns a unique identifier for this value.
fn get_unique_identifier(&self) -> u64; fn get_unique_identifier(&self) -> u64;
/// Returns the constant object represented by this unique identifier. fn get_const_obj<'ctx, 'a>(
fn get_const_obj<'ctx>(
&self, &self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
generator: &mut dyn CodeGenerator, generator: &mut dyn CodeGenerator,
) -> BasicValueEnum<'ctx>; ) -> BasicValueEnum<'ctx>;
/// Converts this value to a LLVM [`BasicValueEnum`]. fn to_basic_value_enum<'ctx, 'a>(
fn to_basic_value_enum<'ctx>(
&self, &self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
generator: &mut dyn CodeGenerator, generator: &mut dyn CodeGenerator,
expected_ty: Type, expected_ty: Type,
) -> Result<BasicValueEnum<'ctx>, String>; ) -> Result<BasicValueEnum<'ctx>, String>;
/// Returns a field within this value. fn get_field<'ctx, 'a>(
fn get_field<'ctx>(
&self, &self,
name: StrRef, name: StrRef,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
) -> Option<ValueEnum<'ctx>>; ) -> Option<ValueEnum<'ctx>>;
/// Returns a single element of this tuple.
fn get_tuple_element<'ctx>(&self, index: u32) -> Option<ValueEnum<'ctx>>; fn get_tuple_element<'ctx>(&self, index: u32) -> Option<ValueEnum<'ctx>>;
} }
#[derive(Clone)] #[derive(Clone)]
pub enum ValueEnum<'ctx> { pub enum ValueEnum<'ctx> {
/// [ValueEnum] representing a static value.
Static(Arc<dyn StaticValue + Send + Sync>), Static(Arc<dyn StaticValue + Send + Sync>),
/// [ValueEnum] representing a dynamic value.
Dynamic(BasicValueEnum<'ctx>), Dynamic(BasicValueEnum<'ctx>),
} }
@ -332,8 +120,6 @@ impl<'ctx> From<StructValue<'ctx>> for ValueEnum<'ctx> {
} }
impl<'ctx> ValueEnum<'ctx> { impl<'ctx> ValueEnum<'ctx> {
/// Converts this [`ValueEnum`] to a [`BasicValueEnum`].
pub fn to_basic_value_enum<'a>( pub fn to_basic_value_enum<'a>(
self, self,
ctx: &mut CodeGenContext<'ctx, 'a>, ctx: &mut CodeGenContext<'ctx, 'a>,
@ -348,7 +134,7 @@ impl<'ctx> ValueEnum<'ctx> {
} }
pub trait SymbolResolver { pub trait SymbolResolver {
/// Get type of type variable identifier or top-level function type, // get type of type variable identifier or top-level function type
fn get_symbol_type( fn get_symbol_type(
&self, &self,
unifier: &mut Unifier, unifier: &mut Unifier,
@ -357,16 +143,16 @@ pub trait SymbolResolver {
str: StrRef, str: StrRef,
) -> Result<Type, String>; ) -> Result<Type, String>;
/// Get the top-level definition of identifiers. // get the top-level definition of identifiers
fn get_identifier_def(&self, str: StrRef) -> Result<DefinitionId, HashSet<String>>; fn get_identifier_def(&self, str: StrRef) -> Result<DefinitionId, String>;
fn get_symbol_value<'ctx>( fn get_symbol_value<'ctx, 'a>(
&self, &self,
str: StrRef, str: StrRef,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
) -> Option<ValueEnum<'ctx>>; ) -> Option<ValueEnum<'ctx>>;
fn get_default_param_value(&self, expr: &Expr) -> Option<SymbolValue>; fn get_default_param_value(&self, expr: &nac3parser::ast::Expr) -> Option<SymbolValue>;
fn get_string_id(&self, s: &str) -> i32; fn get_string_id(&self, s: &str) -> i32;
fn get_exception_id(&self, tyid: usize) -> usize; fn get_exception_id(&self, tyid: usize) -> usize;
@ -381,7 +167,7 @@ pub trait SymbolResolver {
} }
thread_local! { thread_local! {
static IDENTIFIER_ID: [StrRef; 12] = [ static IDENTIFIER_ID: [StrRef; 11] = [
"int32".into(), "int32".into(),
"int64".into(), "int64".into(),
"float".into(), "float".into(),
@ -393,18 +179,17 @@ thread_local! {
"Exception".into(), "Exception".into(),
"uint32".into(), "uint32".into(),
"uint64".into(), "uint64".into(),
"Literal".into(),
]; ];
} }
/// Converts a type annotation into a [Type]. // convert type annotation into type
pub fn parse_type_annotation<T>( pub fn parse_type_annotation<T>(
resolver: &dyn SymbolResolver, resolver: &dyn SymbolResolver,
top_level_defs: &[Arc<RwLock<TopLevelDef>>], top_level_defs: &[Arc<RwLock<TopLevelDef>>],
unifier: &mut Unifier, unifier: &mut Unifier,
primitives: &PrimitiveStore, primitives: &PrimitiveStore,
expr: &Expr<T>, expr: &Expr<T>,
) -> Result<Type, HashSet<String>> { ) -> Result<Type, String> {
use nac3parser::ast::ExprKind::*; use nac3parser::ast::ExprKind::*;
let ids = IDENTIFIER_ID.with(|ids| *ids); let ids = IDENTIFIER_ID.with(|ids| *ids);
let int32_id = ids[0]; let int32_id = ids[0];
@ -418,7 +203,6 @@ pub fn parse_type_annotation<T>(
let exn_id = ids[8]; let exn_id = ids[8];
let uint32_id = ids[9]; let uint32_id = ids[9];
let uint64_id = ids[10]; let uint64_id = ids[10];
let literal_id = ids[11];
let name_handling = |id: &StrRef, loc: Location, unifier: &mut Unifier| { let name_handling = |id: &StrRef, loc: Location, unifier: &mut Unifier| {
if *id == int32_id { if *id == int32_id {
@ -439,44 +223,39 @@ pub fn parse_type_annotation<T>(
Ok(primitives.exception) Ok(primitives.exception)
} else { } else {
let obj_id = resolver.get_identifier_def(*id); let obj_id = resolver.get_identifier_def(*id);
if let Ok(obj_id) = obj_id { match obj_id {
let def = top_level_defs[obj_id.0].read(); Ok(obj_id) => {
if let TopLevelDef::Class { fields, methods, type_vars, .. } = &*def { let def = top_level_defs[obj_id.0].read();
if !type_vars.is_empty() { if let TopLevelDef::Class { fields, methods, type_vars, .. } = &*def {
return Err(HashSet::from([ if !type_vars.is_empty() {
format!( return Err(format!(
"Unexpected number of type parameters: expected {} but got 0", "Unexpected number of type parameters: expected {} but got 0",
type_vars.len() type_vars.len()
), ));
])) }
} let fields = chain(
let fields = chain( fields.iter().map(|(k, v, m)| (*k, (*v, *m))),
fields.iter().map(|(k, v, m)| (*k, (*v, *m))), methods.iter().map(|(k, v, _)| (*k, (*v, false))),
methods.iter().map(|(k, v, _)| (*k, (*v, false))), )
)
.collect(); .collect();
Ok(unifier.add_ty(TypeEnum::TObj { Ok(unifier.add_ty(TypeEnum::TObj {
obj_id, obj_id,
fields, fields,
params: VarMap::default(), params: Default::default(),
})) }))
} else { } else {
Err(HashSet::from([ Err(format!("Cannot use function name as type at {}", loc))
format!("Cannot use function name as type at {loc}"), }
]))
} }
} else { Err(_) => {
let ty = resolver let ty = resolver
.get_symbol_type(unifier, top_level_defs, primitives, *id) .get_symbol_type(unifier, top_level_defs, primitives, *id)
.map_err(|e| HashSet::from([ .map_err(|e| format!("Unknown type annotation at {}: {}", loc, e))?;
format!("Unknown type annotation at {loc}: {e}"), if let TypeEnum::TVar { .. } = &*unifier.get_ty(ty) {
]))?; Ok(ty)
if let TypeEnum::TVar { .. } = &*unifier.get_ty(ty) { } else {
Ok(ty) Err(format!("Unknown type annotation {} at {}", id, loc))
} else { }
Err(HashSet::from([
format!("Unknown type annotation {id} at {loc}"),
]))
} }
} }
} }
@ -499,31 +278,8 @@ pub fn parse_type_annotation<T>(
.collect::<Result<Vec<_>, _>>()?; .collect::<Result<Vec<_>, _>>()?;
Ok(unifier.add_ty(TypeEnum::TTuple { ty })) Ok(unifier.add_ty(TypeEnum::TTuple { ty }))
} else { } else {
Err(HashSet::from([ Err("Expected multiple elements for tuple".into())
"Expected multiple elements for tuple".into()
]))
} }
} else if *id == literal_id {
let mut parse_literal = |elt: &Expr<T>| {
let ty = parse_type_annotation(resolver, top_level_defs, unifier, primitives, elt)?;
let ty_enum = &*unifier.get_ty_immutable(ty);
match ty_enum {
TypeEnum::TLiteral { values, .. } => Ok(values.clone()),
_ => Err(HashSet::from([
format!("Expected literal in type argument for Literal at {}", elt.location),
]))
}
};
let values = if let Tuple { elts, .. } = &slice.node {
elts.iter()
.map(&mut parse_literal)
.collect::<Result<Vec<_>, _>>()?
} else {
vec![parse_literal(slice)?]
}.into_iter().flatten().collect_vec();
Ok(unifier.get_fresh_literal(values, Some(slice.location)))
} else { } else {
let types = if let Tuple { elts, .. } = &slice.node { let types = if let Tuple { elts, .. } = &slice.node {
elts.iter() elts.iter()
@ -539,15 +295,13 @@ pub fn parse_type_annotation<T>(
let def = top_level_defs[obj_id.0].read(); let def = top_level_defs[obj_id.0].read();
if let TopLevelDef::Class { fields, methods, type_vars, .. } = &*def { if let TopLevelDef::Class { fields, methods, type_vars, .. } = &*def {
if types.len() != type_vars.len() { if types.len() != type_vars.len() {
return Err(HashSet::from([ return Err(format!(
format!( "Unexpected number of type parameters: expected {} but got {}",
"Unexpected number of type parameters: expected {} but got {}", type_vars.len(),
type_vars.len(), types.len()
types.len() ));
),
]))
} }
let mut subst = VarMap::new(); let mut subst = HashMap::new();
for (var, ty) in izip!(type_vars.iter(), types.iter()) { for (var, ty) in izip!(type_vars.iter(), types.iter()) {
let id = if let TypeEnum::TVar { id, .. } = &*unifier.get_ty(*var) { let id = if let TypeEnum::TVar { id, .. } = &*unifier.get_ty(*var) {
*id *id
@ -569,9 +323,7 @@ pub fn parse_type_annotation<T>(
})); }));
Ok(unifier.add_ty(TypeEnum::TObj { obj_id, fields, params: subst })) Ok(unifier.add_ty(TypeEnum::TObj { obj_id, fields, params: subst }))
} else { } else {
Err(HashSet::from([ Err("Cannot use function name as type".into())
"Cannot use function name as type".into(),
]))
} }
} }
}; };
@ -582,17 +334,10 @@ pub fn parse_type_annotation<T>(
if let Name { id, .. } = &value.node { if let Name { id, .. } = &value.node {
subscript_name_handle(id, slice, unifier) subscript_name_handle(id, slice, unifier)
} else { } else {
Err(HashSet::from([ Err(format!("unsupported type expression at {}", expr.location))
format!("unsupported type expression at {}", expr.location),
]))
} }
} }
Constant { value, .. } => SymbolValue::from_constant_inferred(value) _ => Err(format!("unsupported type expression at {}", expr.location)),
.map(|v| unifier.get_fresh_literal(vec![v], Some(expr.location)))
.map_err(|err| HashSet::from([err])),
_ => Err(HashSet::from([
format!("unsupported type expression at {}", expr.location),
])),
} }
} }
@ -603,7 +348,7 @@ impl dyn SymbolResolver + Send + Sync {
unifier: &mut Unifier, unifier: &mut Unifier,
primitives: &PrimitiveStore, primitives: &PrimitiveStore,
expr: &Expr<T>, expr: &Expr<T>,
) -> Result<Type, HashSet<String>> { ) -> Result<Type, String> {
parse_type_annotation(self, top_level_defs, unifier, primitives, expr) parse_type_annotation(self, top_level_defs, unifier, primitives, expr)
} }
@ -616,13 +361,13 @@ impl dyn SymbolResolver + Send + Sync {
unifier.internal_stringify( unifier.internal_stringify(
ty, ty,
&mut |id| { &mut |id| {
let TopLevelDef::Class { name, .. } = &*top_level_defs[id].read() else { if let TopLevelDef::Class { name, .. } = &*top_level_defs[id].read() {
name.to_string()
} else {
unreachable!("expected class definition") unreachable!("expected class definition")
}; }
name.to_string()
}, },
&mut |id| format!("typevar{id}"), &mut |id| format!("typevar{}", id),
&mut None, &mut None,
) )
} }

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,78 +1,10 @@
use std::convert::TryInto; use std::convert::TryInto;
use crate::symbol_resolver::SymbolValue; use crate::symbol_resolver::SymbolValue;
use crate::typecheck::typedef::{Mapping, VarMap};
use nac3parser::ast::{Constant, Location}; use nac3parser::ast::{Constant, Location};
use super::*; use super::*;
/// Structure storing [`DefinitionId`] for primitive types.
#[derive(Clone, Copy)]
pub struct PrimitiveDefinitionIds {
pub int32: DefinitionId,
pub int64: DefinitionId,
pub uint32: DefinitionId,
pub uint64: DefinitionId,
pub float: DefinitionId,
pub bool: DefinitionId,
pub none: DefinitionId,
pub range: DefinitionId,
pub str: DefinitionId,
pub exception: DefinitionId,
pub option: DefinitionId,
pub ndarray: DefinitionId,
}
impl PrimitiveDefinitionIds {
/// Returns all [`DefinitionId`] of primitives as a [`Vec`].
///
/// There are no guarantees on ordering of the IDs.
#[must_use]
fn as_vec(&self) -> Vec<DefinitionId> {
vec![
self.int32,
self.int64,
self.uint32,
self.uint64,
self.float,
self.bool,
self.none,
self.range,
self.str,
self.exception,
self.option,
self.ndarray,
]
}
/// Returns an iterator over all [`DefinitionId`]s of this instance in indeterminate order.
pub fn iter(&self) -> impl Iterator<Item=DefinitionId> {
self.as_vec().into_iter()
}
/// Returns the primitive with the largest [`DefinitionId`].
#[must_use]
pub fn max_id(&self) -> DefinitionId {
self.iter().max().unwrap()
}
}
/// The [definition IDs][DefinitionId] for primitive types.
pub const PRIMITIVE_DEF_IDS: PrimitiveDefinitionIds = PrimitiveDefinitionIds {
int32: DefinitionId(0),
int64: DefinitionId(1),
uint32: DefinitionId(8),
uint64: DefinitionId(9),
float: DefinitionId(2),
bool: DefinitionId(3),
none: DefinitionId(4),
range: DefinitionId(5),
str: DefinitionId(6),
exception: DefinitionId(7),
option: DefinitionId(10),
ndarray: DefinitionId(14),
};
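// Hypothetical helper showing how the reserved IDs above can be consumed: with
// `ndarray` at DefinitionId(14) being the largest, IDs from 15 upwards are free
// for later definitions (a sketch; the composer may reserve IDs differently).
fn first_free_definition_id() -> usize {
    PRIMITIVE_DEF_IDS.max_id().0 + 1 // == 15 with the table above
}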
impl TopLevelDef { impl TopLevelDef {
pub fn to_string(&self, unifier: &mut Unifier) -> String { pub fn to_string(&self, unifier: &mut Unifier) -> String {
match self { match self {
@ -111,46 +43,45 @@ impl TopLevelDef {
} }
impl TopLevelComposer { impl TopLevelComposer {
#[must_use] pub fn make_primitives() -> (PrimitiveStore, Unifier) {
pub fn make_primitives(size_t: u32) -> (PrimitiveStore, Unifier) {
let mut unifier = Unifier::new(); let mut unifier = Unifier::new();
let int32 = unifier.add_ty(TypeEnum::TObj { let int32 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.int32, obj_id: DefinitionId(0),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let int64 = unifier.add_ty(TypeEnum::TObj { let int64 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.int64, obj_id: DefinitionId(1),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let float = unifier.add_ty(TypeEnum::TObj { let float = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.float, obj_id: DefinitionId(2),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let bool = unifier.add_ty(TypeEnum::TObj { let bool = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.bool, obj_id: DefinitionId(3),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let none = unifier.add_ty(TypeEnum::TObj { let none = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.none, obj_id: DefinitionId(4),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let range = unifier.add_ty(TypeEnum::TObj { let range = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.range, obj_id: DefinitionId(5),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let str = unifier.add_ty(TypeEnum::TObj { let str = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.str, obj_id: DefinitionId(6),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let exception = unifier.add_ty(TypeEnum::TObj { let exception = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.exception, obj_id: DefinitionId(7),
fields: vec![ fields: vec![
("__name__".into(), (int32, true)), ("__name__".into(), (int32, true)),
("__file__".into(), (str, true)), ("__file__".into(), (str, true)),
@ -164,32 +95,32 @@ impl TopLevelComposer {
] ]
.into_iter() .into_iter()
.collect::<HashMap<_, _>>(), .collect::<HashMap<_, _>>(),
params: VarMap::new(), params: HashMap::new(),
}); });
let uint32 = unifier.add_ty(TypeEnum::TObj { let uint32 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.uint32, obj_id: DefinitionId(8),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let uint64 = unifier.add_ty(TypeEnum::TObj { let uint64 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.uint64, obj_id: DefinitionId(9),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let option_type_var = unifier.get_fresh_var(Some("option_type_var".into()), None); let option_type_var = unifier.get_fresh_var(Some("option_type_var".into()), None);
let is_some_type_fun_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature { let is_some_type_fun_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature {
args: vec![], args: vec![],
ret: bool, ret: bool,
vars: VarMap::from([(option_type_var.1, option_type_var.0)]), vars: HashMap::from([(option_type_var.1, option_type_var.0)]),
})); }));
let unwrap_fun_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature { let unwrap_fun_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature {
args: vec![], args: vec![],
ret: option_type_var.0, ret: option_type_var.0,
vars: VarMap::from([(option_type_var.1, option_type_var.0)]), vars: HashMap::from([(option_type_var.1, option_type_var.0)]),
})); }));
let option = unifier.add_ty(TypeEnum::TObj { let option = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.option, obj_id: DefinitionId(10),
fields: vec![ fields: vec![
("is_some".into(), (is_some_type_fun_ty, true)), ("is_some".into(), (is_some_type_fun_ty, true)),
("is_none".into(), (is_some_type_fun_ty, true)), ("is_none".into(), (is_some_type_fun_ty, true)),
@ -197,79 +128,30 @@ impl TopLevelComposer {
] ]
.into_iter() .into_iter()
.collect::<HashMap<_, _>>(), .collect::<HashMap<_, _>>(),
params: VarMap::from([(option_type_var.1, option_type_var.0)]), params: HashMap::from([(option_type_var.1, option_type_var.0)]),
}); });
let size_t_ty = match size_t {
32 => uint32,
64 => uint64,
_ => unreachable!(),
};
let ndarray_dtype_tvar = unifier.get_fresh_var(Some("ndarray_dtype".into()), None);
let ndarray_ndims_tvar = unifier.get_fresh_const_generic_var(size_t_ty, Some("ndarray_ndims".into()), None);
let ndarray_copy_fun_ret_ty = unifier.get_fresh_var(None, None);
let ndarray_copy_fun_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature {
args: vec![],
ret: ndarray_copy_fun_ret_ty.0,
vars: VarMap::from([
(ndarray_dtype_tvar.1, ndarray_dtype_tvar.0),
(ndarray_ndims_tvar.1, ndarray_ndims_tvar.0),
]),
}));
let ndarray_fill_fun_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature {
args: vec![
FuncArg {
name: "value".into(),
ty: ndarray_dtype_tvar.0,
default_value: None,
},
],
ret: none,
vars: VarMap::from([
(ndarray_dtype_tvar.1, ndarray_dtype_tvar.0),
(ndarray_ndims_tvar.1, ndarray_ndims_tvar.0),
]),
}));
let ndarray = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.ndarray,
fields: Mapping::from([
("copy".into(), (ndarray_copy_fun_ty, true)),
("fill".into(), (ndarray_fill_fun_ty, true)),
]),
params: VarMap::from([
(ndarray_dtype_tvar.1, ndarray_dtype_tvar.0),
(ndarray_ndims_tvar.1, ndarray_ndims_tvar.0),
]),
});
unifier.unify(ndarray_copy_fun_ret_ty.0, ndarray).unwrap();
let primitives = PrimitiveStore { let primitives = PrimitiveStore {
int32, int32,
int64, int64,
uint32,
uint64,
float, float,
bool, bool,
none, none,
range, range,
str, str,
exception, exception,
uint32,
uint64,
option, option,
ndarray,
size_t,
}; };
unifier.put_primitive_store(&primitives);
crate::typecheck::magic_methods::set_primitives_magic_methods(&primitives, &mut unifier); crate::typecheck::magic_methods::set_primitives_magic_methods(&primitives, &mut unifier);
(primitives, unifier) (primitives, unifier)
} }
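// Minimal usage sketch of `make_primitives`, assuming a 64-bit `size_t` target
// (hypothetical test; `TopLevelComposer` and `Unifier` as defined in this crate):
#[test]
fn make_primitives_sketch() {
    let (primitives, mut unifier) = TopLevelComposer::make_primitives(64);
    // Distinct primitive object types do not unify.
    assert!(!unifier.unioned(primitives.none, primitives.bool));
}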
/// already include the `definition_id` of itself inside the ancestors vector /// already include the definition_id of itself inside the ancestors vector
/// when first registering, the `type_vars`, fields, methods, ancestors are invalid /// when first registering, the type_vars, fields, methods, ancestors are invalid
#[must_use]
pub fn make_top_level_class_def( pub fn make_top_level_class_def(
obj_id: DefinitionId, index: usize,
resolver: Option<Arc<dyn SymbolResolver + Send + Sync>>, resolver: Option<Arc<dyn SymbolResolver + Send + Sync>>,
name: StrRef, name: StrRef,
constructor: Option<Type>, constructor: Option<Type>,
@ -277,11 +159,11 @@ impl TopLevelComposer {
) -> TopLevelDef { ) -> TopLevelDef {
TopLevelDef::Class { TopLevelDef::Class {
name, name,
object_id: obj_id, object_id: DefinitionId(index),
type_vars: Vec::default(), type_vars: Default::default(),
fields: Vec::default(), fields: Default::default(),
methods: Vec::default(), methods: Default::default(),
ancestors: Vec::default(), ancestors: Default::default(),
constructor, constructor,
resolver, resolver,
loc, loc,
@ -289,7 +171,6 @@ impl TopLevelComposer {
} }
/// when first registering, the type is a invalid value /// when first registering, the type is a invalid value
#[must_use]
pub fn make_top_level_function_def( pub fn make_top_level_function_def(
name: String, name: String,
simple_name: StrRef, simple_name: StrRef,
@ -301,16 +182,15 @@ impl TopLevelComposer {
name, name,
simple_name, simple_name,
signature: ty, signature: ty,
var_id: Vec::default(), var_id: Default::default(),
instance_to_symbol: HashMap::default(), instance_to_symbol: Default::default(),
instance_to_stmt: HashMap::default(), instance_to_stmt: Default::default(),
resolver, resolver,
codegen_callback: None, codegen_callback: None,
loc, loc,
} }
} }
#[must_use]
pub fn make_class_method_name(mut class_name: String, method_name: &str) -> String { pub fn make_class_method_name(mut class_name: String, method_name: &str) -> String {
class_name.push('.'); class_name.push('.');
class_name.push_str(method_name); class_name.push_str(method_name);
@ -320,13 +200,13 @@ impl TopLevelComposer {
pub fn get_class_method_def_info( pub fn get_class_method_def_info(
class_methods_def: &[(StrRef, Type, DefinitionId)], class_methods_def: &[(StrRef, Type, DefinitionId)],
method_name: StrRef, method_name: StrRef,
) -> Result<(Type, DefinitionId), HashSet<String>> { ) -> Result<(Type, DefinitionId), String> {
for (name, ty, def_id) in class_methods_def { for (name, ty, def_id) in class_methods_def {
if name == &method_name { if name == &method_name {
return Ok((*ty, *def_id)); return Ok((*ty, *def_id));
} }
} }
Err(HashSet::from([format!("no method {method_name} in the current class")])) Err(format!("no method {} in the current class", method_name))
} }
/// get all base class def id of a class, excluding itself. \ /// get all base class def id of a class, excluding itself. \
@ -337,7 +217,7 @@ impl TopLevelComposer {
pub fn get_all_ancestors_helper( pub fn get_all_ancestors_helper(
child: &TypeAnnotation, child: &TypeAnnotation,
temp_def_list: &[Arc<RwLock<TopLevelDef>>], temp_def_list: &[Arc<RwLock<TopLevelDef>>],
) -> Result<Vec<TypeAnnotation>, HashSet<String>> { ) -> Result<Vec<TypeAnnotation>, String> {
let mut result: Vec<TypeAnnotation> = Vec::new(); let mut result: Vec<TypeAnnotation> = Vec::new();
let mut parent = Self::get_parent(child, temp_def_list); let mut parent = Self::get_parent(child, temp_def_list);
while let Some(p) = parent { while let Some(p) = parent {
@ -349,16 +229,16 @@ impl TopLevelComposer {
}; };
// check cycle // check cycle
let no_cycle = result.iter().all(|x| { let no_cycle = result.iter().all(|x| {
let TypeAnnotation::CustomClass { id, .. } = x else { if let TypeAnnotation::CustomClass { id, .. } = x {
id.0 != p_id.0
} else {
unreachable!("must be class kind annotation") unreachable!("must be class kind annotation")
}; }
id.0 != p_id.0
}); });
if no_cycle { if no_cycle {
result.push(p); result.push(p);
} else { } else {
return Err(HashSet::from(["cyclic inheritance detected".into()])); return Err("cyclic inheritance detected".into());
} }
} }
Ok(result) Ok(result)
@ -376,25 +256,23 @@ impl TopLevelComposer {
}; };
let child_def = temp_def_list.get(child_id.0).unwrap(); let child_def = temp_def_list.get(child_id.0).unwrap();
let child_def = child_def.read(); let child_def = child_def.read();
let TopLevelDef::Class { ancestors, .. } = &*child_def else { if let TopLevelDef::Class { ancestors, .. } = &*child_def {
unreachable!("child must be top level class def") if !ancestors.is_empty() {
}; Some(ancestors[0].clone())
} else {
if ancestors.is_empty() { None
None }
} else { } else {
Some(ancestors[0].clone()) unreachable!("child must be top level class def")
} }
} }
/// get the `var_id` of a given `TVar` type /// get the var_id of a given TVar type
pub fn get_var_id(var_ty: Type, unifier: &mut Unifier) -> Result<u32, HashSet<String>> { pub fn get_var_id(var_ty: Type, unifier: &mut Unifier) -> Result<u32, String> {
if let TypeEnum::TVar { id, .. } = unifier.get_ty(var_ty).as_ref() { if let TypeEnum::TVar { id, .. } = unifier.get_ty(var_ty).as_ref() {
Ok(*id) Ok(*id)
} else { } else {
Err(HashSet::from([ Err("not type var".to_string())
"not type var".to_string(),
]))
} }
} }
@ -408,38 +286,39 @@ impl TopLevelComposer {
let this = this.as_ref(); let this = this.as_ref();
let other = unifier.get_ty(other); let other = unifier.get_ty(other);
let other = other.as_ref(); let other = other.as_ref();
let ( if let (
TypeEnum::TFunc(FunSignature { args: this_args, ret: this_ret, .. }), TypeEnum::TFunc(FunSignature { args: this_args, ret: this_ret, .. }),
TypeEnum::TFunc(FunSignature { args: other_args, ret: other_ret, .. }), TypeEnum::TFunc(FunSignature { args: other_args, ret: other_ret, .. }),
) = (this, other) else { ) = (this, other)
{
// check args
let args_ok = this_args
.iter()
.map(|FuncArg { name, ty, .. }| (name, type_var_to_concrete_def.get(ty).unwrap()))
.zip(other_args.iter().map(|FuncArg { name, ty, .. }| {
(name, type_var_to_concrete_def.get(ty).unwrap())
}))
.all(|(this, other)| {
if this.0 == &"self".into() && this.0 == other.0 {
true
} else {
this.0 == other.0
&& check_overload_type_annotation_compatible(this.1, other.1, unifier)
}
});
// check rets
let ret_ok = check_overload_type_annotation_compatible(
type_var_to_concrete_def.get(this_ret).unwrap(),
type_var_to_concrete_def.get(other_ret).unwrap(),
unifier,
);
// return
args_ok && ret_ok
} else {
unreachable!("this function must be called with function type") unreachable!("this function must be called with function type")
}; }
// check args
let args_ok = this_args
.iter()
.map(|FuncArg { name, ty, .. }| (name, type_var_to_concrete_def.get(ty).unwrap()))
.zip(other_args.iter().map(|FuncArg { name, ty, .. }| {
(name, type_var_to_concrete_def.get(ty).unwrap())
}))
.all(|(this, other)| {
if this.0 == &"self".into() && this.0 == other.0 {
true
} else {
this.0 == other.0
&& check_overload_type_annotation_compatible(this.1, other.1, unifier)
}
});
// check rets
let ret_ok = check_overload_type_annotation_compatible(
type_var_to_concrete_def.get(this_ret).unwrap(),
type_var_to_concrete_def.get(other_ret).unwrap(),
unifier,
);
// return
args_ok && ret_ok
} }
pub fn check_overload_field_type( pub fn check_overload_field_type(
@ -455,7 +334,7 @@ impl TopLevelComposer {
) )
} }
pub fn get_all_assigned_field(stmts: &[Stmt<()>]) -> Result<HashSet<StrRef>, HashSet<String>> { pub fn get_all_assigned_field(stmts: &[ast::Stmt<()>]) -> Result<HashSet<StrRef>, String> {
let mut result = HashSet::new(); let mut result = HashSet::new();
for s in stmts { for s in stmts {
match &s.node { match &s.node {
@ -472,12 +351,10 @@ impl TopLevelComposer {
} }
} => } =>
{ {
return Err(HashSet::from([ return Err(format!(
format!( "redundant type annotation for class fields at {}",
"redundant type annotation for class fields at {}", s.location
s.location ))
),
]))
} }
ast::StmtKind::Assign { targets, .. } => { ast::StmtKind::Assign { targets, .. } => {
for t in targets { for t in targets {
@ -499,14 +376,14 @@ impl TopLevelComposer {
ast::StmtKind::If { body, orelse, .. } => { ast::StmtKind::If { body, orelse, .. } => {
let inited_for_sure = Self::get_all_assigned_field(body.as_slice())? let inited_for_sure = Self::get_all_assigned_field(body.as_slice())?
.intersection(&Self::get_all_assigned_field(orelse.as_slice())?) .intersection(&Self::get_all_assigned_field(orelse.as_slice())?)
.copied() .cloned()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
result.extend(inited_for_sure); result.extend(inited_for_sure);
} }
ast::StmtKind::Try { body, orelse, finalbody, .. } => { ast::StmtKind::Try { body, orelse, finalbody, .. } => {
let inited_for_sure = Self::get_all_assigned_field(body.as_slice())? let inited_for_sure = Self::get_all_assigned_field(body.as_slice())?
.intersection(&Self::get_all_assigned_field(orelse.as_slice())?) .intersection(&Self::get_all_assigned_field(orelse.as_slice())?)
.copied() .cloned()
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
result.extend(inited_for_sure); result.extend(inited_for_sure);
result.extend(Self::get_all_assigned_field(finalbody.as_slice())?); result.extend(Self::get_all_assigned_field(finalbody.as_slice())?);
@ -514,9 +391,9 @@ impl TopLevelComposer {
ast::StmtKind::With { body, .. } => { ast::StmtKind::With { body, .. } => {
result.extend(Self::get_all_assigned_field(body.as_slice())?); result.extend(Self::get_all_assigned_field(body.as_slice())?);
} }
ast::StmtKind::Pass { .. } ast::StmtKind::Pass { .. } => {}
| ast::StmtKind::Assert { .. } ast::StmtKind::Assert { .. } => {}
| ast::StmtKind::Expr { .. } => {} ast::StmtKind::Expr { .. } => {}
_ => { _ => {
unimplemented!() unimplemented!()
@ -529,7 +406,7 @@ impl TopLevelComposer {
pub fn parse_parameter_default_value( pub fn parse_parameter_default_value(
default: &ast::Expr, default: &ast::Expr,
resolver: &(dyn SymbolResolver + Send + Sync), resolver: &(dyn SymbolResolver + Send + Sync),
) -> Result<SymbolValue, HashSet<String>> { ) -> Result<SymbolValue, String> {
parse_parameter_default_value(default, resolver) parse_parameter_default_value(default, resolver)
} }
@ -539,6 +416,40 @@ impl TopLevelComposer {
primitive: &PrimitiveStore, primitive: &PrimitiveStore,
unifier: &mut Unifier, unifier: &mut Unifier,
) -> Result<(), String> { ) -> Result<(), String> {
fn type_default_param(
val: &SymbolValue,
primitive: &PrimitiveStore,
unifier: &mut Unifier,
) -> TypeAnnotation {
match val {
SymbolValue::Bool(..) => TypeAnnotation::Primitive(primitive.bool),
SymbolValue::Double(..) => TypeAnnotation::Primitive(primitive.float),
SymbolValue::I32(..) => TypeAnnotation::Primitive(primitive.int32),
SymbolValue::I64(..) => TypeAnnotation::Primitive(primitive.int64),
SymbolValue::U32(..) => TypeAnnotation::Primitive(primitive.uint32),
SymbolValue::U64(..) => TypeAnnotation::Primitive(primitive.uint64),
SymbolValue::Str(..) => TypeAnnotation::Primitive(primitive.str),
SymbolValue::Tuple(vs) => {
let vs_tys = vs
.iter()
.map(|v| type_default_param(v, primitive, unifier))
.collect::<Vec<_>>();
TypeAnnotation::Tuple(vs_tys)
}
SymbolValue::OptionNone => TypeAnnotation::CustomClass {
id: primitive.option.get_obj_id(unifier),
params: Default::default(),
},
SymbolValue::OptionSome(v) => {
let ty = type_default_param(v, primitive, unifier);
TypeAnnotation::CustomClass {
id: primitive.option.get_obj_id(unifier),
params: vec![ty],
}
}
}
}
fn is_compatible( fn is_compatible(
found: &TypeAnnotation, found: &TypeAnnotation,
expect: &TypeAnnotation, expect: &TypeAnnotation,
@ -554,7 +465,7 @@ impl TopLevelComposer {
TypeAnnotation::CustomClass { id: e_id, params: e_param }, TypeAnnotation::CustomClass { id: e_id, params: e_param },
) => { ) => {
*f_id == *e_id *f_id == *e_id
&& *f_id == primitive.option.obj_id(unifier).unwrap() && *f_id == primitive.option.get_obj_id(unifier)
&& (f_param.is_empty() && (f_param.is_empty()
|| (f_param.len() == 1 || (f_param.len() == 1
&& e_param.len() == 1 && e_param.len() == 1
@ -570,15 +481,15 @@ impl TopLevelComposer {
} }
} }
let found = val.get_type_annotation(primitive, unifier); let found = type_default_param(val, primitive, unifier);
if is_compatible(&found, ty, unifier, primitive) { if !is_compatible(&found, ty, unifier, primitive) {
Ok(())
} else {
Err(format!( Err(format!(
"incompatible default parameter type, expect {}, found {}", "incompatible default parameter type, expect {}, found {}",
ty.stringify(unifier), ty.stringify(unifier),
found.stringify(unifier), found.stringify(unifier),
)) ))
} else {
Ok(())
} }
} }
} }
@ -586,14 +497,14 @@ impl TopLevelComposer {
pub fn parse_parameter_default_value( pub fn parse_parameter_default_value(
default: &ast::Expr, default: &ast::Expr,
resolver: &(dyn SymbolResolver + Send + Sync), resolver: &(dyn SymbolResolver + Send + Sync),
) -> Result<SymbolValue, HashSet<String>> { ) -> Result<SymbolValue, String> {
fn handle_constant(val: &Constant, loc: &Location) -> Result<SymbolValue, HashSet<String>> { fn handle_constant(val: &Constant, loc: &Location) -> Result<SymbolValue, String> {
match val { match val {
Constant::Int(v) => { Constant::Int(v) => {
if let Ok(v) = (*v).try_into() { if let Ok(v) = (*v).try_into() {
Ok(SymbolValue::I32(v)) Ok(SymbolValue::I32(v))
} else { } else {
Err(HashSet::from([format!("integer value out of range at {loc}")])) Err(format!("integer value out of range at {}", loc))
} }
} }
Constant::Float(v) => Ok(SymbolValue::Double(*v)), Constant::Float(v) => Ok(SymbolValue::Double(*v)),
@ -601,11 +512,10 @@ pub fn parse_parameter_default_value(
Constant::Tuple(tuple) => Ok(SymbolValue::Tuple( Constant::Tuple(tuple) => Ok(SymbolValue::Tuple(
tuple.iter().map(|x| handle_constant(x, loc)).collect::<Result<Vec<_>, _>>()?, tuple.iter().map(|x| handle_constant(x, loc)).collect::<Result<Vec<_>, _>>()?,
)), )),
Constant::None => Err(HashSet::from([ Constant::None => Err(format!(
format!( "`None` is not supported, use `none` for option type instead ({})",
"`None` is not supported, use `none` for option type instead ({loc})" loc
), )),
])),
_ => unimplemented!("this constant is not supported at {}", loc), _ => unimplemented!("this constant is not supported at {}", loc),
} }
} }
@ -618,51 +528,37 @@ pub fn parse_parameter_default_value(
let v: Result<i64, _> = (*v).try_into(); let v: Result<i64, _> = (*v).try_into();
match v { match v {
Ok(v) => Ok(SymbolValue::I64(v)), Ok(v) => Ok(SymbolValue::I64(v)),
_ => Err(HashSet::from([ _ => Err(format!("default param value out of range at {}", default.location)),
format!("default param value out of range at {}", default.location)
])),
} }
} }
_ => Err(HashSet::from([ _ => Err(format!("only allow constant integer here at {}", default.location))
format!("only allow constant integer here at {}", default.location),
]))
} }
ast::ExprKind::Name { id, .. } if *id == "uint32".into() => match &args[0].node { ast::ExprKind::Name { id, .. } if *id == "uint32".into() => match &args[0].node {
ast::ExprKind::Constant { value: Constant::Int(v), .. } => { ast::ExprKind::Constant { value: Constant::Int(v), .. } => {
let v: Result<u32, _> = (*v).try_into(); let v: Result<u32, _> = (*v).try_into();
match v { match v {
Ok(v) => Ok(SymbolValue::U32(v)), Ok(v) => Ok(SymbolValue::U32(v)),
_ => Err(HashSet::from([ _ => Err(format!("default param value out of range at {}", default.location)),
format!("default param value out of range at {}", default.location),
])),
} }
} }
_ => Err(HashSet::from([ _ => Err(format!("only allow constant integer here at {}", default.location))
format!("only allow constant integer here at {}", default.location),
]))
} }
ast::ExprKind::Name { id, .. } if *id == "uint64".into() => match &args[0].node { ast::ExprKind::Name { id, .. } if *id == "uint64".into() => match &args[0].node {
ast::ExprKind::Constant { value: Constant::Int(v), .. } => { ast::ExprKind::Constant { value: Constant::Int(v), .. } => {
let v: Result<u64, _> = (*v).try_into(); let v: Result<u64, _> = (*v).try_into();
match v { match v {
Ok(v) => Ok(SymbolValue::U64(v)), Ok(v) => Ok(SymbolValue::U64(v)),
_ => Err(HashSet::from([ _ => Err(format!("default param value out of range at {}", default.location)),
format!("default param value out of range at {}", default.location),
])),
} }
} }
_ => Err(HashSet::from([ _ => Err(format!("only allow constant integer here at {}", default.location))
format!("only allow constant integer here at {}", default.location),
]))
} }
ast::ExprKind::Name { id, .. } if *id == "Some".into() => Ok( ast::ExprKind::Name { id, .. } if *id == "Some".into() => Ok(
SymbolValue::OptionSome( SymbolValue::OptionSome(
Box::new(parse_parameter_default_value(&args[0], resolver)?) Box::new(parse_parameter_default_value(&args[0], resolver)?)
) )
), ),
_ => Err(HashSet::from([ _ => Err(format!("unsupported default parameter at {}", default.location)),
format!("unsupported default parameter at {}", default.location),
])),
} }
} }
ast::ExprKind::Tuple { elts, .. } => Ok(SymbolValue::Tuple(elts ast::ExprKind::Tuple { elts, .. } => Ok(SymbolValue::Tuple(elts
@ -673,21 +569,17 @@ pub fn parse_parameter_default_value(
ast::ExprKind::Name { id, .. } if id == &"none".into() => Ok(SymbolValue::OptionNone), ast::ExprKind::Name { id, .. } if id == &"none".into() => Ok(SymbolValue::OptionNone),
ast::ExprKind::Name { id, .. } => { ast::ExprKind::Name { id, .. } => {
resolver.get_default_param_value(default).ok_or_else( resolver.get_default_param_value(default).ok_or_else(
|| HashSet::from([ || format!(
format!( "`{}` cannot be used as a default parameter at {} \
"`{}` cannot be used as a default parameter at {} \ (not primitive type, option or tuple / not defined?)",
(not primitive type, option or tuple / not defined?)", id,
id, default.location
default.location )
),
])
) )
} }
_ => Err(HashSet::from([ _ => Err(format!(
format!( "unsupported default parameter (not primitive type, option or tuple) at {}",
"unsupported default parameter (not primitive type, option or tuple) at {}", default.location
default.location ))
),
]))
} }
} }
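The hunk above reworks `parse_parameter_default_value`, which maps constant default expressions such as `int64(...)`, `uint32(...)`, `Some(...)`, `none` and tuples onto `SymbolValue` variants; one side reports failures as a `HashSet<String>`, the other as a plain `String`. The sketch below is not the nac3 implementation: `Expr`, `SymbolValue` and the message strings are simplified stand-ins, and unlike the short-circuiting `collect::<Result<...>>` used above it also merges the errors of all failing tuple elements into one set, which is one convenience of a set-valued error type.

```
use std::collections::HashSet;

/// Simplified stand-in for the parser's constant expressions (not nac3's real AST).
#[derive(Debug)]
enum Expr {
    Int(i128),
    Float(f64),
    Tuple(Vec<Expr>),
    /// A constructor call such as `int64(42)` or `Some(1.5)`.
    Call(&'static str, Box<Expr>),
    NonePy, // Python's `None` literal, rejected in favour of `none`
}

/// Simplified stand-in for nac3's `SymbolValue`.
#[derive(Debug)]
enum SymbolValue {
    I32(i32),
    I64(i64),
    Double(f64),
    Tuple(Vec<SymbolValue>),
    OptionSome(Box<SymbolValue>),
}

/// Map a constant default-value expression to a `SymbolValue`,
/// accumulating every error message into a `HashSet<String>`.
fn parse_default(expr: &Expr) -> Result<SymbolValue, HashSet<String>> {
    match expr {
        Expr::Int(v) => i32::try_from(*v)
            .map(SymbolValue::I32)
            .map_err(|_| HashSet::from(["integer value out of range".to_string()])),
        Expr::Float(v) => Ok(SymbolValue::Double(*v)),
        Expr::NonePy => Err(HashSet::from([
            "`None` is not supported, use `none` for option type instead".to_string(),
        ])),
        Expr::Tuple(elts) => {
            // Visit every element; merge the error sets of all failing elements.
            let (mut values, mut errors) = (Vec::new(), HashSet::new());
            for e in elts {
                match parse_default(e) {
                    Ok(v) => values.push(v),
                    Err(es) => errors.extend(es),
                }
            }
            if errors.is_empty() { Ok(SymbolValue::Tuple(values)) } else { Err(errors) }
        }
        Expr::Call("int64", arg) => match &**arg {
            Expr::Int(v) => i64::try_from(*v)
                .map(SymbolValue::I64)
                .map_err(|_| HashSet::from(["default param value out of range".to_string()])),
            _ => Err(HashSet::from(["only allow constant integer here".to_string()])),
        },
        Expr::Call("Some", arg) => Ok(SymbolValue::OptionSome(Box::new(parse_default(arg)?))),
        Expr::Call(name, _) => Err(HashSet::from([format!("unsupported default parameter `{name}`")])),
    }
}

fn main() {
    let ok = Expr::Tuple(vec![Expr::Int(1), Expr::Call("int64", Box::new(Expr::Int(1 << 40)))]);
    println!("{:?}", parse_default(&ok)); // Ok(Tuple([I32(1), I64(1099511627776)]))

    // Two independent problems are reported together in one HashSet.
    let bad = Expr::Tuple(vec![Expr::NonePy, Expr::Int(i128::from(i64::MAX))]);
    println!("{:?}", parse_default(&bad));
}
```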

View File

@ -3,12 +3,13 @@ use std::{
collections::{HashMap, HashSet}, collections::{HashMap, HashSet},
fmt::Debug, fmt::Debug,
iter::FromIterator, iter::FromIterator,
ops::{Deref, DerefMut},
sync::Arc, sync::Arc,
}; };
use super::codegen::CodeGenContext; use super::codegen::CodeGenContext;
use super::typecheck::type_inferencer::PrimitiveStore; use super::typecheck::type_inferencer::PrimitiveStore;
use super::typecheck::typedef::{FunSignature, FuncArg, SharedUnifier, Type, TypeEnum, Unifier, VarMap}; use super::typecheck::typedef::{FunSignature, FuncArg, SharedUnifier, Type, TypeEnum, Unifier};
use crate::{ use crate::{
codegen::CodeGenerator, codegen::CodeGenerator,
symbol_resolver::{SymbolResolver, ValueEnum}, symbol_resolver::{SymbolResolver, ValueEnum},
@ -25,14 +26,13 @@ pub struct DefinitionId(pub usize);
pub mod builtins; pub mod builtins;
pub mod composer; pub mod composer;
pub mod helper; pub mod helper;
pub mod numpy;
pub mod type_annotation; pub mod type_annotation;
use composer::*; use composer::*;
use type_annotation::*; use type_annotation::*;
#[cfg(test)] #[cfg(test)]
mod test; mod test;
type GenCallCallback = type GenCallCallback = Box<
dyn for<'ctx, 'a> Fn( dyn for<'ctx, 'a> Fn(
&mut CodeGenContext<'ctx, 'a>, &mut CodeGenContext<'ctx, 'a>,
Option<(Type, ValueEnum<'ctx>)>, Option<(Type, ValueEnum<'ctx>)>,
@ -41,28 +41,21 @@ type GenCallCallback =
&mut dyn CodeGenerator, &mut dyn CodeGenerator,
) -> Result<Option<BasicValueEnum<'ctx>>, String> ) -> Result<Option<BasicValueEnum<'ctx>>, String>
+ Send + Send
+ Sync; + Sync,
>;
pub struct GenCall { pub struct GenCall {
fp: Box<GenCallCallback>, fp: GenCallCallback,
} }
impl GenCall { impl GenCall {
#[must_use] pub fn new(fp: GenCallCallback) -> GenCall {
pub fn new(fp: Box<GenCallCallback>) -> GenCall {
GenCall { fp } GenCall { fp }
} }
/// Creates a dummy instance of [`GenCall`], which invokes [`unreachable!()`] with the given pub fn run<'ctx, 'a>(
/// `reason`.
#[must_use]
pub fn create_dummy(reason: String) -> GenCall {
Self::new(Box::new(move |_, _, _, _, _| unreachable!("{reason}")))
}
pub fn run<'ctx>(
&self, &self,
ctx: &mut CodeGenContext<'ctx, '_>, ctx: &mut CodeGenContext<'ctx, 'a>,
obj: Option<(Type, ValueEnum<'ctx>)>, obj: Option<(Type, ValueEnum<'ctx>)>,
fun: (&FunSignature, DefinitionId), fun: (&FunSignature, DefinitionId),
args: Vec<(Option<StrRef>, ValueEnum<'ctx>)>, args: Vec<(Option<StrRef>, ValueEnum<'ctx>)>,
@ -82,62 +75,58 @@ impl Debug for GenCall {
pub struct FunInstance { pub struct FunInstance {
pub body: Arc<Vec<Stmt<Option<Type>>>>, pub body: Arc<Vec<Stmt<Option<Type>>>>,
pub calls: Arc<HashMap<CodeLocation, CallId>>, pub calls: Arc<HashMap<CodeLocation, CallId>>,
pub subst: VarMap, pub subst: HashMap<u32, Type>,
pub unifier_id: usize, pub unifier_id: usize,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub enum TopLevelDef { pub enum TopLevelDef {
Class { Class {
/// Name for error messages and symbols. // name for error messages and symbols
name: StrRef, name: StrRef,
/// Object ID used for [TypeEnum]. // object ID used for TypeEnum
object_id: DefinitionId, object_id: DefinitionId,
/// type variables bounded to the class. /// type variables bounded to the class.
type_vars: Vec<Type>, type_vars: Vec<Type>,
/// Class fields. // class fields
/// // name, type, is mutable
/// Name and type is mutable.
fields: Vec<(StrRef, Type, bool)>, fields: Vec<(StrRef, Type, bool)>,
/// Class methods, pointing to the corresponding function definition. // class methods, pointing to the corresponding function definition.
methods: Vec<(StrRef, Type, DefinitionId)>, methods: Vec<(StrRef, Type, DefinitionId)>,
/// Ancestor classes, including itself. // ancestor classes, including itself.
ancestors: Vec<TypeAnnotation>, ancestors: Vec<TypeAnnotation>,
/// Symbol resolver of the module defined the class; [None] if it is built-in type. // symbol resolver of the module defined the class, none if it is built-in type
resolver: Option<Arc<dyn SymbolResolver + Send + Sync>>, resolver: Option<Arc<dyn SymbolResolver + Send + Sync>>,
/// Constructor type. // constructor type
constructor: Option<Type>, constructor: Option<Type>,
/// Definition location. // definition location
loc: Option<Location>, loc: Option<Location>,
}, },
Function { Function {
/// Prefix for symbol, should be unique globally. // prefix for symbol, should be unique globally
name: String, name: String,
/// Simple name, the same as in method/function definition. // simple name, the same as in method/function definition
simple_name: StrRef, simple_name: StrRef,
/// Function signature. // function signature.
signature: Type, signature: Type,
/// Instantiated type variable IDs. // instantiated type variable IDs
var_id: Vec<u32>, var_id: Vec<u32>,
/// Function instance to symbol mapping /// Function instance to symbol mapping
/// /// Key: string representation of type variable values, sorted by variable ID in ascending
/// * Key: String representation of type variable values, sorted by variable ID in ascending
/// order, including type variables associated with the class. /// order, including type variables associated with the class.
/// * Value: Function symbol name. /// Value: function symbol name.
instance_to_symbol: HashMap<String, String>, instance_to_symbol: HashMap<String, String>,
/// Function instances to annotated AST mapping /// Function instances to annotated AST mapping
/// /// Key: string representation of type variable values, sorted by variable ID in ascending
/// * Key: String representation of type variable values, sorted by variable ID in ascending
/// order, including type variables associated with the class. Excluding rigid type /// order, including type variables associated with the class. Excluding rigid type
/// variables. /// variables.
/// /// rigid type variables that would be substituted when the function is instantiated.
/// Rigid type variables that would be substituted when the function is instantiated.
instance_to_stmt: HashMap<String, FunInstance>, instance_to_stmt: HashMap<String, FunInstance>,
/// Symbol resolver of the module defined the class. // symbol resolver of the module defined the class
resolver: Option<Arc<dyn SymbolResolver + Send + Sync>>, resolver: Option<Arc<dyn SymbolResolver + Send + Sync>>,
/// Custom code generation callback. // custom codegen callback
codegen_callback: Option<Arc<GenCall>>, codegen_callback: Option<Arc<GenCall>>,
/// Definition location. // definition location
loc: Option<Location>, loc: Option<Location>,
}, },
} }
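One side of the `GenCall` hunk above keeps the `Box` inside the `GenCallCallback` alias; the other names the bare `dyn Fn(...)`, stores `Box<GenCallCallback>` in the `fp` field, and adds a `create_dummy` constructor that panics with a stored reason if it is ever invoked. The sketch below keeps that wrapper-around-a-boxed-closure shape but replaces the real signature (`CodeGenContext`, `ValueEnum`, `FunSignature`, ...) with a plain `&str -> Result<String, String>` callback, so it illustrates only the pattern, not nac3's API.

```
/// Bare callback type; callers box it themselves. The argument and return types
/// here are placeholders for the real codegen types.
type Callback = dyn Fn(&str) -> Result<String, String> + Send + Sync;

struct GenCall {
    fp: Box<Callback>,
}

impl GenCall {
    #[must_use]
    fn new(fp: Box<Callback>) -> GenCall {
        GenCall { fp }
    }

    /// A placeholder instance that must never actually be invoked.
    #[must_use]
    fn create_dummy(reason: String) -> GenCall {
        Self::new(Box::new(move |_: &str| -> Result<String, String> {
            unreachable!("{reason}")
        }))
    }

    fn run(&self, arg: &str) -> Result<String, String> {
        // Call through the boxed trait object.
        (self.fp)(arg)
    }
}

fn main() {
    let double = GenCall::new(Box::new(|s: &str| -> Result<String, String> {
        Ok(format!("{s}{s}"))
    }));
    println!("{:?}", double.run("ab")); // Ok("abab")

    // `create_dummy` is only a marker; calling `run` on it would panic with the reason.
    let _todo = GenCall::create_dummy("list comprehension codegen not implemented".to_string());
}
```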

View File

@ -1,103 +0,0 @@
use itertools::Itertools;
use crate::{
toplevel::helper::PRIMITIVE_DEF_IDS,
typecheck::{
type_inferencer::PrimitiveStore,
typedef::{Type, TypeEnum, Unifier, VarMap},
},
};
/// Creates a `ndarray` [`Type`] with the given type arguments.
///
/// * `dtype` - The element type of the `ndarray`, or [`None`] if the type variable is not
/// specialized.
/// * `ndims` - The number of dimensions of the `ndarray`, or [`None`] if the type variable is not
/// specialized.
pub fn make_ndarray_ty(
unifier: &mut Unifier,
primitives: &PrimitiveStore,
dtype: Option<Type>,
ndims: Option<Type>,
) -> Type {
subst_ndarray_tvars(unifier, primitives.ndarray, dtype, ndims)
}
/// Substitutes type variables in `ndarray`.
///
/// * `dtype` - The element type of the `ndarray`, or [`None`] if the type variable is not
/// specialized.
/// * `ndims` - The number of dimensions of the `ndarray`, or [`None`] if the type variable is not
/// specialized.
pub fn subst_ndarray_tvars(
unifier: &mut Unifier,
ndarray: Type,
dtype: Option<Type>,
ndims: Option<Type>,
) -> Type {
let TypeEnum::TObj { obj_id, params, .. } = &*unifier.get_ty_immutable(ndarray) else {
panic!("Expected `ndarray` to be TObj, but got {}", unifier.stringify(ndarray))
};
debug_assert_eq!(*obj_id, PRIMITIVE_DEF_IDS.ndarray);
if dtype.is_none() && ndims.is_none() {
return ndarray
}
let tvar_ids = params.iter()
.map(|(obj_id, _)| *obj_id)
.collect_vec();
debug_assert_eq!(tvar_ids.len(), 2);
let mut tvar_subst = VarMap::new();
if let Some(dtype) = dtype {
tvar_subst.insert(tvar_ids[0], dtype);
}
if let Some(ndims) = ndims {
tvar_subst.insert(tvar_ids[1], ndims);
}
unifier.subst(ndarray, &tvar_subst).unwrap_or(ndarray)
}
fn unpack_ndarray_tvars(
unifier: &mut Unifier,
ndarray: Type,
) -> Vec<(u32, Type)> {
let TypeEnum::TObj { obj_id, params, .. } = &*unifier.get_ty_immutable(ndarray) else {
panic!("Expected `ndarray` to be TObj, but got {}", unifier.stringify(ndarray))
};
debug_assert_eq!(*obj_id, PRIMITIVE_DEF_IDS.ndarray);
debug_assert_eq!(params.len(), 2);
params.iter()
.sorted_by_key(|(obj_id, _)| *obj_id)
.map(|(var_id, ty)| (*var_id, *ty))
.collect_vec()
}
/// Unpacks the type variable IDs of `ndarray` into a tuple. The elements of the tuple corresponds
/// to `dtype` (the element type) and `ndims` (the number of dimensions) of the `ndarray`
/// respectively.
pub fn unpack_ndarray_var_ids(
unifier: &mut Unifier,
ndarray: Type,
) -> (u32, u32) {
unpack_ndarray_tvars(unifier, ndarray)
.into_iter()
.map(|v| v.0)
.collect_tuple()
.unwrap()
}
/// Unpacks the type variables of `ndarray` into a tuple. The elements of the tuple corresponds to
/// `dtype` (the element type) and `ndims` (the number of dimensions) of the `ndarray` respectively.
pub fn unpack_ndarray_var_tys(
unifier: &mut Unifier,
ndarray: Type,
) -> (Type, Type) {
unpack_ndarray_tvars(unifier, ndarray)
.into_iter()
.map(|v| v.1)
.collect_tuple()
.unwrap()
}
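The `numpy.rs` listing above (present on only one side of this comparison) specializes the two type variables of the builtin `ndarray` type, `dtype` and `ndims`, by collecting their variable IDs into a `VarMap` and calling `Unifier::subst`, leaving any unspecified parameter untouched. The toy sketch below does the same over a hypothetical `Ty` representation with a hand-rolled `subst`; the real unifier, `TypeEnum::TObj` and `VarMap` are far richer than this.

```
use std::collections::HashMap;

/// Toy type representation: a concrete named type or a type variable with an id.
#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Var(u32),
    Named(&'static str, Vec<Ty>), // e.g. Named("ndarray", vec![dtype, ndims])
}

/// Apply a var-id -> type substitution recursively (toy analogue of `Unifier::subst`).
fn subst(ty: &Ty, map: &HashMap<u32, Ty>) -> Ty {
    match ty {
        Ty::Var(id) => map.get(id).cloned().unwrap_or_else(|| ty.clone()),
        Ty::Named(name, params) => {
            Ty::Named(*name, params.iter().map(|p| subst(p, map)).collect())
        }
    }
}

/// Specialize `ndarray[dtype, ndims]`, leaving unspecified parameters as free variables,
/// mirroring the `Option<Type>` arguments of `subst_ndarray_tvars`.
fn subst_ndarray_tvars(ndarray: &Ty, dtype: Option<Ty>, ndims: Option<Ty>) -> Ty {
    let Ty::Named("ndarray", params) = ndarray else {
        panic!("expected `ndarray`, got {ndarray:?}");
    };
    let (Ty::Var(dtype_id), Ty::Var(ndims_id)) = (&params[0], &params[1]) else {
        panic!("expected unspecialized type variables");
    };
    let mut map = HashMap::new();
    if let Some(t) = dtype {
        map.insert(*dtype_id, t);
    }
    if let Some(n) = ndims {
        map.insert(*ndims_id, n);
    }
    subst(ndarray, &map)
}

fn main() {
    // `ndarray` as registered: two fresh type variables for dtype and ndims.
    let generic = Ty::Named("ndarray", vec![Ty::Var(0), Ty::Var(1)]);

    // Specialize only the element type; the dimension count stays a free variable.
    let float_nd = subst_ndarray_tvars(&generic, Some(Ty::Named("float", vec![])), None);
    assert_eq!(float_nd, Ty::Named("ndarray", vec![Ty::Named("float", vec![]), Ty::Var(1)]));
    println!("{float_nd:?}");
}
```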

View File

@ -1,11 +1,13 @@
--- ---
source: nac3core/src/toplevel/test.rs source: nac3core/src/toplevel/test.rs
assertion_line: 549
expression: res_vec expression: res_vec
--- ---
[ [
"Class {\nname: \"Generic_A\",\nancestors: [\"Generic_A[V]\", \"B\"],\nfields: [\"aa\", \"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"foo\", \"fn[[b:T], none]\"), (\"fun\", \"fn[[a:int32], V]\")],\ntype_vars: [\"V\"]\n}\n", "Class {\nname: \"Generic_A\",\nancestors: [\"Generic_A[V]\", \"B\"],\nfields: [\"aa\", \"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"foo\", \"fn[[b:T], none]\"), (\"fun\", \"fn[[a:int32], V]\")],\ntype_vars: [\"V\"]\n}\n",
"Function {\nname: \"Generic_A.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"Generic_A.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"Generic_A.fun\",\nsig: \"fn[[a:int32], V]\",\nvar_id: [238]\n}\n", "Function {\nname: \"Generic_A.fun\",\nsig: \"fn[[a:int32], V]\",\nvar_id: [18]\n}\n",
"Class {\nname: \"B\",\nancestors: [\"B\"],\nfields: [\"aa\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"foo\", \"fn[[b:T], none]\")],\ntype_vars: []\n}\n", "Class {\nname: \"B\",\nancestors: [\"B\"],\nfields: [\"aa\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"foo\", \"fn[[b:T], none]\")],\ntype_vars: []\n}\n",
"Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"B.foo\",\nsig: \"fn[[b:T], none]\",\nvar_id: []\n}\n", "Function {\nname: \"B.foo\",\nsig: \"fn[[b:T], none]\",\nvar_id: []\n}\n",

View File

@ -1,13 +1,15 @@
--- ---
source: nac3core/src/toplevel/test.rs source: nac3core/src/toplevel/test.rs
assertion_line: 549
expression: res_vec expression: res_vec
--- ---
[ [
"Class {\nname: \"A\",\nancestors: [\"A[T]\"],\nfields: [\"a\", \"b\", \"c\"],\nmethods: [(\"__init__\", \"fn[[t:T], none]\"), (\"fun\", \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\"), (\"foo\", \"fn[[c:C], none]\")],\ntype_vars: [\"T\"]\n}\n", "Class {\nname: \"A\",\nancestors: [\"A[T]\"],\nfields: [\"a\", \"b\", \"c\"],\nmethods: [(\"__init__\", \"fn[[t:T], none]\"), (\"fun\", \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\"), (\"foo\", \"fn[[c:C], none]\")],\ntype_vars: [\"T\"]\n}\n",
"Function {\nname: \"A.__init__\",\nsig: \"fn[[t:T], none]\",\nvar_id: []\n}\n", "Function {\nname: \"A.__init__\",\nsig: \"fn[[t:T], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"A.fun\",\nsig: \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\",\nvar_id: []\n}\n", "Function {\nname: \"A.fun\",\nsig: \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\",\nvar_id: []\n}\n",
"Function {\nname: \"A.foo\",\nsig: \"fn[[c:C], none]\",\nvar_id: []\n}\n", "Function {\nname: \"A.foo\",\nsig: \"fn[[c:C], none]\",\nvar_id: []\n}\n",
"Class {\nname: \"B\",\nancestors: [\"B[typevar227]\", \"A[float]\"],\nfields: [\"a\", \"b\", \"c\", \"d\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\"), (\"foo\", \"fn[[c:C], none]\")],\ntype_vars: [\"typevar227\"]\n}\n", "Class {\nname: \"B\",\nancestors: [\"B[typevar7]\", \"A[float]\"],\nfields: [\"a\", \"b\", \"c\", \"d\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\"), (\"foo\", \"fn[[c:C], none]\")],\ntype_vars: [\"typevar7\"]\n}\n",
"Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"B.fun\",\nsig: \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\",\nvar_id: []\n}\n", "Function {\nname: \"B.fun\",\nsig: \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\",\nvar_id: []\n}\n",
"Class {\nname: \"C\",\nancestors: [\"C\", \"B[bool]\", \"A[float]\"],\nfields: [\"a\", \"b\", \"c\", \"d\", \"e\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\"), (\"foo\", \"fn[[c:C], none]\")],\ntype_vars: []\n}\n", "Class {\nname: \"C\",\nancestors: [\"C\", \"B[bool]\", \"A[float]\"],\nfields: [\"a\", \"b\", \"c\", \"d\", \"e\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[a:int32, b:T], list[virtual[B[bool]]]]\"), (\"foo\", \"fn[[c:C], none]\")],\ntype_vars: []\n}\n",

View File

@ -1,13 +1,15 @@
--- ---
source: nac3core/src/toplevel/test.rs source: nac3core/src/toplevel/test.rs
assertion_line: 549
expression: res_vec expression: res_vec
--- ---
[ [
"Function {\nname: \"foo\",\nsig: \"fn[[a:list[int32], b:tuple[T, float]], A[B, bool]]\",\nvar_id: []\n}\n", "Function {\nname: \"foo\",\nsig: \"fn[[a:list[int32], b:tuple[T, float]], A[B, bool]]\",\nvar_id: []\n}\n",
"Class {\nname: \"A\",\nancestors: [\"A[T, V]\"],\nfields: [\"a\", \"b\"],\nmethods: [(\"__init__\", \"fn[[v:V], none]\"), (\"fun\", \"fn[[a:T], V]\")],\ntype_vars: [\"T\", \"V\"]\n}\n", "Class {\nname: \"A\",\nancestors: [\"A[T, V]\"],\nfields: [\"a\", \"b\"],\nmethods: [(\"__init__\", \"fn[[v:V], none]\"), (\"fun\", \"fn[[a:T], V]\")],\ntype_vars: [\"T\", \"V\"]\n}\n",
"Function {\nname: \"A.__init__\",\nsig: \"fn[[v:V], none]\",\nvar_id: [240]\n}\n", "Function {\nname: \"A.__init__\",\nsig: \"fn[[v:V], none]\",\nvar_id: [20]\n}\n",
"Function {\nname: \"A.fun\",\nsig: \"fn[[a:T], V]\",\nvar_id: [245]\n}\n", "Function {\nname: \"A.fun\",\nsig: \"fn[[a:T], V]\",\nvar_id: [25]\n}\n",
"Function {\nname: \"gfun\",\nsig: \"fn[[a:A[list[float], int32]], none]\",\nvar_id: []\n}\n", "Function {\nname: \"gfun\",\nsig: \"fn[[a:A[int32, list[float]]], none]\",\nvar_id: []\n}\n",
"Class {\nname: \"B\",\nancestors: [\"B\"],\nfields: [],\nmethods: [(\"__init__\", \"fn[[], none]\")],\ntype_vars: []\n}\n", "Class {\nname: \"B\",\nancestors: [\"B\"],\nfields: [],\nmethods: [(\"__init__\", \"fn[[], none]\")],\ntype_vars: []\n}\n",
"Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
] ]

View File

@ -1,13 +1,15 @@
--- ---
source: nac3core/src/toplevel/test.rs source: nac3core/src/toplevel/test.rs
assertion_line: 549
expression: res_vec expression: res_vec
--- ---
[ [
"Class {\nname: \"A\",\nancestors: [\"A[typevar226, typevar227]\"],\nfields: [\"a\", \"b\"],\nmethods: [(\"__init__\", \"fn[[a:A[float, bool], b:B], none]\"), (\"fun\", \"fn[[a:A[float, bool]], A[bool, int32]]\")],\ntype_vars: [\"typevar226\", \"typevar227\"]\n}\n", "Class {\nname: \"A\",\nancestors: [\"A[typevar6, typevar7]\"],\nfields: [\"a\", \"b\"],\nmethods: [(\"__init__\", \"fn[[a:A[bool, float], b:B], none]\"), (\"fun\", \"fn[[a:A[bool, float]], A[bool, int32]]\")],\ntype_vars: [\"typevar6\", \"typevar7\"]\n}\n",
"Function {\nname: \"A.__init__\",\nsig: \"fn[[a:A[float, bool], b:B], none]\",\nvar_id: []\n}\n", "Function {\nname: \"A.__init__\",\nsig: \"fn[[a:A[bool, float], b:B], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"A.fun\",\nsig: \"fn[[a:A[float, bool]], A[bool, int32]]\",\nvar_id: []\n}\n", "Function {\nname: \"A.fun\",\nsig: \"fn[[a:A[bool, float]], A[bool, int32]]\",\nvar_id: []\n}\n",
"Class {\nname: \"B\",\nancestors: [\"B\", \"A[int64, bool]\"],\nfields: [\"a\", \"b\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[a:A[float, bool]], A[bool, int32]]\"), (\"foo\", \"fn[[b:B], B]\"), (\"bar\", \"fn[[a:A[list[B], int32]], tuple[A[virtual[A[B, int32]], bool], B]]\")],\ntype_vars: []\n}\n", "Class {\nname: \"B\",\nancestors: [\"B\", \"A[int64, bool]\"],\nfields: [\"a\", \"b\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[a:A[bool, float]], A[bool, int32]]\"), (\"foo\", \"fn[[b:B], B]\"), (\"bar\", \"fn[[a:A[int32, list[B]]], tuple[A[bool, virtual[A[B, int32]]], B]]\")],\ntype_vars: []\n}\n",
"Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"B.foo\",\nsig: \"fn[[b:B], B]\",\nvar_id: []\n}\n", "Function {\nname: \"B.foo\",\nsig: \"fn[[b:B], B]\",\nvar_id: []\n}\n",
"Function {\nname: \"B.bar\",\nsig: \"fn[[a:A[list[B], int32]], tuple[A[virtual[A[B, int32]], bool], B]]\",\nvar_id: []\n}\n", "Function {\nname: \"B.bar\",\nsig: \"fn[[a:A[int32, list[B]]], tuple[A[bool, virtual[A[B, int32]]], B]]\",\nvar_id: []\n}\n",
] ]

View File

@ -1,17 +1,19 @@
--- ---
source: nac3core/src/toplevel/test.rs source: nac3core/src/toplevel/test.rs
assertion_line: 549
expression: res_vec expression: res_vec
--- ---
[ [
"Class {\nname: \"A\",\nancestors: [\"A\"],\nfields: [\"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[b:B], none]\"), (\"foo\", \"fn[[a:T, b:V], none]\")],\ntype_vars: []\n}\n", "Class {\nname: \"A\",\nancestors: [\"A\"],\nfields: [\"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[b:B], none]\"), (\"foo\", \"fn[[a:T, b:V], none]\")],\ntype_vars: []\n}\n",
"Function {\nname: \"A.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"A.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"A.fun\",\nsig: \"fn[[b:B], none]\",\nvar_id: []\n}\n", "Function {\nname: \"A.fun\",\nsig: \"fn[[b:B], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"A.foo\",\nsig: \"fn[[a:T, b:V], none]\",\nvar_id: [246]\n}\n", "Function {\nname: \"A.foo\",\nsig: \"fn[[a:T, b:V], none]\",\nvar_id: [26]\n}\n",
"Class {\nname: \"B\",\nancestors: [\"B\", \"C\", \"A\"],\nfields: [\"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[b:B], none]\"), (\"foo\", \"fn[[a:T, b:V], none]\")],\ntype_vars: []\n}\n", "Class {\nname: \"B\",\nancestors: [\"B\", \"C\", \"A\"],\nfields: [\"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[b:B], none]\"), (\"foo\", \"fn[[a:T, b:V], none]\")],\ntype_vars: []\n}\n",
"Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"B.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
"Class {\nname: \"C\",\nancestors: [\"C\", \"A\"],\nfields: [\"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[b:B], none]\"), (\"foo\", \"fn[[a:T, b:V], none]\")],\ntype_vars: []\n}\n", "Class {\nname: \"C\",\nancestors: [\"C\", \"A\"],\nfields: [\"a\"],\nmethods: [(\"__init__\", \"fn[[], none]\"), (\"fun\", \"fn[[b:B], none]\"), (\"foo\", \"fn[[a:T, b:V], none]\")],\ntype_vars: []\n}\n",
"Function {\nname: \"C.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n", "Function {\nname: \"C.__init__\",\nsig: \"fn[[], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"C.fun\",\nsig: \"fn[[b:B], none]\",\nvar_id: []\n}\n", "Function {\nname: \"C.fun\",\nsig: \"fn[[b:B], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"foo\",\nsig: \"fn[[a:A], none]\",\nvar_id: []\n}\n", "Function {\nname: \"foo\",\nsig: \"fn[[a:A], none]\",\nvar_id: []\n}\n",
"Function {\nname: \"ff\",\nsig: \"fn[[a:T], V]\",\nvar_id: [254]\n}\n", "Function {\nname: \"ff\",\nsig: \"fn[[a:T], V]\",\nvar_id: [34]\n}\n",
] ]

View File

@ -36,7 +36,7 @@ struct Resolver(Arc<ResolverInternal>);
impl SymbolResolver for Resolver { impl SymbolResolver for Resolver {
fn get_default_param_value( fn get_default_param_value(
&self, &self,
_: &ast::Expr, _: &nac3parser::ast::Expr,
) -> Option<crate::symbol_resolver::SymbolValue> { ) -> Option<crate::symbol_resolver::SymbolValue> {
unimplemented!() unimplemented!()
} }
@ -64,9 +64,8 @@ impl SymbolResolver for Resolver {
unimplemented!() unimplemented!()
} }
fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, HashSet<String>> { fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, String> {
self.0.id_to_def.lock().get(&id).cloned() self.0.id_to_def.lock().get(&id).cloned().ok_or_else(|| "Unknown identifier".to_string())
.ok_or_else(|| HashSet::from(["Unknown identifier".to_string()]))
} }
fn get_string_id(&self, _: &str) -> i32 { fn get_string_id(&self, _: &str) -> i32 {
@ -111,7 +110,7 @@ impl SymbolResolver for Resolver {
"register" "register"
)] )]
fn test_simple_register(source: Vec<&str>) { fn test_simple_register(source: Vec<&str>) {
let mut composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 64).0; let mut composer: TopLevelComposer = Default::default();
for s in source { for s in source {
let ast = parse_program(s, Default::default()).unwrap(); let ast = parse_program(s, Default::default()).unwrap();
@ -131,7 +130,7 @@ fn test_simple_register(source: Vec<&str>) {
"register" "register"
)] )]
fn test_simple_register_without_constructor(source: &str) { fn test_simple_register_without_constructor(source: &str) {
let mut composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 64).0; let mut composer: TopLevelComposer = Default::default();
let ast = parse_program(source, Default::default()).unwrap(); let ast = parse_program(source, Default::default()).unwrap();
let ast = ast[0].clone(); let ast = ast[0].clone();
composer.register_top_level(ast, None, "".into(), true).unwrap(); composer.register_top_level(ast, None, "".into(), true).unwrap();
@ -165,7 +164,7 @@ fn test_simple_register_without_constructor(source: &str) {
"function compose" "function compose"
)] )]
fn test_simple_function_analyze(source: Vec<&str>, tys: Vec<&str>, names: Vec<&str>) { fn test_simple_function_analyze(source: Vec<&str>, tys: Vec<&str>, names: Vec<&str>) {
let mut composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 64).0; let mut composer: TopLevelComposer = Default::default();
let internal_resolver = Arc::new(ResolverInternal { let internal_resolver = Arc::new(ResolverInternal {
id_to_def: Default::default(), id_to_def: Default::default(),
@ -362,7 +361,7 @@ fn test_simple_function_analyze(source: Vec<&str>, tys: Vec<&str>, names: Vec<&s
pass pass
"} "}
], ],
vec!["application of type vars to generic class is not currently supported (at unknown:4:24)"]; vec!["application of type vars to generic class is not currently supported (at unknown: line 4 column 24)"];
"err no type var in generic app" "err no type var in generic app"
)] )]
#[test_case( #[test_case(
@ -418,7 +417,7 @@ fn test_simple_function_analyze(source: Vec<&str>, tys: Vec<&str>, names: Vec<&s
def __init__(): def __init__():
pass pass
"}], "}],
vec!["__init__ method must have a `self` parameter (at unknown:2:5)"]; vec!["__init__ method must have a `self` parameter (at unknown: line 2 column 5)"];
"err no self_1" "err no self_1"
)] )]
#[test_case( #[test_case(
@ -440,7 +439,7 @@ fn test_simple_function_analyze(source: Vec<&str>, tys: Vec<&str>, names: Vec<&s
"} "}
], ],
vec!["a class definition can only have at most one base class declaration and one generic declaration (at unknown:1:24)"]; vec!["a class definition can only have at most one base class declaration and one generic declaration (at unknown: line 1 column 24)"];
"err multiple inheritance" "err multiple inheritance"
)] )]
#[test_case( #[test_case(
@ -508,12 +507,12 @@ fn test_simple_function_analyze(source: Vec<&str>, tys: Vec<&str>, names: Vec<&s
pass pass
"} "}
], ],
vec!["duplicate definition of class `A` (at unknown:1:1)"]; vec!["duplicate definition of class `A` (at unknown: line 1 column 1)"];
"class same name" "class same name"
)] )]
fn test_analyze(source: Vec<&str>, res: Vec<&str>) { fn test_analyze(source: Vec<&str>, res: Vec<&str>) {
let print = false; let print = false;
let mut composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 64).0; let mut composer: TopLevelComposer = Default::default();
let internal_resolver = make_internal_resolver_with_tvar( let internal_resolver = make_internal_resolver_with_tvar(
vec![ vec![
@ -552,9 +551,9 @@ fn test_analyze(source: Vec<&str>, res: Vec<&str>) {
if let Err(msg) = composer.start_analysis(false) { if let Err(msg) = composer.start_analysis(false) {
if print { if print {
println!("{}", msg.iter().sorted().join("\n----------\n")); println!("{}", msg);
} else { } else {
assert_eq!(res[0], msg.iter().next().unwrap()); assert_eq!(res[0], msg);
} }
} else { } else {
// skip 5 to skip primitives // skip 5 to skip primitives
@ -690,7 +689,7 @@ fn test_analyze(source: Vec<&str>, res: Vec<&str>) {
)] )]
fn test_inference(source: Vec<&str>, res: Vec<&str>) { fn test_inference(source: Vec<&str>, res: Vec<&str>) {
let print = true; let print = true;
let mut composer = TopLevelComposer::new(Vec::new(), ComposerConfig::default(), 64).0; let mut composer: TopLevelComposer = Default::default();
let internal_resolver = make_internal_resolver_with_tvar( let internal_resolver = make_internal_resolver_with_tvar(
vec![ vec![
@ -736,9 +735,9 @@ fn test_inference(source: Vec<&str>, res: Vec<&str>) {
if let Err(msg) = composer.start_analysis(true) { if let Err(msg) = composer.start_analysis(true) {
if print { if print {
println!("{}", msg.iter().sorted().join("\n----------\n")); println!("{}", msg);
} else { } else {
assert_eq!(res[0], msg.iter().next().unwrap()); assert_eq!(res[0], msg);
} }
} else { } else {
// skip 5 to skip primitives // skip 5 to skip primitives
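One side of the test changes above treats the analysis result as a `HashSet<String>` of error messages and prints it as `msg.iter().sorted().join("\n----------\n")`. Since `HashSet` iteration order is arbitrary, a deterministic report needs an explicit sort; the snippet below shows that with the standard library only (the tests above use itertools), reusing two diagnostics quoted from the hunks.

```
use std::collections::HashSet;

/// Render a set of error messages deterministically, one per block,
/// with the same separator used by the test output above.
fn render_errors(errors: &HashSet<String>) -> String {
    let mut msgs: Vec<&str> = errors.iter().map(String::as_str).collect();
    msgs.sort_unstable(); // HashSet iteration order is arbitrary, so sort for stable output
    msgs.join("\n----------\n")
}

fn main() {
    let errors = HashSet::from([
        "duplicate definition of class `A` (at unknown:1:1)".to_string(),
        "__init__ method must have a `self` parameter (at unknown:2:5)".to_string(),
    ]);
    println!("{}", render_errors(&errors));
    assert!(render_errors(&errors).contains("duplicate definition"));
}
```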

View File

@ -1,8 +1,4 @@
use crate::symbol_resolver::SymbolValue;
use crate::toplevel::helper::PRIMITIVE_DEF_IDS;
use crate::typecheck::typedef::VarMap;
use super::*; use super::*;
use nac3parser::ast::Constant;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum TypeAnnotation { pub enum TypeAnnotation {
@ -16,8 +12,6 @@ pub enum TypeAnnotation {
// can only be CustomClassKind // can only be CustomClassKind
Virtual(Box<TypeAnnotation>), Virtual(Box<TypeAnnotation>),
TypeVar(Type), TypeVar(Type),
/// A `Literal` allowing a subset of literals.
Literal(Vec<Constant>),
List(Box<TypeAnnotation>), List(Box<TypeAnnotation>),
Tuple(Vec<TypeAnnotation>), Tuple(Vec<TypeAnnotation>),
} }
@ -28,16 +22,17 @@ impl TypeAnnotation {
match self { match self {
Primitive(ty) | TypeVar(ty) => unifier.stringify(*ty), Primitive(ty) | TypeVar(ty) => unifier.stringify(*ty),
CustomClass { id, params } => { CustomClass { id, params } => {
let class_name = if let Some(ref top) = unifier.top_level { let class_name = match unifier.top_level {
if let TopLevelDef::Class { name, .. } = Some(ref top) => {
&*top.definitions.read()[id.0].read() if let TopLevelDef::Class { name, .. } =
{ &*top.definitions.read()[id.0].read()
(*name).into() {
} else { (*name).into()
unreachable!() } else {
unreachable!()
}
} }
} else { None => format!("class_def_{}", id.0),
format!("class_def_{}", id.0)
}; };
format!( format!(
"{}{}", "{}{}",
@ -45,14 +40,13 @@ impl TypeAnnotation {
{ {
let param_list = params.iter().map(|p| p.stringify(unifier)).collect_vec().join(", "); let param_list = params.iter().map(|p| p.stringify(unifier)).collect_vec().join(", ");
if param_list.is_empty() { if param_list.is_empty() {
String::new() "".into()
} else { } else {
format!("[{param_list}]") format!("[{}]", param_list)
} }
} }
) )
} }
Literal(values) => format!("Literal({})", values.iter().map(|v| format!("{v:?}")).join(", ")),
Virtual(ty) => format!("virtual[{}]", ty.stringify(unifier)), Virtual(ty) => format!("virtual[{}]", ty.stringify(unifier)),
List(ty) => format!("list[{}]", ty.stringify(unifier)), List(ty) => format!("list[{}]", ty.stringify(unifier)),
Tuple(types) => { Tuple(types) => {
@ -62,12 +56,6 @@ impl TypeAnnotation {
} }
} }
/// Parses an AST expression `expr` into a [`TypeAnnotation`].
///
/// * `locked` - A [`HashMap`] containing the IDs of known definitions, mapped to a [`Vec`] of all
/// generic variables associated with the definition.
/// * `type_var` - The type variable associated with the type argument currently being parsed. Pass
/// [`None`] when this function is invoked externally.
pub fn parse_ast_to_type_annotation_kinds<T>( pub fn parse_ast_to_type_annotation_kinds<T>(
resolver: &(dyn SymbolResolver + Send + Sync), resolver: &(dyn SymbolResolver + Send + Sync),
top_level_defs: &[Arc<RwLock<TopLevelDef>>], top_level_defs: &[Arc<RwLock<TopLevelDef>>],
@ -76,7 +64,7 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
expr: &ast::Expr<T>, expr: &ast::Expr<T>,
// the key stores the type_var of this topleveldef::class, we only need this field here // the key stores the type_var of this topleveldef::class, we only need this field here
locked: HashMap<DefinitionId, Vec<Type>>, locked: HashMap<DefinitionId, Vec<Type>>,
) -> Result<TypeAnnotation, HashSet<String>> { ) -> Result<TypeAnnotation, String> {
let name_handle = |id: &StrRef, let name_handle = |id: &StrRef,
unifier: &mut Unifier, unifier: &mut Unifier,
locked: HashMap<DefinitionId, Vec<Type>>| { locked: HashMap<DefinitionId, Vec<Type>>| {
@ -95,7 +83,7 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
} else if id == &"str".into() { } else if id == &"str".into() {
Ok(TypeAnnotation::Primitive(primitives.str)) Ok(TypeAnnotation::Primitive(primitives.str))
} else if id == &"Exception".into() { } else if id == &"Exception".into() {
Ok(TypeAnnotation::CustomClass { id: PRIMITIVE_DEF_IDS.exception, params: Vec::default() }) Ok(TypeAnnotation::CustomClass { id: DefinitionId(7), params: Default::default() })
} else if let Ok(obj_id) = resolver.get_identifier_def(*id) { } else if let Ok(obj_id) = resolver.get_identifier_def(*id) {
let type_vars = { let type_vars = {
let def_read = top_level_defs[obj_id.0].try_read(); let def_read = top_level_defs[obj_id.0].try_read();
@ -103,12 +91,10 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
if let TopLevelDef::Class { type_vars, .. } = &*def_read { if let TopLevelDef::Class { type_vars, .. } = &*def_read {
type_vars.clone() type_vars.clone()
} else { } else {
return Err(HashSet::from([ return Err(format!(
format!( "function cannot be used as a type (at {})",
"function cannot be used as a type (at {})", expr.location
expr.location ));
),
]))
} }
} else { } else {
locked.get(&obj_id).unwrap().clone() locked.get(&obj_id).unwrap().clone()
@ -116,13 +102,11 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
}; };
// check param number here // check param number here
if !type_vars.is_empty() { if !type_vars.is_empty() {
return Err(HashSet::from([ return Err(format!(
format!( "expect {} type variable parameter but got 0 (at {})",
"expect {} type variable parameter but got 0 (at {})", type_vars.len(),
type_vars.len(), expr.location,
expr.location, ));
),
]))
} }
Ok(TypeAnnotation::CustomClass { id: obj_id, params: vec![] }) Ok(TypeAnnotation::CustomClass { id: obj_id, params: vec![] })
} else if let Ok(ty) = resolver.get_symbol_type(unifier, top_level_defs, primitives, *id) { } else if let Ok(ty) = resolver.get_symbol_type(unifier, top_level_defs, primitives, *id) {
@ -131,14 +115,10 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
unifier.unify(var, ty).unwrap(); unifier.unify(var, ty).unwrap();
Ok(TypeAnnotation::TypeVar(ty)) Ok(TypeAnnotation::TypeVar(ty))
} else { } else {
Err(HashSet::from([ Err(format!("`{}` is not a valid type annotation (at {})", id, expr.location))
format!("`{}` is not a valid type annotation (at {})", id, expr.location),
]))
} }
} else { } else {
Err(HashSet::from([ Err(format!("`{}` is not a valid type annotation (at {})", id, expr.location))
format!("`{}` is not a valid type annotation (at {})", id, expr.location),
]))
} }
}; };
@ -147,21 +127,19 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
slice: &ast::Expr<T>, slice: &ast::Expr<T>,
unifier: &mut Unifier, unifier: &mut Unifier,
mut locked: HashMap<DefinitionId, Vec<Type>>| { mut locked: HashMap<DefinitionId, Vec<Type>>| {
if ["virtual".into(), "Generic".into(), "list".into(), "tuple".into(), "Option".into()].contains(id) if vec!["virtual".into(), "Generic".into(), "list".into(), "tuple".into()].contains(id)
{ {
return Err(HashSet::from([ return Err(format!("keywords cannot be class name (at {})", expr.location));
format!("keywords cannot be class name (at {})", expr.location),
]))
} }
let obj_id = resolver.get_identifier_def(*id)?; let obj_id = resolver.get_identifier_def(*id)?;
let type_vars = { let type_vars = {
let def_read = top_level_defs[obj_id.0].try_read(); let def_read = top_level_defs[obj_id.0].try_read();
if let Some(def_read) = def_read { if let Some(def_read) = def_read {
let TopLevelDef::Class { type_vars, .. } = &*def_read else { if let TopLevelDef::Class { type_vars, .. } = &*def_read {
type_vars.clone()
} else {
unreachable!("must be class here") unreachable!("must be class here")
}; }
type_vars.clone()
} else { } else {
locked.get(&obj_id).unwrap().clone() locked.get(&obj_id).unwrap().clone()
} }
@ -174,14 +152,12 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
vec![slice] vec![slice]
}; };
if type_vars.len() != params_ast.len() { if type_vars.len() != params_ast.len() {
return Err(HashSet::from([ return Err(format!(
format!( "expect {} type parameters but got {} (at {})",
"expect {} type parameters but got {} (at {})", type_vars.len(),
type_vars.len(), params_ast.len(),
params_ast.len(), params_ast[0].location,
params_ast[0].location, ));
),
]))
} }
let result = params_ast let result = params_ast
.iter() .iter()
@ -205,17 +181,15 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
if no_type_var { if no_type_var {
result result
} else { } else {
return Err(HashSet::from([ return Err(format!(
format!( "application of type vars to generic class \
"application of type vars to generic class is not currently supported (at {})", is not currently supported (at {})",
params_ast[0].location params_ast[0].location
), ));
]))
} }
}; };
Ok(TypeAnnotation::CustomClass { id: obj_id, params: param_type_infos }) Ok(TypeAnnotation::CustomClass { id: obj_id, params: param_type_infos })
}; };
match &expr.node { match &expr.node {
ast::ExprKind::Name { id, .. } => name_handle(id, unifier, locked), ast::ExprKind::Name { id, .. } => name_handle(id, unifier, locked),
// virtual // virtual
@ -307,70 +281,16 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
Ok(TypeAnnotation::Tuple(type_annotations)) Ok(TypeAnnotation::Tuple(type_annotations))
} }
// Literal
ast::ExprKind::Subscript { value, slice, .. }
if {
matches!(&value.node, ast::ExprKind::Name { id, .. } if id == &"Literal".into())
} => {
let tup_elts = {
if let ast::ExprKind::Tuple { elts, .. } = &slice.node {
elts.as_slice()
} else {
std::slice::from_ref(slice.as_ref())
}
};
let type_annotations = tup_elts
.iter()
.map(|e| {
match &e.node {
ast::ExprKind::Constant { value, .. } => Ok(
TypeAnnotation::Literal(vec![value.clone()]),
),
_ => parse_ast_to_type_annotation_kinds(
resolver,
top_level_defs,
unifier,
primitives,
e,
locked.clone(),
),
}
})
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.flat_map(|type_ann| match type_ann {
TypeAnnotation::Literal(values) => values,
_ => unreachable!(),
})
.collect_vec();
if type_annotations.len() == 1 {
Ok(TypeAnnotation::Literal(type_annotations))
} else {
Err(HashSet::from([
format!("multiple literal bounds are currently unsupported (at {})", value.location)
]))
}
}
// custom class // custom class
ast::ExprKind::Subscript { value, slice, .. } => { ast::ExprKind::Subscript { value, slice, .. } => {
if let ast::ExprKind::Name { id, .. } = &value.node { if let ast::ExprKind::Name { id, .. } = &value.node {
class_name_handle(id, slice, unifier, locked) class_name_handle(id, slice, unifier, locked)
} else { } else {
Err(HashSet::from([ Err(format!("unsupported expression type for class name (at {})", value.location))
format!("unsupported expression type for class name (at {})", value.location)
]))
} }
} }
ast::ExprKind::Constant { value, .. } => { _ => Err(format!("unsupported expression for type annotation (at {})", expr.location)),
Ok(TypeAnnotation::Literal(vec![value.clone()]))
}
_ => Err(HashSet::from([
format!("unsupported expression for type annotation (at {})", expr.location),
])),
} }
} }
@ -380,145 +300,107 @@ pub fn parse_ast_to_type_annotation_kinds<T>(
pub fn get_type_from_type_annotation_kinds( pub fn get_type_from_type_annotation_kinds(
top_level_defs: &[Arc<RwLock<TopLevelDef>>], top_level_defs: &[Arc<RwLock<TopLevelDef>>],
unifier: &mut Unifier, unifier: &mut Unifier,
primitives: &PrimitiveStore,
ann: &TypeAnnotation, ann: &TypeAnnotation,
subst_list: &mut Option<Vec<Type>> subst_list: &mut Option<Vec<Type>>
) -> Result<Type, HashSet<String>> { ) -> Result<Type, String> {
match ann { match ann {
TypeAnnotation::CustomClass { id: obj_id, params } => { TypeAnnotation::CustomClass { id: obj_id, params } => {
let def_read = top_level_defs[obj_id.0].read(); let def_read = top_level_defs[obj_id.0].read();
let class_def: &TopLevelDef = &def_read; let class_def: &TopLevelDef = def_read.deref();
let TopLevelDef::Class { fields, methods, type_vars, .. } = class_def else { if let TopLevelDef::Class { fields, methods, type_vars, .. } = class_def {
unreachable!("should be class def here") if type_vars.len() != params.len() {
}; Err(format!(
if type_vars.len() != params.len() {
return Err(HashSet::from([
format!(
"unexpected number of type parameters: expected {} but got {}", "unexpected number of type parameters: expected {} but got {}",
type_vars.len(), type_vars.len(),
params.len() params.len()
), ))
])) } else {
} let param_ty = params
.iter()
.map(|x| {
get_type_from_type_annotation_kinds(
top_level_defs,
unifier,
primitives,
x,
subst_list
)
})
.collect::<Result<Vec<_>, _>>()?;
let param_ty = params let subst = {
.iter() // check for compatible range
.map(|x| { // TODO: if allow type var to be applied(now this disallowed in the parse_to_type_annotation), need more check
get_type_from_type_annotation_kinds( let mut result: HashMap<u32, Type> = HashMap::new();
top_level_defs, for (tvar, p) in type_vars.iter().zip(param_ty) {
unifier, if let TypeEnum::TVar { id, range, fields: None, name, loc } =
x, unifier.get_ty(*tvar).as_ref()
subst_list {
) let ok: bool = {
}) // create a temp type var and unify to check compatibility
.collect::<Result<Vec<_>, _>>()?; p == *tvar || {
let temp = unifier.get_fresh_var_with_range(
let subst = { range.as_slice(),
// check for compatible range *name,
// TODO: if allow type var to be applied(now this disallowed in the parse_to_type_annotation), need more check *loc,
let mut result = VarMap::new(); );
for (tvar, p) in type_vars.iter().zip(param_ty) { unifier.unify(temp.0, p).is_ok()
match unifier.get_ty(*tvar).as_ref() { }
TypeEnum::TVar { id, range, fields: None, name, loc, is_const_generic: false } => { };
let ok: bool = { if ok {
// create a temp type var and unify to check compatibility result.insert(*id, p);
p == *tvar || { } else {
let temp = unifier.get_fresh_var_with_range( return Err(format!(
range.as_slice(),
*name,
*loc,
);
unifier.unify(temp.0, p).is_ok()
}
};
if ok {
result.insert(*id, p);
} else {
return Err(HashSet::from([
format!(
"cannot apply type {} to type variable with id {:?}", "cannot apply type {} to type variable with id {:?}",
unifier.internal_stringify( unifier.internal_stringify(
p, p,
&mut |id| format!("class{id}"), &mut |id| format!("class{}", id),
&mut |id| format!("typevar{id}"), &mut |id| format!("typevar{}", id),
&mut None &mut None
), ),
*id *id
) ));
]))
}
}
TypeEnum::TVar { id, range, name, loc, is_const_generic: true, .. } => {
let ty = range[0];
let ok: bool = {
// create a temp type var and unify to check compatibility
p == *tvar || {
let temp = unifier.get_fresh_const_generic_var(
ty,
*name,
*loc,
);
unifier.unify(temp.0, p).is_ok()
} }
};
if ok {
result.insert(*id, p);
} else { } else {
return Err(HashSet::from([ unreachable!("must be generic type var")
format!(
"cannot apply type {} to type variable {}",
unifier.stringify(p),
name.unwrap_or_else(|| format!("typevar{id}").into()),
),
]))
} }
} }
result
_ => unreachable!("must be generic type var"), };
let mut tobj_fields = methods
.iter()
.map(|(name, ty, _)| {
let subst_ty = unifier.subst(*ty, &subst).unwrap_or(*ty);
// methods are immutable
(*name, (subst_ty, false))
})
.collect::<HashMap<_, _>>();
tobj_fields.extend(fields.iter().map(|(name, ty, mutability)| {
let subst_ty = unifier.subst(*ty, &subst).unwrap_or(*ty);
(*name, (subst_ty, *mutability))
}));
let need_subst = !subst.is_empty();
let ty = unifier.add_ty(TypeEnum::TObj {
obj_id: *obj_id,
fields: tobj_fields,
params: subst,
});
if need_subst {
subst_list.as_mut().map(|wl| wl.push(ty));
} }
Ok(ty)
} }
result } else {
}; unreachable!("should be class def here")
let mut tobj_fields = methods
.iter()
.map(|(name, ty, _)| {
let subst_ty = unifier.subst(*ty, &subst).unwrap_or(*ty);
// methods are immutable
(*name, (subst_ty, false))
})
.collect::<HashMap<_, _>>();
tobj_fields.extend(fields.iter().map(|(name, ty, mutability)| {
let subst_ty = unifier.subst(*ty, &subst).unwrap_or(*ty);
(*name, (subst_ty, *mutability))
}));
let need_subst = !subst.is_empty();
let ty = unifier.add_ty(TypeEnum::TObj {
obj_id: *obj_id,
fields: tobj_fields,
params: subst,
});
if need_subst {
if let Some(wl) = subst_list.as_mut() {
wl.push(ty);
}
} }
Ok(ty)
} }
TypeAnnotation::Primitive(ty) | TypeAnnotation::TypeVar(ty) => Ok(*ty), TypeAnnotation::Primitive(ty) | TypeAnnotation::TypeVar(ty) => Ok(*ty),
TypeAnnotation::Literal(values) => {
let values = values.iter()
.map(SymbolValue::from_constant_inferred)
.collect::<Result<Vec<_>, _>>()
.map_err(|err| HashSet::from([err]))?;
let var = unifier.get_fresh_literal(values, None);
Ok(var)
}
TypeAnnotation::Virtual(ty) => { TypeAnnotation::Virtual(ty) => {
let ty = get_type_from_type_annotation_kinds( let ty = get_type_from_type_annotation_kinds(
top_level_defs, top_level_defs,
unifier, unifier,
primitives,
ty.as_ref(), ty.as_ref(),
subst_list subst_list
)?; )?;
@ -528,6 +410,7 @@ pub fn get_type_from_type_annotation_kinds(
let ty = get_type_from_type_annotation_kinds( let ty = get_type_from_type_annotation_kinds(
top_level_defs, top_level_defs,
unifier, unifier,
primitives,
ty.as_ref(), ty.as_ref(),
subst_list subst_list
)?; )?;
@ -537,7 +420,7 @@ pub fn get_type_from_type_annotation_kinds(
let tys = tys let tys = tys
.iter() .iter()
.map(|x| { .map(|x| {
get_type_from_type_annotation_kinds(top_level_defs, unifier, x, subst_list) get_type_from_type_annotation_kinds(top_level_defs, unifier, primitives, x, subst_list)
}) })
.collect::<Result<Vec<_>, _>>()?; .collect::<Result<Vec<_>, _>>()?;
Ok(unifier.add_ty(TypeEnum::TTuple { ty: tys })) Ok(unifier.add_ty(TypeEnum::TTuple { ty: tys }))
@ -554,10 +437,9 @@ pub fn get_type_from_type_annotation_kinds(
/// considered to be type variables associated with the class \ /// considered to be type variables associated with the class \
/// \ /// \
/// But note that here we do not make a duplication of `T`, `V`, we directly /// But note that here we do not make a duplication of `T`, `V`, we directly
/// use them as they are in the [`TopLevelDef::Class`] since those in the /// use them as they are in the TopLevelDef::Class since those in the
/// `TopLevelDef::Class.type_vars` will be substitute later when seeing applications/instantiations /// TopLevelDef::Class.type_vars will be substitute later when seeing applications/instantiations
/// the Type of their fields and methods will also be subst when application/instantiation /// the Type of their fields and methods will also be subst when application/instantiation
#[must_use]
pub fn make_self_type_annotation(type_vars: &[Type], object_id: DefinitionId) -> TypeAnnotation { pub fn make_self_type_annotation(type_vars: &[Type], object_id: DefinitionId) -> TypeAnnotation {
TypeAnnotation::CustomClass { TypeAnnotation::CustomClass {
id: object_id, id: object_id,
@ -568,25 +450,27 @@ pub fn make_self_type_annotation(type_vars: &[Type], object_id: DefinitionId) ->
/// get all the occurences of type vars contained in a type annotation /// get all the occurences of type vars contained in a type annotation
/// e.g. `A[int, B[T], V, virtual[C[G]]]` => [T, V, G] /// e.g. `A[int, B[T], V, virtual[C[G]]]` => [T, V, G]
/// this function will not make a duplicate of type var /// this function will not make a duplicate of type var
#[must_use]
pub fn get_type_var_contained_in_type_annotation(ann: &TypeAnnotation) -> Vec<TypeAnnotation> { pub fn get_type_var_contained_in_type_annotation(ann: &TypeAnnotation) -> Vec<TypeAnnotation> {
let mut result: Vec<TypeAnnotation> = Vec::new(); let mut result: Vec<TypeAnnotation> = Vec::new();
match ann { match ann {
TypeAnnotation::TypeVar(..) => result.push(ann.clone()), TypeAnnotation::TypeVar(..) => result.push(ann.clone()),
TypeAnnotation::Virtual(ann) | TypeAnnotation::List(ann) => { TypeAnnotation::Virtual(ann) => {
result.extend(get_type_var_contained_in_type_annotation(ann.as_ref())); result.extend(get_type_var_contained_in_type_annotation(ann.as_ref()))
} }
TypeAnnotation::CustomClass { params, .. } => { TypeAnnotation::CustomClass { params, .. } => {
for p in params { for p in params {
result.extend(get_type_var_contained_in_type_annotation(p)); result.extend(get_type_var_contained_in_type_annotation(p));
} }
} }
TypeAnnotation::List(ann) => {
result.extend(get_type_var_contained_in_type_annotation(ann.as_ref()))
}
TypeAnnotation::Tuple(anns) => { TypeAnnotation::Tuple(anns) => {
for a in anns { for a in anns {
result.extend(get_type_var_contained_in_type_annotation(a)); result.extend(get_type_var_contained_in_type_annotation(a));
} }
} }
TypeAnnotation::Primitive(..) | TypeAnnotation::Literal { .. } => {} TypeAnnotation::Primitive(..) => {}
} }
result result
} }
@ -601,17 +485,18 @@ pub fn check_overload_type_annotation_compatible(
(TypeAnnotation::Primitive(a), TypeAnnotation::Primitive(b)) => a == b, (TypeAnnotation::Primitive(a), TypeAnnotation::Primitive(b)) => a == b,
(TypeAnnotation::TypeVar(a), TypeAnnotation::TypeVar(b)) => { (TypeAnnotation::TypeVar(a), TypeAnnotation::TypeVar(b)) => {
let a = unifier.get_ty(*a); let a = unifier.get_ty(*a);
let a = &*a; let a = a.deref();
let b = unifier.get_ty(*b); let b = unifier.get_ty(*b);
let b = &*b; let b = b.deref();
let ( if let (
TypeEnum::TVar { id: a, fields: None, .. }, TypeEnum::TVar { id: a, fields: None, .. },
TypeEnum::TVar { id: b, fields: None, .. }, TypeEnum::TVar { id: b, fields: None, .. },
) = (a, b) else { ) = (a, b)
{
a == b
} else {
unreachable!("must be type var") unreachable!("must be type var")
}; }
a == b
} }
(TypeAnnotation::Virtual(a), TypeAnnotation::Virtual(b)) (TypeAnnotation::Virtual(a), TypeAnnotation::Virtual(b))
| (TypeAnnotation::List(a), TypeAnnotation::List(b)) => { | (TypeAnnotation::List(a), TypeAnnotation::List(b)) => {
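One side of the `type_annotation.rs` hunks above adds a `Literal` variant to `TypeAnnotation` and both sides keep two recursive helpers: `stringify`, which renders annotations as `virtual[..]`, `list[..]`, `tuple[..]` and `Class[params]`, and `get_type_var_contained_in_type_annotation`, which collects every type variable an annotation mentions. The sketch below mirrors both recursions over a toy `Annotation` enum; the real variants carry unifier `Type`s, `DefinitionId`s and `Constant`s rather than the plain strings and integers assumed here.

```
/// Toy analogue of `TypeAnnotation`: a recursive description of a type annotation.
#[derive(Debug)]
enum Annotation {
    Primitive(&'static str),
    TypeVar(&'static str),
    Virtual(Box<Annotation>),
    List(Box<Annotation>),
    Tuple(Vec<Annotation>),
    CustomClass { name: &'static str, params: Vec<Annotation> },
    Literal(Vec<i64>),
}

impl Annotation {
    /// Render the annotation in the style used by the `stringify` above.
    fn stringify(&self) -> String {
        use Annotation::*;
        match self {
            Primitive(name) | TypeVar(name) => (*name).to_string(),
            Virtual(inner) => format!("virtual[{}]", inner.stringify()),
            List(inner) => format!("list[{}]", inner.stringify()),
            Tuple(elems) => format!(
                "tuple[{}]",
                elems.iter().map(Self::stringify).collect::<Vec<_>>().join(", ")
            ),
            CustomClass { name, params } => {
                let param_list =
                    params.iter().map(Self::stringify).collect::<Vec<_>>().join(", ");
                if param_list.is_empty() {
                    (*name).to_string()
                } else {
                    format!("{name}[{param_list}]")
                }
            }
            Literal(values) => format!(
                "Literal({})",
                values.iter().map(i64::to_string).collect::<Vec<_>>().join(", ")
            ),
        }
    }
}

/// Collect every type variable mentioned anywhere in the annotation, as the
/// `get_type_var_contained_in_type_annotation` helper above does.
fn type_vars(ann: &Annotation) -> Vec<&'static str> {
    use Annotation::*;
    match ann {
        TypeVar(name) => vec![*name],
        Virtual(inner) | List(inner) => type_vars(inner),
        Tuple(elems) => elems.iter().flat_map(type_vars).collect(),
        CustomClass { params, .. } => params.iter().flat_map(type_vars).collect(),
        Primitive(..) | Literal(..) => vec![],
    }
}

fn main() {
    let ann = Annotation::CustomClass {
        name: "A",
        params: vec![
            Annotation::Primitive("int32"),
            Annotation::List(Box::new(Annotation::TypeVar("T"))),
            Annotation::Virtual(Box::new(Annotation::TypeVar("V"))),
        ],
    };
    println!("{}", ann.stringify()); // A[int32, list[T], virtual[V]]
    println!("{:?}", type_vars(&ann)); // ["T", "V"]

    let lit = Annotation::Tuple(vec![
        Annotation::Literal(vec![1, 2]),
        Annotation::Primitive("bool"),
    ]);
    println!("{}", lit.stringify()); // tuple[Literal(1, 2), bool]
}
```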

View File

@ -2,15 +2,13 @@ use crate::typecheck::typedef::TypeEnum;
use super::type_inferencer::Inferencer; use super::type_inferencer::Inferencer;
use super::typedef::Type; use super::typedef::Type;
use nac3parser::ast::{self, Constant, Expr, ExprKind, Operator::{LShift, RShift}, Stmt, StmtKind, StrRef}; use nac3parser::ast::{self, Expr, ExprKind, Stmt, StmtKind, StrRef};
use std::{collections::HashSet, iter::once}; use std::{collections::HashSet, iter::once};
impl<'a> Inferencer<'a> { impl<'a> Inferencer<'a> {
fn should_have_value(&mut self, expr: &Expr<Option<Type>>) -> Result<(), HashSet<String>> { fn should_have_value(&mut self, expr: &Expr<Option<Type>>) -> Result<(), String> {
if matches!(expr.custom, Some(ty) if self.unifier.unioned(ty, self.primitives.none)) { if matches!(expr.custom, Some(ty) if self.unifier.unioned(ty, self.primitives.none)) {
Err(HashSet::from([ Err(format!("Error at {}: cannot have value none", expr.location))
format!("Error at {}: cannot have value none", expr.location),
]))
} else { } else {
Ok(()) Ok(())
} }
@ -20,11 +18,10 @@ impl<'a> Inferencer<'a> {
&mut self, &mut self,
pattern: &Expr<Option<Type>>, pattern: &Expr<Option<Type>>,
defined_identifiers: &mut HashSet<StrRef>, defined_identifiers: &mut HashSet<StrRef>,
) -> Result<(), HashSet<String>> { ) -> Result<(), String> {
match &pattern.node { match &pattern.node {
ExprKind::Name { id, .. } if id == &"none".into() => Err(HashSet::from([ ast::ExprKind::Name { id, .. } if id == &"none".into() =>
format!("cannot assign to a `none` (at {})", pattern.location), Err(format!("cannot assign to a `none` (at {})", pattern.location)),
])),
ExprKind::Name { id, .. } => { ExprKind::Name { id, .. } => {
if !defined_identifiers.contains(id) { if !defined_identifiers.contains(id) {
defined_identifiers.insert(*id); defined_identifiers.insert(*id);
@ -33,7 +30,7 @@ impl<'a> Inferencer<'a> {
Ok(()) Ok(())
} }
ExprKind::Tuple { elts, .. } => { ExprKind::Tuple { elts, .. } => {
for elt in elts { for elt in elts.iter() {
self.check_pattern(elt, defined_identifiers)?; self.check_pattern(elt, defined_identifiers)?;
self.should_have_value(elt)?; self.should_have_value(elt)?;
} }
@ -44,19 +41,15 @@ impl<'a> Inferencer<'a> {
self.should_have_value(value)?; self.should_have_value(value)?;
self.check_expr(slice, defined_identifiers)?; self.check_expr(slice, defined_identifiers)?;
if let TypeEnum::TTuple { .. } = &*self.unifier.get_ty(value.custom.unwrap()) { if let TypeEnum::TTuple { .. } = &*self.unifier.get_ty(value.custom.unwrap()) {
return Err(HashSet::from([ return Err(format!(
format!( "Error at {}: cannot assign to tuple element",
"Error at {}: cannot assign to tuple element", value.location
value.location ));
),
]))
} }
Ok(()) Ok(())
} }
ExprKind::Constant { .. } => { ExprKind::Constant { .. } => {
Err(HashSet::from([ Err(format!("cannot assign to a constant (at {})", pattern.location))
format!("cannot assign to a constant (at {})", pattern.location),
]))
} }
_ => self.check_expr(pattern, defined_identifiers), _ => self.check_expr(pattern, defined_identifiers),
} }
@ -66,17 +59,15 @@ impl<'a> Inferencer<'a> {
&mut self, &mut self,
expr: &Expr<Option<Type>>, expr: &Expr<Option<Type>>,
defined_identifiers: &mut HashSet<StrRef>, defined_identifiers: &mut HashSet<StrRef>,
) -> Result<(), HashSet<String>> { ) -> Result<(), String> {
// there are some cases where the custom field is None // there are some cases where the custom field is None
if let Some(ty) = &expr.custom { if let Some(ty) = &expr.custom {
if !matches!(&expr.node, ExprKind::Constant { value: Constant::Ellipsis, .. }) && !self.unifier.is_concrete(*ty, &self.function_data.bound_variables) { if !self.unifier.is_concrete(*ty, &self.function_data.bound_variables) {
return Err(HashSet::from([ return Err(format!(
format!( "expected concrete type at {} but got {}",
"expected concrete type at {} but got {}", expr.location,
expr.location, self.unifier.get_ty(*ty).get_type_name()
self.unifier.get_ty(*ty).get_type_name() ));
)
]))
} }
} }
match &expr.node { match &expr.node {
@ -96,12 +87,10 @@ impl<'a> Inferencer<'a> {
self.defined_identifiers.insert(*id); self.defined_identifiers.insert(*id);
} }
Err(e) => { Err(e) => {
return Err(HashSet::from([ return Err(format!(
format!( "type error at identifier `{}` ({}) at {}",
"type error at identifier `{}` ({}) at {}", id, e, expr.location
id, e, expr.location ));
)
]))
} }
} }
} }
@ -109,7 +98,7 @@ impl<'a> Inferencer<'a> {
ExprKind::List { elts, .. } ExprKind::List { elts, .. }
| ExprKind::Tuple { elts, .. } | ExprKind::Tuple { elts, .. }
| ExprKind::BoolOp { values: elts, .. } => { | ExprKind::BoolOp { values: elts, .. } => {
for elt in elts { for elt in elts.iter() {
self.check_expr(elt, defined_identifiers)?; self.check_expr(elt, defined_identifiers)?;
self.should_have_value(elt)?; self.should_have_value(elt)?;
} }
@ -118,29 +107,11 @@ impl<'a> Inferencer<'a> {
self.check_expr(value, defined_identifiers)?; self.check_expr(value, defined_identifiers)?;
self.should_have_value(value)?; self.should_have_value(value)?;
} }
ExprKind::BinOp { left, op, right } => { ExprKind::BinOp { left, right, .. } => {
self.check_expr(left, defined_identifiers)?; self.check_expr(left, defined_identifiers)?;
self.check_expr(right, defined_identifiers)?; self.check_expr(right, defined_identifiers)?;
self.should_have_value(left)?; self.should_have_value(left)?;
self.should_have_value(right)?; self.should_have_value(right)?;
// Check whether a bitwise shift has a negative RHS constant value
if *op == LShift || *op == RShift {
if let ExprKind::Constant { value, .. } = &right.node {
let Constant::Int(rhs_val) = value else {
unreachable!()
};
if *rhs_val < 0 {
return Err(HashSet::from([
format!(
"shift count is negative at {}",
right.location
),
]))
}
}
}
} }
ExprKind::UnaryOp { operand, .. } => { ExprKind::UnaryOp { operand, .. } => {
self.check_expr(operand, defined_identifiers)?; self.check_expr(operand, defined_identifiers)?;
@ -170,7 +141,7 @@ impl<'a> Inferencer<'a> {
} }
ExprKind::Lambda { args, body } => { ExprKind::Lambda { args, body } => {
let mut defined_identifiers = defined_identifiers.clone(); let mut defined_identifiers = defined_identifiers.clone();
for arg in &args.args { for arg in args.args.iter() {
// TODO: should we check the types here? // TODO: should we check the types here?
if !defined_identifiers.contains(&arg.node.arg) { if !defined_identifiers.contains(&arg.node.arg) {
defined_identifiers.insert(arg.node.arg); defined_identifiers.insert(arg.node.arg);
@ -208,45 +179,24 @@ impl<'a> Inferencer<'a> {
Ok(()) Ok(())
} }
/// Check that the return value is a non-`alloca` type, effectively only allowing primitive types.
///
/// This is a workaround preventing the caller from using a variable `alloca`-ed in the body, which
/// is freed when the function returns.
fn check_return_value_ty(&mut self, ret_ty: Type) -> bool {
match &*self.unifier.get_ty_immutable(ret_ty) {
TypeEnum::TObj { .. } => {
[
self.primitives.int32,
self.primitives.int64,
self.primitives.uint32,
self.primitives.uint64,
self.primitives.float,
self.primitives.bool,
].iter().any(|allowed_ty| self.unifier.unioned(ret_ty, *allowed_ty))
}
TypeEnum::TTuple { ty } => ty.iter().all(|t| self.check_return_value_ty(*t)),
_ => false,
}
}
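The `check_return_value_ty` helper added above only accepts primitive scalars and tuples built from them, because anything `alloca`-ed in the callee's stack frame would dangle once the function returns. A minimal standalone sketch of the same recursive shape check, using a toy enum in place of the unifier's `TypeEnum` (all names below are illustrative stand-ins, not part of the diff):

```
enum ReturnTy {
    Primitive,            // int32 / int64 / uint32 / uint64 / float / bool
    Tuple(Vec<ReturnTy>), // accepted only if every element is accepted
    Other,                // lists, class instances, ... (alloca-backed)
}

fn is_valid_return(ty: &ReturnTy) -> bool {
    match ty {
        ReturnTy::Primitive => true,
        ReturnTy::Tuple(elems) => elems.iter().all(is_valid_return),
        ReturnTy::Other => false,
    }
}

fn main() {
    let ok = ReturnTy::Tuple(vec![ReturnTy::Primitive, ReturnTy::Primitive]);
    let bad = ReturnTy::Tuple(vec![ReturnTy::Primitive, ReturnTy::Other]);
    assert!(is_valid_return(&ok));
    assert!(!is_valid_return(&bad));
}
```

The real implementation additionally asks the unifier whether an object type is unioned with one of the allowed primitive types, which the toy `Primitive` variant glosses over.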
// check statements for proper identifier def-use and return on all paths // check statements for proper identifier def-use and return on all paths
fn check_stmt( fn check_stmt(
&mut self, &mut self,
stmt: &Stmt<Option<Type>>, stmt: &Stmt<Option<Type>>,
defined_identifiers: &mut HashSet<StrRef>, defined_identifiers: &mut HashSet<StrRef>,
) -> Result<bool, HashSet<String>> { ) -> Result<bool, String> {
match &stmt.node { match &stmt.node {
StmtKind::For { target, iter, body, orelse, .. } => { StmtKind::For { target, iter, body, orelse, .. } => {
self.check_expr(iter, defined_identifiers)?; self.check_expr(iter, defined_identifiers)?;
self.should_have_value(iter)?; self.should_have_value(iter)?;
let mut local_defined_identifiers = defined_identifiers.clone(); let mut local_defined_identifiers = defined_identifiers.clone();
for stmt in orelse { for stmt in orelse.iter() {
self.check_stmt(stmt, &mut local_defined_identifiers)?; self.check_stmt(stmt, &mut local_defined_identifiers)?;
} }
let mut local_defined_identifiers = defined_identifiers.clone(); let mut local_defined_identifiers = defined_identifiers.clone();
self.check_pattern(target, &mut local_defined_identifiers)?; self.check_pattern(target, &mut local_defined_identifiers)?;
self.should_have_value(target)?; self.should_have_value(target)?;
for stmt in body { for stmt in body.iter() {
self.check_stmt(stmt, &mut local_defined_identifiers)?; self.check_stmt(stmt, &mut local_defined_identifiers)?;
} }
Ok(false) Ok(false)
@ -259,7 +209,7 @@ impl<'a> Inferencer<'a> {
let body_returned = self.check_block(body, &mut body_identifiers)?; let body_returned = self.check_block(body, &mut body_identifiers)?;
let orelse_returned = self.check_block(orelse, &mut orelse_identifiers)?; let orelse_returned = self.check_block(orelse, &mut orelse_identifiers)?;
for ident in &body_identifiers { for ident in body_identifiers.iter() {
if !defined_identifiers.contains(ident) && orelse_identifiers.contains(ident) { if !defined_identifiers.contains(ident) && orelse_identifiers.contains(ident) {
defined_identifiers.insert(*ident); defined_identifiers.insert(*ident);
} }
@ -276,7 +226,7 @@ impl<'a> Inferencer<'a> {
} }
StmtKind::With { items, body, .. } => { StmtKind::With { items, body, .. } => {
let mut new_defined_identifiers = defined_identifiers.clone(); let mut new_defined_identifiers = defined_identifiers.clone();
for item in items { for item in items.iter() {
self.check_expr(&item.context_expr, defined_identifiers)?; self.check_expr(&item.context_expr, defined_identifiers)?;
if let Some(var) = item.optional_vars.as_ref() { if let Some(var) = item.optional_vars.as_ref() {
self.check_pattern(var, &mut new_defined_identifiers)?; self.check_pattern(var, &mut new_defined_identifiers)?;
@ -288,7 +238,7 @@ impl<'a> Inferencer<'a> {
StmtKind::Try { body, handlers, orelse, finalbody, .. } => { StmtKind::Try { body, handlers, orelse, finalbody, .. } => {
self.check_block(body, &mut defined_identifiers.clone())?; self.check_block(body, &mut defined_identifiers.clone())?;
self.check_block(orelse, &mut defined_identifiers.clone())?; self.check_block(orelse, &mut defined_identifiers.clone())?;
for handler in handlers { for handler in handlers.iter() {
let mut defined_identifiers = defined_identifiers.clone(); let mut defined_identifiers = defined_identifiers.clone();
let ast::ExcepthandlerKind::ExceptHandler { name, body, .. } = &handler.node; let ast::ExcepthandlerKind::ExceptHandler { name, body, .. } = &handler.node;
if let Some(name) = name { if let Some(name) = name {
@ -323,27 +273,6 @@ impl<'a> Inferencer<'a> {
if let Some(value) = value { if let Some(value) = value {
self.check_expr(value, defined_identifiers)?; self.check_expr(value, defined_identifiers)?;
self.should_have_value(value)?; self.should_have_value(value)?;
// Check that the return value is a non-`alloca` type, effectively only allowing primitive types.
// This is a workaround preventing the caller from using a variable `alloca`-ed in the body, which
// is freed when the function returns.
if let Some(ret_ty) = value.custom {
// Explicitly allow ellipsis as a return value, as the type of the ellipsis is contextually
// inferred and just generates an unconditional assertion
if matches!(value.node, ExprKind::Constant { value: Constant::Ellipsis, .. }) {
return Ok(true)
}
if !self.check_return_value_ty(ret_ty) {
return Err(HashSet::from([
format!(
"return value of type {} must be a primitive or a tuple of primitives at {}",
self.unifier.stringify(ret_ty),
value.location,
),
]))
}
}
} }
Ok(true) Ok(true)
} }
@ -362,11 +291,11 @@ impl<'a> Inferencer<'a> {
&mut self, &mut self,
block: &[Stmt<Option<Type>>], block: &[Stmt<Option<Type>>],
defined_identifiers: &mut HashSet<StrRef>, defined_identifiers: &mut HashSet<StrRef>,
) -> Result<bool, HashSet<String>> { ) -> Result<bool, String> {
let mut ret = false; let mut ret = false;
for stmt in block { for stmt in block {
if ret { if ret {
eprintln!("warning: dead code at {}\n", stmt.location); return Err(format!("dead code at {:?}", stmt.location));
} }
if self.check_stmt(stmt, defined_identifiers)? { if self.check_stmt(stmt, defined_identifiers)? {
ret = true; ret = true;
View File
@ -1,18 +1,12 @@
use std::cmp::max;
use crate::symbol_resolver::SymbolValue;
use crate::toplevel::helper::PRIMITIVE_DEF_IDS;
use crate::toplevel::numpy::{make_ndarray_ty, unpack_ndarray_var_tys};
use crate::typecheck::{ use crate::typecheck::{
type_inferencer::*, type_inferencer::*,
typedef::{FunSignature, FuncArg, Type, TypeEnum, Unifier, VarMap}, typedef::{FunSignature, FuncArg, Type, TypeEnum, Unifier},
}; };
use nac3parser::ast::StrRef; use nac3parser::ast::{self, StrRef};
use nac3parser::ast::{Cmpop, Operator, Unaryop}; use nac3parser::ast::{Cmpop, Operator, Unaryop};
use std::collections::HashMap; use std::collections::HashMap;
use std::rc::Rc; use std::rc::Rc;
use itertools::Itertools;
#[must_use]
pub fn binop_name(op: &Operator) -> &'static str { pub fn binop_name(op: &Operator) -> &'static str {
match op { match op {
Operator::Add => "__add__", Operator::Add => "__add__",
@ -31,7 +25,6 @@ pub fn binop_name(op: &Operator) -> &'static str {
} }
} }
#[must_use]
pub fn binop_assign_name(op: &Operator) -> &'static str { pub fn binop_assign_name(op: &Operator) -> &'static str {
match op { match op {
Operator::Add => "__iadd__", Operator::Add => "__iadd__",
@ -50,7 +43,6 @@ pub fn binop_assign_name(op: &Operator) -> &'static str {
} }
} }
#[must_use]
pub fn unaryop_name(op: &Unaryop) -> &'static str { pub fn unaryop_name(op: &Unaryop) -> &'static str {
match op { match op {
Unaryop::UAdd => "__pos__", Unaryop::UAdd => "__pos__",
@ -60,7 +52,6 @@ pub fn unaryop_name(op: &Unaryop) -> &'static str {
} }
} }
#[must_use]
pub fn comparison_name(op: &Cmpop) -> Option<&'static str> { pub fn comparison_name(op: &Cmpop) -> Option<&'static str> {
match op { match op {
Cmpop::Lt => Some("__lt__"), Cmpop::Lt => Some("__lt__"),
@ -95,8 +86,8 @@ pub fn impl_binop(
_store: &PrimitiveStore, _store: &PrimitiveStore,
ty: Type, ty: Type,
other_ty: &[Type], other_ty: &[Type],
ret_ty: Option<Type>, ret_ty: Type,
ops: &[Operator], ops: &[ast::Operator],
) { ) {
with_fields(unifier, ty, |unifier, fields| { with_fields(unifier, ty, |unifier, fields| {
let (other_ty, other_var_id) = if other_ty.len() == 1 { let (other_ty, other_var_id) = if other_ty.len() == 1 {
@ -105,15 +96,11 @@ pub fn impl_binop(
let (ty, var_id) = unifier.get_fresh_var_with_range(other_ty, Some("N".into()), None); let (ty, var_id) = unifier.get_fresh_var_with_range(other_ty, Some("N".into()), None);
(ty, Some(var_id)) (ty, Some(var_id))
}; };
let function_vars = if let Some(var_id) = other_var_id { let function_vars = if let Some(var_id) = other_var_id {
vec![(var_id, other_ty)].into_iter().collect::<VarMap>() vec![(var_id, other_ty)].into_iter().collect::<HashMap<_, _>>()
} else { } else {
VarMap::new() HashMap::new()
}; };
let ret_ty = ret_ty.unwrap_or_else(|| unifier.get_fresh_var(None, None).0);
for op in ops { for op in ops {
fields.insert(binop_name(op).into(), { fields.insert(binop_name(op).into(), {
( (
@ -148,17 +135,15 @@ pub fn impl_binop(
}); });
} }
pub fn impl_unaryop(unifier: &mut Unifier, ty: Type, ret_ty: Option<Type>, ops: &[Unaryop]) { pub fn impl_unaryop(unifier: &mut Unifier, ty: Type, ret_ty: Type, ops: &[ast::Unaryop]) {
with_fields(unifier, ty, |unifier, fields| { with_fields(unifier, ty, |unifier, fields| {
let ret_ty = ret_ty.unwrap_or_else(|| unifier.get_fresh_var(None, None).0);
for op in ops { for op in ops {
fields.insert( fields.insert(
unaryop_name(op).into(), unaryop_name(op).into(),
( (
unifier.add_ty(TypeEnum::TFunc(FunSignature { unifier.add_ty(TypeEnum::TFunc(FunSignature {
ret: ret_ty, ret: ret_ty,
vars: VarMap::new(), vars: HashMap::new(),
args: vec![], args: vec![],
})), })),
false, false,
@ -170,35 +155,19 @@ pub fn impl_unaryop(unifier: &mut Unifier, ty: Type, ret_ty: Option<Type>, ops:
pub fn impl_cmpop( pub fn impl_cmpop(
unifier: &mut Unifier, unifier: &mut Unifier,
_store: &PrimitiveStore, store: &PrimitiveStore,
ty: Type, ty: Type,
other_ty: &[Type], other_ty: Type,
ops: &[Cmpop], ops: &[ast::Cmpop],
ret_ty: Option<Type>,
) { ) {
with_fields(unifier, ty, |unifier, fields| { with_fields(unifier, ty, |unifier, fields| {
let (other_ty, other_var_id) = if other_ty.len() == 1 {
(other_ty[0], None)
} else {
let (ty, var_id) = unifier.get_fresh_var_with_range(other_ty, Some("N".into()), None);
(ty, Some(var_id))
};
let function_vars = if let Some(var_id) = other_var_id {
vec![(var_id, other_ty)].into_iter().collect::<VarMap>()
} else {
VarMap::new()
};
let ret_ty = ret_ty.unwrap_or_else(|| unifier.get_fresh_var(None, None).0);
for op in ops { for op in ops {
fields.insert( fields.insert(
comparison_name(op).unwrap().into(), comparison_name(op).unwrap().into(),
( (
unifier.add_ty(TypeEnum::TFunc(FunSignature { unifier.add_ty(TypeEnum::TFunc(FunSignature {
ret: ret_ty, ret: store.bool,
vars: function_vars.clone(), vars: HashMap::new(),
args: vec![FuncArg { args: vec![FuncArg {
ty: other_ty, ty: other_ty,
default_value: None, default_value: None,
@ -212,13 +181,13 @@ pub fn impl_cmpop(
}); });
} }
/// `Add`, `Sub`, `Mult` /// Add, Sub, Mult
pub fn impl_basic_arithmetic( pub fn impl_basic_arithmetic(
unifier: &mut Unifier, unifier: &mut Unifier,
store: &PrimitiveStore, store: &PrimitiveStore,
ty: Type, ty: Type,
other_ty: &[Type], other_ty: &[Type],
ret_ty: Option<Type>, ret_ty: Type,
) { ) {
impl_binop( impl_binop(
unifier, unifier,
@ -226,368 +195,94 @@ pub fn impl_basic_arithmetic(
ty, ty,
other_ty, other_ty,
ret_ty, ret_ty,
&[Operator::Add, Operator::Sub, Operator::Mult], &[ast::Operator::Add, ast::Operator::Sub, ast::Operator::Mult],
); )
} }
/// `Pow` /// Pow
pub fn impl_pow( pub fn impl_pow(
unifier: &mut Unifier, unifier: &mut Unifier,
store: &PrimitiveStore, store: &PrimitiveStore,
ty: Type, ty: Type,
other_ty: &[Type], other_ty: &[Type],
ret_ty: Option<Type>, ret_ty: Type,
) { ) {
impl_binop(unifier, store, ty, other_ty, ret_ty, &[Operator::Pow]); impl_binop(unifier, store, ty, other_ty, ret_ty, &[ast::Operator::Pow])
} }
/// `BitOr`, `BitXor`, `BitAnd` /// BitOr, BitXor, BitAnd
pub fn impl_bitwise_arithmetic(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type) { pub fn impl_bitwise_arithmetic(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type) {
impl_binop( impl_binop(
unifier, unifier,
store, store,
ty, ty,
&[ty], &[ty],
Some(ty), ty,
&[Operator::BitAnd, Operator::BitOr, Operator::BitXor], &[ast::Operator::BitAnd, ast::Operator::BitOr, ast::Operator::BitXor],
); )
} }
/// `LShift`, `RShift` /// LShift, RShift
pub fn impl_bitwise_shift(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type) { pub fn impl_bitwise_shift(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type) {
impl_binop(unifier, store, ty, &[store.int32, store.uint32], Some(ty), &[Operator::LShift, Operator::RShift]); impl_binop(unifier, store, ty, &[ty], ty, &[ast::Operator::LShift, ast::Operator::RShift])
} }
/// `Div` /// Div
pub fn impl_div( pub fn impl_div(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type, other_ty: &[Type]) {
unifier: &mut Unifier, impl_binop(unifier, store, ty, other_ty, store.float, &[ast::Operator::Div])
store: &PrimitiveStore,
ty: Type,
other_ty: &[Type],
ret_ty: Option<Type>,
) {
impl_binop(unifier, store, ty, other_ty, ret_ty, &[Operator::Div]);
} }
/// `FloorDiv` /// FloorDiv
pub fn impl_floordiv( pub fn impl_floordiv(
unifier: &mut Unifier, unifier: &mut Unifier,
store: &PrimitiveStore, store: &PrimitiveStore,
ty: Type, ty: Type,
other_ty: &[Type], other_ty: &[Type],
ret_ty: Option<Type>, ret_ty: Type,
) { ) {
impl_binop(unifier, store, ty, other_ty, ret_ty, &[Operator::FloorDiv]); impl_binop(unifier, store, ty, other_ty, ret_ty, &[ast::Operator::FloorDiv])
} }
/// `Mod` /// Mod
pub fn impl_mod( pub fn impl_mod(
unifier: &mut Unifier, unifier: &mut Unifier,
store: &PrimitiveStore, store: &PrimitiveStore,
ty: Type, ty: Type,
other_ty: &[Type], other_ty: &[Type],
ret_ty: Option<Type>, ret_ty: Type,
) { ) {
impl_binop(unifier, store, ty, other_ty, ret_ty, &[Operator::Mod]); impl_binop(unifier, store, ty, other_ty, ret_ty, &[ast::Operator::Mod])
} }
/// [`Operator::MatMult`] /// UAdd, USub
pub fn impl_matmul( pub fn impl_sign(unifier: &mut Unifier, _store: &PrimitiveStore, ty: Type) {
unifier: &mut Unifier, impl_unaryop(unifier, ty, ty, &[ast::Unaryop::UAdd, ast::Unaryop::USub])
store: &PrimitiveStore,
ty: Type,
other_ty: &[Type],
ret_ty: Option<Type>,
) {
impl_binop(unifier, store, ty, other_ty, ret_ty, &[Operator::MatMult]);
} }
/// `UAdd`, `USub` /// Invert
pub fn impl_sign(unifier: &mut Unifier, _store: &PrimitiveStore, ty: Type, ret_ty: Option<Type>) { pub fn impl_invert(unifier: &mut Unifier, _store: &PrimitiveStore, ty: Type) {
impl_unaryop(unifier, ty, ret_ty, &[Unaryop::UAdd, Unaryop::USub]); impl_unaryop(unifier, ty, ty, &[ast::Unaryop::Invert])
} }
/// `Invert` /// Not
pub fn impl_invert(unifier: &mut Unifier, _store: &PrimitiveStore, ty: Type, ret_ty: Option<Type>) { pub fn impl_not(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type) {
impl_unaryop(unifier, ty, ret_ty, &[Unaryop::Invert]); impl_unaryop(unifier, ty, store.bool, &[ast::Unaryop::Not])
} }
/// `Not` /// Lt, LtE, Gt, GtE
pub fn impl_not(unifier: &mut Unifier, _store: &PrimitiveStore, ty: Type, ret_ty: Option<Type>) { pub fn impl_comparison(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type, other_ty: Type) {
impl_unaryop(unifier, ty, ret_ty, &[Unaryop::Not]);
}
/// `Lt`, `LtE`, `Gt`, `GtE`
pub fn impl_comparison(
unifier: &mut Unifier,
store: &PrimitiveStore,
ty: Type,
other_ty: &[Type],
ret_ty: Option<Type>,
) {
impl_cmpop( impl_cmpop(
unifier, unifier,
store, store,
ty, ty,
other_ty, other_ty,
&[Cmpop::Lt, Cmpop::Gt, Cmpop::LtE, Cmpop::GtE], &[ast::Cmpop::Lt, ast::Cmpop::Gt, ast::Cmpop::LtE, ast::Cmpop::GtE],
ret_ty, )
);
} }
/// `Eq`, `NotEq` /// Eq, NotEq
pub fn impl_eq( pub fn impl_eq(unifier: &mut Unifier, store: &PrimitiveStore, ty: Type) {
unifier: &mut Unifier, impl_cmpop(unifier, store, ty, ty, &[ast::Cmpop::Eq, ast::Cmpop::NotEq])
store: &PrimitiveStore,
ty: Type,
other_ty: &[Type],
ret_ty: Option<Type>,
) {
impl_cmpop(unifier, store, ty, other_ty, &[Cmpop::Eq, Cmpop::NotEq], ret_ty);
}
/// Returns the expected return type of binary operations with at least one `ndarray` operand.
pub fn typeof_ndarray_broadcast(
unifier: &mut Unifier,
primitives: &PrimitiveStore,
left: Type,
right: Type,
) -> Result<Type, String> {
let is_left_ndarray = left.obj_id(unifier).is_some_and(|id| id == PRIMITIVE_DEF_IDS.ndarray);
let is_right_ndarray = right.obj_id(unifier).is_some_and(|id| id == PRIMITIVE_DEF_IDS.ndarray);
assert!(is_left_ndarray || is_right_ndarray);
if is_left_ndarray && is_right_ndarray {
// Perform broadcasting on two ndarray operands.
let (left_ty_dtype, left_ty_ndims) = unpack_ndarray_var_tys(unifier, left);
let (right_ty_dtype, right_ty_ndims) = unpack_ndarray_var_tys(unifier, right);
assert!(unifier.unioned(left_ty_dtype, right_ty_dtype));
let left_ty_ndims = match &*unifier.get_ty_immutable(left_ty_ndims) {
TypeEnum::TLiteral { values, .. } => values.clone(),
_ => unreachable!(),
};
let right_ty_ndims = match &*unifier.get_ty_immutable(right_ty_ndims) {
TypeEnum::TLiteral { values, .. } => values.clone(),
_ => unreachable!(),
};
let res_ndims = left_ty_ndims.into_iter()
.cartesian_product(right_ty_ndims)
.map(|(left, right)| {
let left_val = u64::try_from(left).unwrap();
let right_val = u64::try_from(right).unwrap();
max(left_val, right_val)
})
.unique()
.map(SymbolValue::U64)
.collect_vec();
let res_ndims = unifier.get_fresh_literal(res_ndims, None);
Ok(make_ndarray_ty(unifier, primitives, Some(left_ty_dtype), Some(res_ndims)))
} else {
let (ndarray_ty, scalar_ty) = if is_left_ndarray {
(left, right)
} else {
(right, left)
};
let (ndarray_ty_dtype, _) = unpack_ndarray_var_tys(unifier, ndarray_ty);
if unifier.unioned(ndarray_ty_dtype, scalar_ty) {
Ok(ndarray_ty)
} else {
let (expected_ty, actual_ty) = if is_left_ndarray {
(ndarray_ty_dtype, scalar_ty)
} else {
(scalar_ty, ndarray_ty_dtype)
};
Err(format!(
"Expected right-hand side operand to be {}, got {}",
unifier.stringify(expected_ty),
unifier.stringify(actual_ty),
))
}
}
}
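`typeof_ndarray_broadcast` above derives the result's `ndims` literal by pairing every candidate rank of the left operand with every candidate rank of the right one, keeping the larger of each pair, and dropping duplicates. A standalone sketch of just that step, assuming plain `u64` ranks stand in for the unifier's `SymbolValue` literals:

```
fn broadcast_ndims(left: &[u64], right: &[u64]) -> Vec<u64> {
    let mut res = Vec::new();
    for &l in left {
        for &r in right {
            // Broadcasting keeps the larger rank of each pairing.
            let rank = l.max(r);
            if !res.contains(&rank) {
                res.push(rank);
            }
        }
    }
    res
}

fn main() {
    // ndarray[_, Literal[1, 2]] combined with ndarray[_, Literal[2, 3]]
    // can only end up with 2 or 3 dimensions.
    assert_eq!(broadcast_ndims(&[1, 2], &[2, 3]), vec![2, 3]);
}
```

The hunk itself expresses the same computation with `cartesian_product` and `unique` from `itertools` before wrapping the result back into a fresh literal type.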
/// Returns the return type given a binary operator and its primitive operands.
pub fn typeof_binop(
unifier: &mut Unifier,
primitives: &PrimitiveStore,
op: &Operator,
lhs: Type,
rhs: Type,
) -> Result<Option<Type>, String> {
let is_left_ndarray = lhs.obj_id(unifier).is_some_and(|id| id == PRIMITIVE_DEF_IDS.ndarray);
let is_right_ndarray = rhs.obj_id(unifier).is_some_and(|id| id == PRIMITIVE_DEF_IDS.ndarray);
Ok(Some(match op {
Operator::Add
| Operator::Sub
| Operator::Mult
| Operator::Mod
| Operator::FloorDiv => {
if is_left_ndarray || is_right_ndarray {
typeof_ndarray_broadcast(unifier, primitives, lhs, rhs)?
} else if unifier.unioned(lhs, rhs) {
lhs
} else {
return Ok(None)
}
}
Operator::MatMult => {
let (_, lhs_ndims) = unpack_ndarray_var_tys(unifier, lhs);
let lhs_ndims = match &*unifier.get_ty_immutable(lhs_ndims) {
TypeEnum::TLiteral { values, .. } => {
assert_eq!(values.len(), 1);
u64::try_from(values[0].clone()).unwrap()
}
_ => unreachable!(),
};
let (_, rhs_ndims) = unpack_ndarray_var_tys(unifier, rhs);
let rhs_ndims = match &*unifier.get_ty_immutable(rhs_ndims) {
TypeEnum::TLiteral { values, .. } => {
assert_eq!(values.len(), 1);
u64::try_from(values[0].clone()).unwrap()
}
_ => unreachable!(),
};
match (lhs_ndims, rhs_ndims) {
(2, 2) => typeof_ndarray_broadcast(unifier, primitives, lhs, rhs)?,
(lhs, rhs) if lhs == 0 || rhs == 0 => {
return Err(format!(
"Input operand {} does not have enough dimensions (has {lhs}, requires {rhs})",
(rhs == 0) as u8
))
}
(lhs, rhs) => {
return Err(format!("ndarray.__matmul__ on {lhs}D and {rhs}D operands not supported"))
}
}
}
Operator::Div => {
if is_left_ndarray || is_right_ndarray {
typeof_ndarray_broadcast(unifier, primitives, lhs, rhs)?
} else if unifier.unioned(lhs, rhs) {
primitives.float
} else {
return Ok(None)
}
}
Operator::Pow => {
if is_left_ndarray || is_right_ndarray {
typeof_ndarray_broadcast(unifier, primitives, lhs, rhs)?
} else if [primitives.int32, primitives.int64, primitives.uint32, primitives.uint64, primitives.float].into_iter().any(|ty| unifier.unioned(lhs, ty)) {
lhs
} else {
return Ok(None)
}
}
Operator::LShift
| Operator::RShift => lhs,
Operator::BitOr
| Operator::BitXor
| Operator::BitAnd => {
if unifier.unioned(lhs, rhs) {
lhs
} else {
return Ok(None)
}
}
}))
}
pub fn typeof_unaryop(
unifier: &mut Unifier,
primitives: &PrimitiveStore,
op: &Unaryop,
operand: Type,
) -> Result<Option<Type>, String> {
let operand_obj_id = operand.obj_id(unifier);
if *op == Unaryop::Not && operand_obj_id.is_some_and(|id| id == primitives.ndarray.obj_id(unifier).unwrap()) {
return Err("The truth value of an array with more than one element is ambiguous".to_string())
}
Ok(match *op {
Unaryop::Not => {
match operand_obj_id {
Some(v) if v == PRIMITIVE_DEF_IDS.ndarray => Some(operand),
Some(_) => Some(primitives.bool),
_ => None
}
}
Unaryop::Invert => {
if operand_obj_id.is_some_and(|id| id == PRIMITIVE_DEF_IDS.bool) {
Some(primitives.int32)
} else if operand_obj_id.is_some_and(|id| PRIMITIVE_DEF_IDS.iter().any(|prim_id| id == prim_id)) {
Some(operand)
} else {
None
}
}
Unaryop::UAdd
| Unaryop::USub => {
if operand_obj_id.is_some_and(|id| id == PRIMITIVE_DEF_IDS.ndarray) {
let (dtype, _) = unpack_ndarray_var_tys(unifier, operand);
if dtype.obj_id(unifier).is_some_and(|id| id == PRIMITIVE_DEF_IDS.bool) {
return Err(if *op == Unaryop::UAdd {
"The ufunc 'positive' cannot be applied to ndarray[bool, N]".to_string()
} else {
"The numpy boolean negative, the `-` operator, is not supported, use the `~` operator function instead.".to_string()
})
}
Some(operand)
} else if operand_obj_id.is_some_and(|id| id == PRIMITIVE_DEF_IDS.bool) {
Some(primitives.int32)
} else if operand_obj_id.is_some_and(|id| PRIMITIVE_DEF_IDS.iter().any(|prim_id| id == prim_id)) {
Some(operand)
} else {
None
}
}
})
}
/// Returns the return type given a comparison operator and its primitive operands.
pub fn typeof_cmpop(
unifier: &mut Unifier,
primitives: &PrimitiveStore,
_op: &Cmpop,
lhs: Type,
rhs: Type,
) -> Result<Option<Type>, String> {
let is_left_ndarray = lhs
.obj_id(unifier)
.is_some_and(|id| id == PRIMITIVE_DEF_IDS.ndarray);
let is_right_ndarray = rhs
.obj_id(unifier)
.is_some_and(|id| id == PRIMITIVE_DEF_IDS.ndarray);
Ok(Some(if is_left_ndarray || is_right_ndarray {
let brd = typeof_ndarray_broadcast(unifier, primitives, lhs, rhs)?;
let (_, ndims) = unpack_ndarray_var_tys(unifier, brd);
make_ndarray_ty(unifier, primitives, Some(primitives.bool), Some(ndims))
} else if unifier.unioned(lhs, rhs) {
primitives.bool
} else {
return Ok(None)
}))
} }
pub fn set_primitives_magic_methods(store: &PrimitiveStore, unifier: &mut Unifier) { pub fn set_primitives_magic_methods(store: &PrimitiveStore, unifier: &mut Unifier) {
@ -598,63 +293,38 @@ pub fn set_primitives_magic_methods(store: &PrimitiveStore, unifier: &mut Unifie
bool: bool_t, bool: bool_t,
uint32: uint32_t, uint32: uint32_t,
uint64: uint64_t, uint64: uint64_t,
ndarray: ndarray_t,
.. ..
} = *store; } = *store;
let size_t = store.usize();
/* int ======== */ /* int ======== */
for t in [int32_t, int64_t, uint32_t, uint64_t] { for t in [int32_t, int64_t, uint32_t, uint64_t] {
let ndarray_int_t = make_ndarray_ty(unifier, store, Some(t), None); impl_basic_arithmetic(unifier, store, t, &[t], t);
impl_basic_arithmetic(unifier, store, t, &[t, ndarray_int_t], None); impl_pow(unifier, store, t, &[t], t);
impl_pow(unifier, store, t, &[t, ndarray_int_t], None);
impl_bitwise_arithmetic(unifier, store, t); impl_bitwise_arithmetic(unifier, store, t);
impl_bitwise_shift(unifier, store, t); impl_bitwise_shift(unifier, store, t);
impl_div(unifier, store, t, &[t, ndarray_int_t], None); impl_div(unifier, store, t, &[t]);
impl_floordiv(unifier, store, t, &[t, ndarray_int_t], None); impl_floordiv(unifier, store, t, &[t], t);
impl_mod(unifier, store, t, &[t, ndarray_int_t], None); impl_mod(unifier, store, t, &[t], t);
impl_invert(unifier, store, t, Some(t)); impl_invert(unifier, store, t);
impl_not(unifier, store, t, Some(bool_t)); impl_not(unifier, store, t);
impl_comparison(unifier, store, t, &[t, ndarray_int_t], None); impl_comparison(unifier, store, t, t);
impl_eq(unifier, store, t, &[t, ndarray_int_t], None); impl_eq(unifier, store, t);
} }
for t in [int32_t, int64_t] { for t in [int32_t, int64_t] {
impl_sign(unifier, store, t, Some(t)); impl_sign(unifier, store, t);
} }
/* float ======== */ /* float ======== */
let ndarray_float_t = make_ndarray_ty(unifier, store, Some(float_t), None); impl_basic_arithmetic(unifier, store, float_t, &[float_t], float_t);
let ndarray_int32_t = make_ndarray_ty(unifier, store, Some(int32_t), None); impl_pow(unifier, store, float_t, &[int32_t, float_t], float_t);
impl_basic_arithmetic(unifier, store, float_t, &[float_t, ndarray_float_t], None); impl_div(unifier, store, float_t, &[float_t]);
impl_pow(unifier, store, float_t, &[int32_t, float_t, ndarray_int32_t, ndarray_float_t], None); impl_floordiv(unifier, store, float_t, &[float_t], float_t);
impl_div(unifier, store, float_t, &[float_t, ndarray_float_t], None); impl_mod(unifier, store, float_t, &[float_t], float_t);
impl_floordiv(unifier, store, float_t, &[float_t, ndarray_float_t], None); impl_sign(unifier, store, float_t);
impl_mod(unifier, store, float_t, &[float_t, ndarray_float_t], None); impl_not(unifier, store, float_t);
impl_sign(unifier, store, float_t, Some(float_t)); impl_comparison(unifier, store, float_t, float_t);
impl_not(unifier, store, float_t, Some(bool_t)); impl_eq(unifier, store, float_t);
impl_comparison(unifier, store, float_t, &[float_t, ndarray_float_t], None);
impl_eq(unifier, store, float_t, &[float_t, ndarray_float_t], None);
/* bool ======== */ /* bool ======== */
let ndarray_bool_t = make_ndarray_ty(unifier, store, Some(bool_t), None); impl_not(unifier, store, bool_t);
impl_invert(unifier, store, bool_t, Some(int32_t)); impl_eq(unifier, store, bool_t);
impl_not(unifier, store, bool_t, Some(bool_t));
impl_sign(unifier, store, bool_t, Some(int32_t));
impl_eq(unifier, store, bool_t, &[bool_t, ndarray_bool_t], None);
/* ndarray ===== */
let ndarray_usized_ndims_tvar = unifier.get_fresh_const_generic_var(size_t, Some("ndarray_ndims".into()), None);
let ndarray_unsized_t = make_ndarray_ty(unifier, store, None, Some(ndarray_usized_ndims_tvar.0));
let (ndarray_dtype_t, _) = unpack_ndarray_var_tys(unifier, ndarray_t);
let (ndarray_unsized_dtype_t, _) = unpack_ndarray_var_tys(unifier, ndarray_unsized_t);
impl_basic_arithmetic(unifier, store, ndarray_t, &[ndarray_unsized_t, ndarray_unsized_dtype_t], None);
impl_pow(unifier, store, ndarray_t, &[ndarray_unsized_t, ndarray_unsized_dtype_t], None);
impl_div(unifier, store, ndarray_t, &[ndarray_t, ndarray_dtype_t], None);
impl_floordiv(unifier, store, ndarray_t, &[ndarray_unsized_t, ndarray_unsized_dtype_t], None);
impl_mod(unifier, store, ndarray_t, &[ndarray_unsized_t, ndarray_unsized_dtype_t], None);
impl_matmul(unifier, store, ndarray_t, &[ndarray_t], Some(ndarray_t));
impl_sign(unifier, store, ndarray_t, Some(ndarray_t));
impl_invert(unifier, store, ndarray_t, Some(ndarray_t));
impl_eq(unifier, store, ndarray_t, &[ndarray_unsized_t, ndarray_unsized_dtype_t], None);
impl_comparison(unifier, store, ndarray_t, &[ndarray_unsized_t, ndarray_unsized_dtype_t], None);
} }
View File
@ -43,18 +43,15 @@ pub struct TypeError {
} }
impl TypeError { impl TypeError {
#[must_use]
pub fn new(kind: TypeErrorKind, loc: Option<Location>) -> TypeError { pub fn new(kind: TypeErrorKind, loc: Option<Location>) -> TypeError {
TypeError { kind, loc } TypeError { kind, loc }
} }
#[must_use]
pub fn at(mut self, loc: Option<Location>) -> TypeError { pub fn at(mut self, loc: Option<Location>) -> TypeError {
self.loc = self.loc.or(loc); self.loc = self.loc.or(loc);
self self
} }
#[must_use]
pub fn to_display(self, unifier: &Unifier) -> DisplayTypeError { pub fn to_display(self, unifier: &Unifier) -> DisplayTypeError {
DisplayTypeError { err: self, unifier } DisplayTypeError { err: self, unifier }
} }
@ -67,8 +64,8 @@ pub struct DisplayTypeError<'a> {
fn loc_to_str(loc: Option<Location>) -> String { fn loc_to_str(loc: Option<Location>) -> String {
match loc { match loc {
Some(loc) => format!("(in {loc})"), Some(loc) => format!("(in {})", loc),
None => String::new(), None => "".to_string(),
} }
} }
@ -78,20 +75,21 @@ impl<'a> Display for DisplayTypeError<'a> {
let mut notes = Some(HashMap::new()); let mut notes = Some(HashMap::new());
match &self.err.kind { match &self.err.kind {
TooManyArguments { expected, got } => { TooManyArguments { expected, got } => {
write!(f, "Too many arguments. Expected {expected} but got {got}") write!(f, "Too many arguments. Expected {} but got {}", expected, got)
} }
MissingArgs(args) => { MissingArgs(args) => {
write!(f, "Missing arguments: {args}") write!(f, "Missing arguments: {}", args)
} }
UnknownArgName(name) => { UnknownArgName(name) => {
write!(f, "Unknown argument name: {name}") write!(f, "Unknown argument name: {}", name)
} }
IncorrectArgType { name, expected, got } => { IncorrectArgType { name, expected, got } => {
let expected = self.unifier.stringify_with_notes(*expected, &mut notes); let expected = self.unifier.stringify_with_notes(*expected, &mut notes);
let got = self.unifier.stringify_with_notes(*got, &mut notes); let got = self.unifier.stringify_with_notes(*got, &mut notes);
write!( write!(
f, f,
"Incorrect argument type for {name}. Expected {expected}, but got {got}" "Incorrect argument type for {}. Expected {}, but got {}",
name, expected, got
) )
} }
FieldUnificationError { field, types, loc } => { FieldUnificationError { field, types, loc } => {
@ -128,7 +126,7 @@ impl<'a> Display for DisplayTypeError<'a> {
); );
if let Some(loc) = loc { if let Some(loc) = loc {
result?; result?;
write!(f, " (in {loc})")?; write!(f, " (in {})", loc)?;
return Ok(()); return Ok(());
} }
result result
@ -138,12 +136,12 @@ impl<'a> Display for DisplayTypeError<'a> {
{ {
let t1 = self.unifier.stringify_with_notes(*t1, &mut notes); let t1 = self.unifier.stringify_with_notes(*t1, &mut notes);
let t2 = self.unifier.stringify_with_notes(*t2, &mut notes); let t2 = self.unifier.stringify_with_notes(*t2, &mut notes);
write!(f, "Tuple length mismatch: got {t1} and {t2}") write!(f, "Tuple length mismatch: got {} and {}", t1, t2)
} }
_ => { _ => {
let t1 = self.unifier.stringify_with_notes(*t1, &mut notes); let t1 = self.unifier.stringify_with_notes(*t1, &mut notes);
let t2 = self.unifier.stringify_with_notes(*t2, &mut notes); let t2 = self.unifier.stringify_with_notes(*t2, &mut notes);
write!(f, "Incompatible types: {t1} and {t2}") write!(f, "Incompatible types: {} and {}", t1, t2)
} }
} }
} }
@ -152,17 +150,18 @@ impl<'a> Display for DisplayTypeError<'a> {
write!(f, "Cannot assign to an element of a tuple") write!(f, "Cannot assign to an element of a tuple")
} else { } else {
let t = self.unifier.stringify_with_notes(*t, &mut notes); let t = self.unifier.stringify_with_notes(*t, &mut notes);
write!(f, "Cannot assign to field {name} of {t}, which is immutable") write!(f, "Cannot assign to field {} of {}, which is immutable", name, t)
} }
} }
NoSuchField(name, t) => { NoSuchField(name, t) => {
let t = self.unifier.stringify_with_notes(*t, &mut notes); let t = self.unifier.stringify_with_notes(*t, &mut notes);
write!(f, "`{t}::{name}` field/method does not exist") write!(f, "`{}::{}` field/method does not exist", t, name)
} }
TupleIndexOutOfBounds { index, len } => { TupleIndexOutOfBounds { index, len } => {
write!( write!(
f, f,
"Tuple index out of bounds. Got {index} but tuple has only {len} elements" "Tuple index out of bounds. Got {} but tuple has only {} elements",
index, len
) )
} }
RequiresTypeAnn => { RequiresTypeAnn => {
@ -173,13 +172,13 @@ impl<'a> Display for DisplayTypeError<'a> {
} }
}?; }?;
if let Some(loc) = self.err.loc { if let Some(loc) = self.err.loc {
write!(f, " at {loc}")?; write!(f, " at {}", loc)?;
} }
let notes = notes.unwrap(); let notes = notes.unwrap();
if !notes.is_empty() { if !notes.is_empty() {
write!(f, "\n\nNotes:")?; write!(f, "\n\nNotes:")?;
for line in notes.values() { for line in notes.values() {
write!(f, "\n {line}")?; write!(f, "\n {}", line)?;
} }
} }
Ok(()) Ok(())
File diff suppressed because it is too large

View File
@ -3,10 +3,10 @@ use super::*;
use crate::{ use crate::{
codegen::CodeGenContext, codegen::CodeGenContext,
symbol_resolver::ValueEnum, symbol_resolver::ValueEnum,
toplevel::{DefinitionId, helper::PRIMITIVE_DEF_IDS, TopLevelDef}, toplevel::{DefinitionId, TopLevelDef},
}; };
use indoc::indoc; use indoc::indoc;
use std::iter::zip; use itertools::zip;
use nac3parser::parser::parse_program; use nac3parser::parser::parse_program;
use parking_lot::RwLock; use parking_lot::RwLock;
use test_case::test_case; use test_case::test_case;
@ -20,7 +20,7 @@ struct Resolver {
impl SymbolResolver for Resolver { impl SymbolResolver for Resolver {
fn get_default_param_value( fn get_default_param_value(
&self, &self,
_: &ast::Expr, _: &nac3parser::ast::Expr,
) -> Option<crate::symbol_resolver::SymbolValue> { ) -> Option<crate::symbol_resolver::SymbolValue> {
unimplemented!() unimplemented!()
} }
@ -43,9 +43,8 @@ impl SymbolResolver for Resolver {
unimplemented!() unimplemented!()
} }
fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, HashSet<String>> { fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, String> {
self.id_to_def.get(&id).cloned() self.id_to_def.get(&id).cloned().ok_or_else(|| "Unknown identifier".to_string())
.ok_or_else(|| HashSet::from(["Unknown identifier".to_string()]))
} }
fn get_string_id(&self, _: &str) -> i32 { fn get_string_id(&self, _: &str) -> i32 {
@ -63,7 +62,7 @@ struct TestEnvironment {
pub primitives: PrimitiveStore, pub primitives: PrimitiveStore,
pub id_to_name: HashMap<usize, StrRef>, pub id_to_name: HashMap<usize, StrRef>,
pub identifier_mapping: HashMap<StrRef, Type>, pub identifier_mapping: HashMap<StrRef, Type>,
pub virtual_checks: Vec<(Type, Type, Location)>, pub virtual_checks: Vec<(Type, Type, nac3parser::ast::Location)>,
pub calls: HashMap<CodeLocation, CallId>, pub calls: HashMap<CodeLocation, CallId>,
pub top_level: TopLevelContext, pub top_level: TopLevelContext,
} }
@ -73,77 +72,67 @@ impl TestEnvironment {
let mut unifier = Unifier::new(); let mut unifier = Unifier::new();
let int32 = unifier.add_ty(TypeEnum::TObj { let int32 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.int32, obj_id: DefinitionId(0),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
with_fields(&mut unifier, int32, |unifier, fields| { with_fields(&mut unifier, int32, |unifier, fields| {
let add_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature { let add_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature {
args: vec![FuncArg { name: "other".into(), ty: int32, default_value: None }], args: vec![FuncArg { name: "other".into(), ty: int32, default_value: None }],
ret: int32, ret: int32,
vars: VarMap::new(), vars: HashMap::new(),
})); }));
fields.insert("__add__".into(), (add_ty, false)); fields.insert("__add__".into(), (add_ty, false));
}); });
let int64 = unifier.add_ty(TypeEnum::TObj { let int64 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.int64, obj_id: DefinitionId(1),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let float = unifier.add_ty(TypeEnum::TObj { let float = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.float, obj_id: DefinitionId(2),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let bool = unifier.add_ty(TypeEnum::TObj { let bool = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.bool, obj_id: DefinitionId(3),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let none = unifier.add_ty(TypeEnum::TObj { let none = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.none, obj_id: DefinitionId(4),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let range = unifier.add_ty(TypeEnum::TObj { let range = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.range, obj_id: DefinitionId(5),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let str = unifier.add_ty(TypeEnum::TObj { let str = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.str, obj_id: DefinitionId(6),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let exception = unifier.add_ty(TypeEnum::TObj { let exception = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.exception, obj_id: DefinitionId(7),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let uint32 = unifier.add_ty(TypeEnum::TObj { let uint32 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.uint32, obj_id: DefinitionId(8),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let uint64 = unifier.add_ty(TypeEnum::TObj { let uint64 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.uint64, obj_id: DefinitionId(9),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let option = unifier.add_ty(TypeEnum::TObj { let option = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.option, obj_id: DefinitionId(10),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
});
let ndarray_dtype_tvar = unifier.get_fresh_var(Some("ndarray_dtype".into()), None);
let ndarray_ndims_tvar = unifier.get_fresh_const_generic_var(uint64, Some("ndarray_ndims".into()), None);
let ndarray = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.ndarray,
fields: HashMap::new(),
params: VarMap::from([
(ndarray_dtype_tvar.1, ndarray_dtype_tvar.0),
(ndarray_ndims_tvar.1, ndarray_ndims_tvar.0),
]),
}); });
let primitives = PrimitiveStore { let primitives = PrimitiveStore {
int32, int32,
@ -157,10 +146,7 @@ impl TestEnvironment {
uint32, uint32,
uint64, uint64,
option, option,
ndarray,
size_t: 64,
}; };
unifier.put_primitive_store(&primitives);
set_primitives_magic_methods(&primitives, &mut unifier); set_primitives_magic_methods(&primitives, &mut unifier);
let id_to_name = [ let id_to_name = [
@ -211,72 +197,67 @@ impl TestEnvironment {
let mut identifier_mapping = HashMap::new(); let mut identifier_mapping = HashMap::new();
let mut top_level_defs: Vec<Arc<RwLock<TopLevelDef>>> = Vec::new(); let mut top_level_defs: Vec<Arc<RwLock<TopLevelDef>>> = Vec::new();
let int32 = unifier.add_ty(TypeEnum::TObj { let int32 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.int32, obj_id: DefinitionId(0),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
with_fields(&mut unifier, int32, |unifier, fields| { with_fields(&mut unifier, int32, |unifier, fields| {
let add_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature { let add_ty = unifier.add_ty(TypeEnum::TFunc(FunSignature {
args: vec![FuncArg { name: "other".into(), ty: int32, default_value: None }], args: vec![FuncArg { name: "other".into(), ty: int32, default_value: None }],
ret: int32, ret: int32,
vars: VarMap::new(), vars: HashMap::new(),
})); }));
fields.insert("__add__".into(), (add_ty, false)); fields.insert("__add__".into(), (add_ty, false));
}); });
let int64 = unifier.add_ty(TypeEnum::TObj { let int64 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.int64, obj_id: DefinitionId(1),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let float = unifier.add_ty(TypeEnum::TObj { let float = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.float, obj_id: DefinitionId(2),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let bool = unifier.add_ty(TypeEnum::TObj { let bool = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.bool, obj_id: DefinitionId(3),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let none = unifier.add_ty(TypeEnum::TObj { let none = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.none, obj_id: DefinitionId(4),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let range = unifier.add_ty(TypeEnum::TObj { let range = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.range, obj_id: DefinitionId(5),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let str = unifier.add_ty(TypeEnum::TObj { let str = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.str, obj_id: DefinitionId(6),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let exception = unifier.add_ty(TypeEnum::TObj { let exception = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.exception, obj_id: DefinitionId(7),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let uint32 = unifier.add_ty(TypeEnum::TObj { let uint32 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.uint32, obj_id: DefinitionId(8),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let uint64 = unifier.add_ty(TypeEnum::TObj { let uint64 = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.uint64, obj_id: DefinitionId(9),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}); });
let option = unifier.add_ty(TypeEnum::TObj { let option = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.option, obj_id: DefinitionId(10),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
});
let ndarray = unifier.add_ty(TypeEnum::TObj {
obj_id: PRIMITIVE_DEF_IDS.ndarray,
fields: HashMap::new(),
params: VarMap::new(),
}); });
identifier_mapping.insert("None".into(), none); identifier_mapping.insert("None".into(), none);
for (i, name) in ["int32", "int64", "float", "bool", "none", "range", "str", "Exception"] for (i, name) in ["int32", "int64", "float", "bool", "none", "range", "str", "Exception"]
@ -312,18 +293,14 @@ impl TestEnvironment {
uint32, uint32,
uint64, uint64,
option, option,
ndarray,
size_t: 64,
}; };
unifier.put_primitive_store(&primitives);
let (v0, id) = unifier.get_dummy_var(); let (v0, id) = unifier.get_dummy_var();
let foo_ty = unifier.add_ty(TypeEnum::TObj { let foo_ty = unifier.add_ty(TypeEnum::TObj {
obj_id: DefinitionId(defs + 1), obj_id: DefinitionId(defs + 1),
fields: [("a".into(), (v0, true))].iter().cloned().collect::<HashMap<_, _>>(), fields: [("a".into(), (v0, true))].iter().cloned().collect::<HashMap<_, _>>(),
params: [(id, v0)].iter().cloned().collect::<VarMap>(), params: [(id, v0)].iter().cloned().collect::<HashMap<_, _>>(),
}); });
top_level_defs.push( top_level_defs.push(
RwLock::new(TopLevelDef::Class { RwLock::new(TopLevelDef::Class {
View File
@ -1,12 +1,10 @@
use itertools::{zip, Itertools};
use std::cell::RefCell; use std::cell::RefCell;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt::Display; use std::fmt::Display;
use std::rc::Rc; use std::rc::Rc;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::{borrow::Cow, collections::HashSet}; use std::{borrow::Cow, collections::HashSet};
use std::iter::zip;
use indexmap::IndexMap;
use itertools::Itertools;
use nac3parser::ast::{Location, StrRef}; use nac3parser::ast::{Location, StrRef};
@ -14,7 +12,6 @@ use super::type_error::{TypeError, TypeErrorKind};
use super::unification_table::{UnificationKey, UnificationTable}; use super::unification_table::{UnificationKey, UnificationTable};
use crate::symbol_resolver::SymbolValue; use crate::symbol_resolver::SymbolValue;
use crate::toplevel::{DefinitionId, TopLevelContext, TopLevelDef}; use crate::toplevel::{DefinitionId, TopLevelContext, TopLevelDef};
use crate::typecheck::type_inferencer::PrimitiveStore;
#[cfg(test)] #[cfg(test)]
mod test; mod test;
@ -26,10 +23,7 @@ pub type Type = UnificationKey;
pub struct CallId(pub(super) usize); pub struct CallId(pub(super) usize);
pub type Mapping<K, V = Type> = HashMap<K, V>; pub type Mapping<K, V = Type> = HashMap<K, V>;
pub type IndexMapping<K, V = Type> = IndexMap<K, V>; type VarMap = Mapping<u32>;
/// The mapping between type variable ID and [unifier type][`Type`].
pub type VarMap = IndexMapping<u32>;
#[derive(Clone)] #[derive(Clone)]
pub struct Call { pub struct Call {
@ -61,14 +55,13 @@ pub enum RecordKey {
} }
impl Type { impl Type {
/// Wrapper function for cleaner code so that we don't need to write this long pattern matching // a wrapper function for cleaner code so that we don't need to
/// just to get the field `obj_id`. // write this long pattern matching just to get the field `obj_id`
#[must_use] pub fn get_obj_id(self, unifier: &Unifier) -> DefinitionId {
pub fn obj_id(self, unifier: &Unifier) -> Option<DefinitionId> { if let TypeEnum::TObj { obj_id, .. } = unifier.get_ty_immutable(self).as_ref() {
if let TypeEnum::TObj { obj_id, .. } = &*unifier.get_ty_immutable(self) { *obj_id
Some(*obj_id)
} else { } else {
None unreachable!("expect a object type")
} }
} }
} }
@ -103,8 +96,8 @@ impl From<i32> for RecordKey {
impl Display for RecordKey { impl Display for RecordKey {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self { match self {
RecordKey::Str(s) => write!(f, "{s}"), RecordKey::Str(s) => write!(f, "{}", s),
RecordKey::Int(i) => write!(f, "{i}"), RecordKey::Int(i) => write!(f, "{}", i),
} }
} }
} }
@ -117,13 +110,11 @@ pub struct RecordField {
} }
impl RecordField { impl RecordField {
#[must_use]
pub fn new(ty: Type, mutable: bool, loc: Option<Location>) -> RecordField { pub fn new(ty: Type, mutable: bool, loc: Option<Location>) -> RecordField {
RecordField { ty, mutable, loc } RecordField { ty, mutable, loc }
} }
} }
/// Category of variable and value types.
#[derive(Clone)] #[derive(Clone)]
pub enum TypeEnum { pub enum TypeEnum {
TRigidVar { TRigidVar {
@ -131,8 +122,6 @@ pub enum TypeEnum {
name: Option<StrRef>, name: Option<StrRef>,
loc: Option<Location>, loc: Option<Location>,
}, },
/// A type variable.
TVar { TVar {
id: u32, id: u32,
// empty indicates this is not a struct/tuple/list // empty indicates this is not a struct/tuple/list
@ -141,61 +130,30 @@ pub enum TypeEnum {
range: Vec<Type>, range: Vec<Type>,
name: Option<StrRef>, name: Option<StrRef>,
loc: Option<Location>, loc: Option<Location>,
/// Whether this type variable refers to a const-generic variable.
is_const_generic: bool,
}, },
/// A literal generic type matching `typing.Literal`.
TLiteral {
/// The value of the constant.
values: Vec<SymbolValue>,
loc: Option<Location>,
},
/// A tuple type.
TTuple { TTuple {
/// The types of elements present in this tuple.
ty: Vec<Type>, ty: Vec<Type>,
}, },
/// A list type.
TList { TList {
/// The type of elements present in this list.
ty: Type, ty: Type,
}, },
/// An object type.
TObj { TObj {
/// The [DefintionId] of this object type.
obj_id: DefinitionId, obj_id: DefinitionId,
/// The fields present in this object type.
///
/// The key of the [Mapping] is the identifier of the field, while the value is a tuple
/// containing the [Type] of the field, and a `bool` indicating whether the field is a
/// variable (as opposed to a function).
fields: Mapping<StrRef, (Type, bool)>, fields: Mapping<StrRef, (Type, bool)>,
/// Mapping between the ID of type variables and the [Type] representing the type variables
/// of this object type.
params: VarMap, params: VarMap,
}, },
TVirtual { TVirtual {
ty: Type, ty: Type,
}, },
TCall(Vec<CallId>), TCall(Vec<CallId>),
/// A function type.
TFunc(FunSignature), TFunc(FunSignature),
} }
impl TypeEnum { impl TypeEnum {
#[must_use]
pub fn get_type_name(&self) -> &'static str { pub fn get_type_name(&self) -> &'static str {
match self { match self {
TypeEnum::TRigidVar { .. } => "TRigidVar", TypeEnum::TRigidVar { .. } => "TRigidVar",
TypeEnum::TVar { .. } => "TVar", TypeEnum::TVar { .. } => "TVar",
TypeEnum::TLiteral { .. } => "TConstant",
TypeEnum::TTuple { .. } => "TTuple", TypeEnum::TTuple { .. } => "TTuple",
TypeEnum::TList { .. } => "TList", TypeEnum::TList { .. } => "TList",
TypeEnum::TObj { .. } => "TObj", TypeEnum::TObj { .. } => "TObj",
@ -215,8 +173,7 @@ pub struct Unifier {
pub(crate) calls: Vec<Rc<Call>>, pub(crate) calls: Vec<Rc<Call>>,
var_id: u32, var_id: u32,
unify_cache: HashSet<(Type, Type)>, unify_cache: HashSet<(Type, Type)>,
snapshot: Option<(usize, u32)>, snapshot: Option<(usize, u32)>
primitive_store: Option<PrimitiveStore>,
} }
impl Default for Unifier { impl Default for Unifier {
@ -227,7 +184,6 @@ impl Default for Unifier {
impl Unifier { impl Unifier {
/// Get an empty unifier /// Get an empty unifier
#[must_use]
pub fn new() -> Unifier { pub fn new() -> Unifier {
Unifier { Unifier {
unification_table: UnificationTable::new(), unification_table: UnificationTable::new(),
@ -236,27 +192,9 @@ impl Unifier {
unify_cache: HashSet::new(), unify_cache: HashSet::new(),
top_level: None, top_level: None,
snapshot: None, snapshot: None,
primitive_store: None,
} }
} }
/// Sets the [`PrimitiveStore`] instance within this `Unifier`.
///
/// This function can only be invoked once. Any subsequent invocations will result in an
/// assertion error.
pub fn put_primitive_store(&mut self, primitives: &PrimitiveStore) {
assert!(self.primitive_store.is_none());
self.primitive_store.replace(*primitives);
}
/// Returns the [`UnificationTable`] associated with this `Unifier`.
///
/// # Safety
///
/// The use of this function is discouraged under most circumstances. Only use this function if
/// in-place manipulation of type variables and/or type fields is necessary, otherwise prefer to
/// [add a new type][`Unifier::add_ty`] and [unify the type][`Unifier::unify`] with an existing
/// type.
pub unsafe fn get_unification_table(&mut self) -> &mut UnificationTable<Rc<TypeEnum>> { pub unsafe fn get_unification_table(&mut self) -> &mut UnificationTable<Rc<TypeEnum>> {
&mut self.unification_table &mut self.unification_table
} }
@ -275,11 +213,9 @@ impl Unifier {
top_level: None, top_level: None,
unify_cache: HashSet::new(), unify_cache: HashSet::new(),
snapshot: None, snapshot: None,
primitive_store: None,
} }
} }
#[must_use]
pub fn get_shared_unifier(&self) -> SharedUnifier { pub fn get_shared_unifier(&self) -> SharedUnifier {
Arc::new(Mutex::new(( Arc::new(Mutex::new((
self.unification_table.get_send(), self.unification_table.get_send(),
@ -289,7 +225,7 @@ impl Unifier {
} }
/// Register a type to the unifier. /// Register a type to the unifier.
/// Returns a key in the `unification_table`. /// Returns a key in the unification_table.
pub fn add_ty(&mut self, a: TypeEnum) -> Type { pub fn add_ty(&mut self, a: TypeEnum) -> Type {
self.unification_table.new_key(Rc::new(a)) self.unification_table.new_key(Rc::new(a))
} }
@ -303,7 +239,6 @@ impl Unifier {
fields: Some(fields), fields: Some(fields),
name: None, name: None,
loc: None, loc: None,
is_const_generic: false,
}) })
} }
@ -322,7 +257,6 @@ impl Unifier {
} }
} }
#[must_use]
pub fn get_call_signature_immutable(&self, id: CallId) -> Option<FunSignature> { pub fn get_call_signature_immutable(&self, id: CallId) -> Option<FunSignature> {
let fun = self.calls.get(id.0).unwrap().fun.borrow().unwrap(); let fun = self.calls.get(id.0).unwrap().fun.borrow().unwrap();
if let TypeEnum::TFunc(sign) = &*self.get_ty_immutable(fun) { if let TypeEnum::TFunc(sign) = &*self.get_ty_immutable(fun) {
@ -336,12 +270,11 @@ impl Unifier {
self.unification_table.get_representative(ty) self.unification_table.get_representative(ty)
} }
/// Get the `TypeEnum` of a type. /// Get the TypeEnum of a type.
pub fn get_ty(&mut self, a: Type) -> Rc<TypeEnum> { pub fn get_ty(&mut self, a: Type) -> Rc<TypeEnum> {
self.unification_table.probe_value(a).clone() self.unification_table.probe_value(a).clone()
} }
#[must_use]
pub fn get_ty_immutable(&self, a: Type) -> Rc<TypeEnum> { pub fn get_ty_immutable(&self, a: Type) -> Rc<TypeEnum> {
self.unification_table.probe_value_immutable(a).clone() self.unification_table.probe_value_immutable(a).clone()
} }
@ -360,16 +293,11 @@ impl Unifier {
self.get_fresh_var_with_range(&[], None, None) self.get_fresh_var_with_range(&[], None, None)
} }
/// Returns a fresh [type variable][TypeEnum::TVar] with no associated range.
///
/// This type variable can be instantiated by any type.
pub fn get_fresh_var(&mut self, name: Option<StrRef>, loc: Option<Location>) -> (Type, u32) { pub fn get_fresh_var(&mut self, name: Option<StrRef>, loc: Option<Location>) -> (Type, u32) {
self.get_fresh_var_with_range(&[], name, loc) self.get_fresh_var_with_range(&[], name, loc)
} }
/// Returns a fresh [type variable][TypeEnum::TVar] with the range specified by `range`. /// Get a fresh type variable.
///
/// This type variable can be instantiated by any type present in `range`.
pub fn get_fresh_var_with_range( pub fn get_fresh_var_with_range(
&mut self, &mut self,
range: &[Type], range: &[Type],
@ -379,32 +307,7 @@ impl Unifier {
let id = self.var_id + 1; let id = self.var_id + 1;
self.var_id += 1; self.var_id += 1;
let range = range.to_vec(); let range = range.to_vec();
(self.add_ty(TypeEnum::TVar { id, range, fields: None, name, loc, is_const_generic: false }), id) (self.add_ty(TypeEnum::TVar { id, range, fields: None, name, loc }), id)
}
/// Returns a fresh type representing a constant generic variable with the given underlying type `ty`.
pub fn get_fresh_const_generic_var(
&mut self,
ty: Type,
name: Option<StrRef>,
loc: Option<Location>,
) -> (Type, u32) {
let id = self.var_id + 1;
self.var_id += 1;
(self.add_ty(TypeEnum::TVar { id, range: vec![ty], fields: None, name, loc, is_const_generic: true }), id)
}
/// Returns a fresh type representing a [literal][TypeEnum::TConstant] with the given `values`.
pub fn get_fresh_literal(
&mut self,
values: Vec<SymbolValue>,
loc: Option<Location>,
) -> Type {
let ty_enum = TypeEnum::TLiteral {
values: values.into_iter().dedup().collect(),
loc
};
self.add_ty(ty_enum)
} }
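As a rough illustration of how these constructors are meant to be combined (the `unifier`/`primitives` values and the `?` context are assumed, not taken from this diff):

```
// A type variable that may only be instantiated with int32 or int64.
let (_int_like, _) = unifier.get_fresh_var_with_range(
    &[primitives.int32, primitives.int64],
    Some("N".into()),
    None,
);

// A const-generic variable over int32 and the literal 42; unifying the two is
// expected to succeed because 42 fits in an i32 (see the promotion check in
// unify_impl further below).
let (cg, _) = unifier.get_fresh_const_generic_var(primitives.int32, None, None);
let lit = unifier.get_fresh_literal(vec![SymbolValue::I32(42)], None);
unifier.unify(cg, lit)?;
```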
/// Unification would not unify rigid variables with other types, but we want to do this for /// Unification would not unify rigid variables with other types, but we want to do this for
@ -423,7 +326,8 @@ impl Unifier {
Some( Some(
range range
.iter() .iter()
.flat_map(|ty| self.get_instantiations(*ty).unwrap_or_else(|| vec![*ty])) .map(|ty| self.get_instantiations(*ty).unwrap_or_else(|| vec![*ty]))
.flatten()
.collect_vec(), .collect_vec(),
) )
} }
@ -464,7 +368,7 @@ impl Unifier {
.map(|params| { .map(|params| {
self.subst( self.subst(
ty, ty,
&zip(keys.iter().copied(), params.iter().copied()).collect(), &zip(keys.iter().cloned(), params.iter().cloned()).collect(),
) )
.unwrap_or(ty) .unwrap_or(ty)
}) })
@ -479,21 +383,18 @@ impl Unifier {
pub fn is_concrete(&mut self, a: Type, allowed_typevars: &[Type]) -> bool { pub fn is_concrete(&mut self, a: Type, allowed_typevars: &[Type]) -> bool {
use TypeEnum::*; use TypeEnum::*;
match &*self.get_ty(a) { match &*self.get_ty(a) {
TRigidVar { .. } TRigidVar { .. } => true,
| TLiteral { .. }
// functions are instantiated for each call sites, so the function type can contain
// type variables.
| TFunc { .. } => true,
TVar { .. } => allowed_typevars.iter().any(|b| self.unification_table.unioned(a, *b)), TVar { .. } => allowed_typevars.iter().any(|b| self.unification_table.unioned(a, *b)),
TCall { .. } => false, TCall { .. } => false,
TList { ty } TList { ty } => self.is_concrete(*ty, allowed_typevars),
| TVirtual { ty } => self.is_concrete(*ty, allowed_typevars),
TTuple { ty } => ty.iter().all(|ty| self.is_concrete(*ty, allowed_typevars)), TTuple { ty } => ty.iter().all(|ty| self.is_concrete(*ty, allowed_typevars)),
TObj { params: vars, .. } => { TObj { params: vars, .. } => {
vars.values().all(|ty| self.is_concrete(*ty, allowed_typevars)) vars.values().all(|ty| self.is_concrete(*ty, allowed_typevars))
} }
// functions are instantiated for each call sites, so the function type can contain
// type variables.
TFunc { .. } => true,
TVirtual { ty } => self.is_concrete(*ty, allowed_typevars),
} }
} }
@ -523,12 +424,15 @@ impl Unifier {
} }
let Call { posargs, kwargs, ret, fun, loc } = call; let Call { posargs, kwargs, ret, fun, loc } = call;
let instantiated = self.instantiate_fun(b, signature); let instantiated = self.instantiate_fun(b, &*signature);
let r = self.get_ty(instantiated); let r = self.get_ty(instantiated);
let r = r.as_ref(); let r = r.as_ref();
let TypeEnum::TFunc(signature) = r else { let signature;
unreachable!() if let TypeEnum::TFunc(s) = &*r {
}; signature = s;
} else {
unreachable!();
}
// we check to make sure that all required arguments (those without default // we check to make sure that all required arguments (those without default
// arguments) are provided, and do not provide the same argument twice. // arguments) are provided, and do not provide the same argument twice.
let mut required = required.to_vec(); let mut required = required.to_vec();
@ -551,7 +455,7 @@ impl Unifier {
TypeError::new(TypeErrorKind::IncorrectArgType { name, expected, got: *t }, *loc) TypeError::new(TypeErrorKind::IncorrectArgType { name, expected, got: *t }, *loc)
})?; })?;
} }
for (k, t) in kwargs { for (k, t) in kwargs.iter() {
if let Some(i) = required.iter().position(|v| v == k) { if let Some(i) = required.iter().position(|v| v == k) {
required.remove(i); required.remove(i);
} }
@ -627,8 +531,8 @@ impl Unifier {
}; };
match (&*ty_a, &*ty_b) { match (&*ty_a, &*ty_b) {
( (
TVar { fields: fields1, id, name: name1, loc: loc1, is_const_generic: false, .. }, TVar { fields: fields1, id, name: name1, loc: loc1, .. },
TVar { fields: fields2, id: id2, name: name2, loc: loc2, is_const_generic: false, .. }, TVar { fields: fields2, id: id2, name: name2, loc: loc2, .. },
) => { ) => {
let new_fields = match (fields1, fields2) { let new_fields = match (fields1, fields2) {
(None, None) => None, (None, None) => None,
@ -638,7 +542,7 @@ impl Unifier {
} }
(Some(fields1), Some(fields2)) => { (Some(fields1), Some(fields2)) => {
let mut new_fields: Mapping<_, _> = fields2.clone(); let mut new_fields: Mapping<_, _> = fields2.clone();
for (key, val1) in fields1 { for (key, val1) in fields1.iter() {
if let Some(val2) = fields2.get(key) { if let Some(val2) = fields2.get(key) {
self.unify_impl(val1.ty, val2.ty, false).map_err(|_| { self.unify_impl(val1.ty, val2.ty, false).map_err(|_| {
TypeError::new( TypeError::new(
@ -667,9 +571,9 @@ impl Unifier {
}; };
let intersection = self let intersection = self
.get_intersection(a, b) .get_intersection(a, b)
.map_err(|()| TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None))? .map_err(|_| TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None))?
.unwrap(); .unwrap();
let range = if let TVar { range, .. } = &*self.get_ty(intersection) { let range = if let TypeEnum::TVar { range, .. } = &*self.get_ty(intersection) {
range.clone() range.clone()
} else { } else {
unreachable!() unreachable!()
@ -677,17 +581,16 @@ impl Unifier {
self.unification_table.unify(a, b); self.unification_table.unify(a, b);
self.unification_table.set_value( self.unification_table.set_value(
a, a,
Rc::new(TVar { Rc::new(TypeEnum::TVar {
id: name1.map_or(*id2, |_| *id), id: name1.map_or(*id2, |_| *id),
fields: new_fields, fields: new_fields,
range, range,
name: name1.or(*name2), name: name1.or(*name2),
loc: loc1.or(*loc2), loc: loc1.or(*loc2),
is_const_generic: false,
}), }),
); );
} }
(TVar { fields: None, range, is_const_generic: false, .. }, _) => { (TVar { fields: None, range, .. }, _) => {
// We check for the range of the type variable to see if unification is allowed. // We check for the range of the type variable to see if unification is allowed.
// Note that although b may be compatible with a, we may have to constrain type // Note that although b may be compatible with a, we may have to constrain type
// variables in b to make sure that instantiations of b would always be compatible // variables in b to make sure that instantiations of b would always be compatible
@ -704,9 +607,9 @@ impl Unifier {
self.unify_impl(x, b, false)?; self.unify_impl(x, b, false)?;
self.set_a_to_b(a, x); self.set_a_to_b(a, x);
} }
(TVar { fields: Some(fields), range, is_const_generic: false, .. }, TTuple { ty }) => { (TVar { fields: Some(fields), range, .. }, TTuple { ty }) => {
let len = i32::try_from(ty.len()).unwrap(); let len = ty.len() as i32;
for (k, v) in fields { for (k, v) in fields.iter() {
match *k { match *k {
RecordKey::Int(i) => { RecordKey::Int(i) => {
if v.mutable { if v.mutable {
@ -734,11 +637,11 @@ impl Unifier {
self.unify_impl(x, b, false)?; self.unify_impl(x, b, false)?;
self.set_a_to_b(a, x); self.set_a_to_b(a, x);
} }
(TVar { fields: Some(fields), range, is_const_generic: false, .. }, TList { ty }) => { (TVar { fields: Some(fields), range, .. }, TList { ty }) => {
for (k, v) in fields { for (k, v) in fields.iter() {
match *k { match *k {
RecordKey::Int(_) => { RecordKey::Int(_) => {
self.unify_impl(v.ty, *ty, false).map_err(|e| e.at(v.loc))?; self.unify_impl(v.ty, *ty, false).map_err(|e| e.at(v.loc))?
} }
RecordKey::Str(_) => { RecordKey::Str(_) => {
return Err(TypeError::new(TypeErrorKind::NoSuchField(*k, b), v.loc)) return Err(TypeError::new(TypeErrorKind::NoSuchField(*k, b), v.loc))
@ -749,81 +652,6 @@ impl Unifier {
self.unify_impl(x, b, false)?; self.unify_impl(x, b, false)?;
self.set_a_to_b(a, x); self.set_a_to_b(a, x);
} }
(TVar { id: id1, range: ty1, is_const_generic: true, .. }, TVar { id: id2, range: ty2, .. }) => {
let ty1 = ty1[0];
let ty2 = ty2[0];
if id1 != id2 {
self.unify_impl(ty1, ty2, false)?;
}
self.set_a_to_b(a, b);
}
(TVar { range: tys, is_const_generic: true, .. }, TLiteral { values, .. }) => {
assert_eq!(tys.len(), 1);
assert_eq!(values.len(), 1);
let primitives = &self.primitive_store
.expect("Expected PrimitiveStore to be present");
let ty = tys[0];
let value = &values[0];
let value_ty = value.get_type(primitives, self);
// If the types don't match, try to implicitly promote integers
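// Worked example: a const-generic variable over int32 unifies with the literal
// 300 (it fits in an i32), but unifying it with the literal 2**40 fails, since
// i32::try_from rejects that value in the check below.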
if !self.unioned(ty, value_ty) {
let Ok(num_val) = i128::try_from(value.clone()) else {
return Self::incompatible_types(a, b)
};
let can_convert = if self.unioned(ty, primitives.int32) {
i32::try_from(num_val).is_ok()
} else if self.unioned(ty, primitives.int64) {
i64::try_from(num_val).is_ok()
} else if self.unioned(ty, primitives.uint32) {
u32::try_from(num_val).is_ok()
} else if self.unioned(ty, primitives.uint64) {
u64::try_from(num_val).is_ok()
} else {
false
};
if !can_convert {
return Self::incompatible_types(a, b)
}
}
self.set_a_to_b(a, b);
}
(TLiteral { values: val1, .. }, TLiteral { values: val2, .. }) => {
for (v1, v2) in zip(val1, val2) {
if v1 != v2 {
let symbol_value_to_int = |value: &SymbolValue| -> Option<i128> {
match value {
SymbolValue::I32(v) => Some(*v as i128),
SymbolValue::I64(v) => Some(*v as i128),
SymbolValue::U32(v) => Some(*v as i128),
SymbolValue::U64(v) => Some(*v as i128),
_ => None,
}
};
// Try performing integer promotion on literals
let v1i = symbol_value_to_int(v1);
let v2i = symbol_value_to_int(v2);
if v1i != v2i {
return Self::incompatible_types(a, b)
}
}
}
self.set_a_to_b(a, b);
}
(TTuple { ty: ty1 }, TTuple { ty: ty2 }) => { (TTuple { ty: ty1 }, TTuple { ty: ty2 }) => {
if ty1.len() != ty2.len() { if ty1.len() != ty2.len() {
return Err(TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None)); return Err(TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None));
@ -842,7 +670,7 @@ impl Unifier {
self.set_a_to_b(a, b); self.set_a_to_b(a, b);
} }
(TVar { fields: Some(map), range, .. }, TObj { fields, .. }) => { (TVar { fields: Some(map), range, .. }, TObj { fields, .. }) => {
for (k, field) in map { for (k, field) in map.iter() {
match *k { match *k {
RecordKey::Str(s) => { RecordKey::Str(s) => {
let (ty, mutable) = fields.get(&s).copied().ok_or_else(|| { let (ty, mutable) = fields.get(&s).copied().ok_or_else(|| {
@ -874,7 +702,7 @@ impl Unifier {
(TVar { fields: Some(map), range, .. }, TVirtual { ty }) => { (TVar { fields: Some(map), range, .. }, TVirtual { ty }) => {
let ty = self.get_ty(*ty); let ty = self.get_ty(*ty);
if let TObj { fields, .. } = ty.as_ref() { if let TObj { fields, .. } = ty.as_ref() {
for (k, field) in map { for (k, field) in map.iter() {
match *k { match *k {
RecordKey::Str(s) => { RecordKey::Str(s) => {
let (ty, _) = fields.get(&s).copied().ok_or_else(|| { let (ty, _) = fields.get(&s).copied().ok_or_else(|| {
@ -916,16 +744,9 @@ impl Unifier {
TObj { obj_id: id2, params: params2, .. }, TObj { obj_id: id2, params: params2, .. },
) => { ) => {
if id1 != id2 { if id1 != id2 {
Self::incompatible_types(a, b)?; self.incompatible_types(a, b)?;
} }
for (x, y) in zip(params1.values(), params2.values()) {
// Sort the type arguments by their UnificationKey first, since `HashMap::iter` visits
// all K-V pairs "in arbitrary order"
let (tv1, tv2) = (
params1.iter().map(|(_, v)| v).collect_vec(),
params2.iter().map(|(_, v)| v).collect_vec(),
);
for (x, y) in zip(tv1, tv2) {
if self.unify_impl(*x, *y, false).is_err() { if self.unify_impl(*x, *y, false).is_err() {
return Err(TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None)); return Err(TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None));
}; };
@ -941,7 +762,7 @@ impl Unifier {
(TCall(calls1), TCall(calls2)) => { (TCall(calls1), TCall(calls2)) => {
// we do not unify individual calls, instead we defer until the unification with a // we do not unify individual calls, instead we defer until the unification with a
// function definition. // function definition.
let calls = calls1.iter().chain(calls2.iter()).copied().collect(); let calls = calls1.iter().chain(calls2.iter()).cloned().collect();
self.set_a_to_b(a, b); self.set_a_to_b(a, b);
self.unification_table.set_value(b, Rc::new(TCall(calls))); self.unification_table.set_value(b, Rc::new(TCall(calls)));
} }
@ -954,7 +775,7 @@ impl Unifier {
.rev() .rev()
.collect(); .collect();
// we unify every calls to the function signature. // we unify every calls to the function signature.
for c in calls { for c in calls.iter() {
let call = self.calls[c.0].clone(); let call = self.calls[c.0].clone();
self.unify_call(&call, b, signature, &required)?; self.unify_call(&call, b, signature, &required)?;
} }
@ -986,10 +807,10 @@ impl Unifier {
} }
_ => { _ => {
if swapped { if swapped {
return Self::incompatible_types(a, b); return self.incompatible_types(a, b);
} else {
self.unify_impl(b, a, true)?;
} }
self.unify_impl(b, a, true)?;
} }
} }
Ok(()) Ok(())
@ -1009,18 +830,19 @@ impl Unifier {
ty, ty,
&mut |id| { &mut |id| {
top_level.as_ref().map_or_else( top_level.as_ref().map_or_else(
|| format!("{id}"), || format!("{}", id),
|top_level| { |top_level| {
let top_level_def = &top_level.definitions.read()[id]; if let TopLevelDef::Class { name, .. } =
let TopLevelDef::Class { name, .. } = &*top_level_def.read() else { &*top_level.definitions.read()[id].read()
{
name.to_string()
} else {
unreachable!("expected class definition") unreachable!("expected class definition")
}; }
name.to_string()
}, },
) )
}, },
&mut |id| format!("typevar{id}"), &mut |id| format!("typevar{}", id),
notes, notes,
) )
} }
@ -1063,7 +885,7 @@ impl Unifier {
if !range.is_empty() && notes.is_some() && !notes.as_ref().unwrap().contains_key(id) if !range.is_empty() && notes.is_some() && !notes.as_ref().unwrap().contains_key(id)
{ {
// just in case if there is any cyclic dependency // just in case if there is any cyclic dependency
notes.as_mut().unwrap().insert(*id, String::new()); notes.as_mut().unwrap().insert(*id, "".into());
let body = format!( let body = format!(
"{} ∈ {{{}}}", "{} ∈ {{{}}}",
n, n,
@ -1077,9 +899,6 @@ impl Unifier {
}; };
n n
} }
TypeEnum::TLiteral { values, .. } => {
format!("const({})", values.iter().map(|v| format!("{v:?}")).join(", "))
}
TypeEnum::TTuple { ty } => { TypeEnum::TTuple { ty } => {
let mut fields = let mut fields =
ty.iter().map(|v| self.internal_stringify(*v, obj_to_name, var_to_name, notes)); ty.iter().map(|v| self.internal_stringify(*v, obj_to_name, var_to_name, notes));
@ -1096,13 +915,15 @@ impl Unifier {
} }
TypeEnum::TObj { obj_id, params, .. } => { TypeEnum::TObj { obj_id, params, .. } => {
let name = obj_to_name(obj_id.0); let name = obj_to_name(obj_id.0);
if params.is_empty() { if !params.is_empty() {
name let params = params
} else {
let mut params = params
.iter() .iter()
.map(|(_, v)| self.internal_stringify(*v, obj_to_name, var_to_name, notes)); .map(|(_, v)| self.internal_stringify(*v, obj_to_name, var_to_name, notes));
// sort to preserve order
let mut params = params.sorted();
format!("{}[{}]", name, params.join(", ")) format!("{}[{}]", name, params.join(", "))
} else {
name
} }
} }
TypeEnum::TCall { .. } => "call".to_owned(), TypeEnum::TCall { .. } => "call".to_owned(),
@ -1128,20 +949,20 @@ impl Unifier {
}) })
.join(", "); .join(", ");
let ret = self.internal_stringify(signature.ret, obj_to_name, var_to_name, notes); let ret = self.internal_stringify(signature.ret, obj_to_name, var_to_name, notes);
format!("fn[[{params}], {ret}]") format!("fn[[{}], {}]", params, ret)
} }
} }
} }
/// Unifies `a` and `b` together, and sets the value to the value of `b`.
fn set_a_to_b(&mut self, a: Type, b: Type) { fn set_a_to_b(&mut self, a: Type, b: Type) {
// unify a and b together, and set the value to b's value.
let table = &mut self.unification_table; let table = &mut self.unification_table;
let ty_b = table.probe_value(b).clone(); let ty_b = table.probe_value(b).clone();
table.unify(a, b); table.unify(a, b);
table.set_value(a, ty_b); table.set_value(a, ty_b)
} }
fn incompatible_types(a: Type, b: Type) -> Result<(), TypeError> { fn incompatible_types(&mut self, a: Type, b: Type) -> Result<(), TypeError> {
Err(TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None)) Err(TypeError::new(TypeErrorKind::IncompatibleTypes(a, b), None))
} }
@ -1151,7 +972,7 @@ impl Unifier {
fn instantiate_fun(&mut self, ty: Type, fun: &FunSignature) -> Type { fn instantiate_fun(&mut self, ty: Type, fun: &FunSignature) -> Type {
let mut instantiated = true; let mut instantiated = true;
let mut vars = Vec::new(); let mut vars = Vec::new();
for (k, v) in &fun.vars { for (k, v) in fun.vars.iter() {
if let TypeEnum::TVar { id, name, loc, range, .. } = if let TypeEnum::TVar { id, name, loc, range, .. } =
self.unification_table.probe_value(*v).as_ref() self.unification_table.probe_value(*v).as_ref()
{ {
@ -1205,8 +1026,8 @@ impl Unifier {
// variables, i.e. things like TRecord, TCall should not occur, and we // variables, i.e. things like TRecord, TCall should not occur, and we
// should be safe to not implement the substitution for those variants. // should be safe to not implement the substitution for those variants.
match &*ty { match &*ty {
TypeEnum::TRigidVar { .. } | TypeEnum::TLiteral { .. } => None, TypeEnum::TRigidVar { .. } => None,
TypeEnum::TVar { id, .. } => mapping.get(id).copied(), TypeEnum::TVar { id, .. } => mapping.get(id).cloned(),
TypeEnum::TTuple { ty } => { TypeEnum::TTuple { ty } => {
let mut new_ty = Cow::from(ty); let mut new_ty = Cow::from(ty);
for (i, t) in ty.iter().enumerate() { for (i, t) in ty.iter().enumerate() {
@ -1268,14 +1089,14 @@ impl Unifier {
} }
if new_params.is_some() || new_ret.is_some() || matches!(new_args, Cow::Owned(..)) { if new_params.is_some() || new_ret.is_some() || matches!(new_args, Cow::Owned(..)) {
let params = new_params.unwrap_or_else(|| params.clone()); let params = new_params.unwrap_or_else(|| params.clone());
let ret = new_ret.unwrap_or(*ret); let ret = new_ret.unwrap_or_else(|| *ret);
let args = new_args.into_owned(); let args = new_args.into_owned();
Some(self.add_ty(TypeEnum::TFunc(FunSignature { args, ret, vars: params }))) Some(self.add_ty(TypeEnum::TFunc(FunSignature { args, ret, vars: params })))
} else { } else {
None None
} }
} }
TypeEnum::TCall(_) => { _ => {
unreachable!("{} not expected", ty.get_type_name()) unreachable!("{} not expected", ty.get_type_name())
} }
} }
@ -1283,15 +1104,15 @@ impl Unifier {
fn subst_map<K>( fn subst_map<K>(
&mut self, &mut self,
map: &IndexMapping<K>, map: &Mapping<K>,
mapping: &VarMap, mapping: &VarMap,
cache: &mut HashMap<Type, Option<Type>>, cache: &mut HashMap<Type, Option<Type>>,
) -> Option<IndexMapping<K>> ) -> Option<Mapping<K>>
where where
K: std::hash::Hash + Eq + Clone, K: std::hash::Hash + std::cmp::Eq + std::clone::Clone,
{ {
let mut map2 = None; let mut map2 = None;
for (k, v) in map { for (k, v) in map.iter() {
if let Some(v1) = self.subst_impl(*v, mapping, cache) { if let Some(v1) = self.subst_impl(*v, mapping, cache) {
if map2.is_none() { if map2.is_none() {
map2 = Some(map.clone()); map2 = Some(map.clone());
@ -1309,10 +1130,10 @@ impl Unifier {
cache: &mut HashMap<Type, Option<Type>>, cache: &mut HashMap<Type, Option<Type>>,
) -> Option<Mapping<K, (Type, bool)>> ) -> Option<Mapping<K, (Type, bool)>>
where where
K: std::hash::Hash + Eq + Clone, K: std::hash::Hash + std::cmp::Eq + std::clone::Clone,
{ {
let mut map2 = None; let mut map2 = None;
for (k, (v, mutability)) in map { for (k, (v, mutability)) in map.iter() {
if let Some(v1) = self.subst_impl(*v, mapping, cache) { if let Some(v1) = self.subst_impl(*v, mapping, cache) {
if map2.is_none() { if map2.is_none() {
map2 = Some(map.clone()); map2 = Some(map.clone());
@ -1357,7 +1178,6 @@ impl Unifier {
range, range,
name: name2.or(*name), name: name2.or(*name),
loc: loc2.or(*loc), loc: loc2.or(*loc),
is_const_generic: false,
}; };
Ok(Some(self.unification_table.new_key(ty.into()))) Ok(Some(self.unification_table.new_key(ty.into())))
} }
@ -1368,7 +1188,7 @@ impl Unifier {
if range.is_empty() { if range.is_empty() {
Ok(Some(a)) Ok(Some(a))
} else { } else {
for v in range { for v in range.iter() {
let result = self.get_intersection(a, *v); let result = self.get_intersection(a, *v);
if let Ok(result) = result { if let Ok(result) = result {
return Ok(result.or(Some(a))); return Ok(result.or(Some(a)));
@ -1384,7 +1204,7 @@ impl Unifier {
.try_collect()?; .try_collect()?;
if ty.iter().any(Option::is_some) { if ty.iter().any(Option::is_some) {
Ok(Some(self.add_ty(TTuple { Ok(Some(self.add_ty(TTuple {
ty: zip(ty, ty1.iter()).map(|(a, b)| a.unwrap_or(*b)).collect(), ty: zip(ty.into_iter(), ty1.iter()).map(|(a, b)| a.unwrap_or(*b)).collect(),
}))) })))
} else { } else {
Ok(None) Ok(None)
@ -1410,7 +1230,7 @@ impl Unifier {
if range.is_empty() { if range.is_empty() {
return Ok(None); return Ok(None);
} }
for t in range { for t in range.iter() {
let result = self.get_intersection(*t, b); let result = self.get_intersection(*t, b);
if let Ok(result) = result { if let Ok(result) = result {
return Ok(result); return Ok(result);

View File

@ -40,14 +40,14 @@ impl Unifier {
TypeEnum::TObj { obj_id: id1, params: params1, .. }, TypeEnum::TObj { obj_id: id1, params: params1, .. },
TypeEnum::TObj { obj_id: id2, params: params2, .. }, TypeEnum::TObj { obj_id: id2, params: params2, .. },
) => id1 == id2 && self.map_eq(params1, params2), ) => id1 == id2 && self.map_eq(params1, params2),
// TLiteral, TCall and TFunc are not yet implemented // TCall and TFunc are not yet implemented
_ => false, _ => false,
} }
} }
fn map_eq<K>(&mut self, map1: &IndexMapping<K>, map2: &IndexMapping<K>) -> bool fn map_eq<K>(&mut self, map1: &Mapping<K>, map2: &Mapping<K>) -> bool
where where
K: std::hash::Hash + Eq + Clone K: std::hash::Hash + std::cmp::Eq + std::clone::Clone,
{ {
if map1.len() != map2.len() { if map1.len() != map2.len() {
return false; return false;
@ -62,7 +62,7 @@ impl Unifier {
fn map_eq2<K>(&mut self, map1: &Mapping<K, RecordField>, map2: &Mapping<K, RecordField>) -> bool fn map_eq2<K>(&mut self, map1: &Mapping<K, RecordField>, map2: &Mapping<K, RecordField>) -> bool
where where
K: std::hash::Hash + Eq + Clone, K: std::hash::Hash + std::cmp::Eq + std::clone::Clone,
{ {
if map1.len() != map2.len() { if map1.len() != map2.len() {
return false; return false;
@ -91,7 +91,7 @@ impl TestEnvironment {
unifier.add_ty(TypeEnum::TObj { unifier.add_ty(TypeEnum::TObj {
obj_id: DefinitionId(0), obj_id: DefinitionId(0),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}), }),
); );
type_mapping.insert( type_mapping.insert(
@ -99,7 +99,7 @@ impl TestEnvironment {
unifier.add_ty(TypeEnum::TObj { unifier.add_ty(TypeEnum::TObj {
obj_id: DefinitionId(1), obj_id: DefinitionId(1),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}), }),
); );
type_mapping.insert( type_mapping.insert(
@ -107,7 +107,7 @@ impl TestEnvironment {
unifier.add_ty(TypeEnum::TObj { unifier.add_ty(TypeEnum::TObj {
obj_id: DefinitionId(2), obj_id: DefinitionId(2),
fields: HashMap::new(), fields: HashMap::new(),
params: VarMap::new(), params: HashMap::new(),
}), }),
); );
let (v0, id) = unifier.get_dummy_var(); let (v0, id) = unifier.get_dummy_var();
@ -116,7 +116,7 @@ impl TestEnvironment {
unifier.add_ty(TypeEnum::TObj { unifier.add_ty(TypeEnum::TObj {
obj_id: DefinitionId(3), obj_id: DefinitionId(3),
fields: [("a".into(), (v0, true))].iter().cloned().collect::<HashMap<_, _>>(), fields: [("a".into(), (v0, true))].iter().cloned().collect::<HashMap<_, _>>(),
params: [(id, v0)].iter().cloned().collect::<VarMap>(), params: [(id, v0)].iter().cloned().collect::<HashMap<_, _>>(),
}), }),
); );
@ -139,7 +139,7 @@ impl TestEnvironment {
match &typ[..end] { match &typ[..end] {
"tuple" => { "tuple" => {
let mut s = &typ[end..]; let mut s = &typ[end..];
assert_eq!(&s[0..1], "["); assert!(&s[0..1] == "[");
let mut ty = Vec::new(); let mut ty = Vec::new();
while &s[0..1] != "]" { while &s[0..1] != "]" {
let result = self.internal_parse(&s[1..], mapping); let result = self.internal_parse(&s[1..], mapping);
@ -149,14 +149,14 @@ impl TestEnvironment {
(self.unifier.add_ty(TypeEnum::TTuple { ty }), &s[1..]) (self.unifier.add_ty(TypeEnum::TTuple { ty }), &s[1..])
} }
"list" => { "list" => {
assert_eq!(&typ[end..end + 1], "["); assert!(&typ[end..end + 1] == "[");
let (ty, s) = self.internal_parse(&typ[end + 1..], mapping); let (ty, s) = self.internal_parse(&typ[end + 1..], mapping);
assert_eq!(&s[0..1], "]"); assert!(&s[0..1] == "]");
(self.unifier.add_ty(TypeEnum::TList { ty }), &s[1..]) (self.unifier.add_ty(TypeEnum::TList { ty }), &s[1..])
} }
"Record" => { "Record" => {
let mut s = &typ[end..]; let mut s = &typ[end..];
assert_eq!(&s[0..1], "["); assert!(&s[0..1] == "[");
let mut fields = HashMap::new(); let mut fields = HashMap::new();
while &s[0..1] != "]" { while &s[0..1] != "]" {
let eq = s.find('=').unwrap(); let eq = s.find('=').unwrap();
@ -176,7 +176,7 @@ impl TestEnvironment {
let te = self.unifier.get_ty(ty); let te = self.unifier.get_ty(ty);
if let TypeEnum::TObj { params, .. } = &*te.as_ref() { if let TypeEnum::TObj { params, .. } = &*te.as_ref() {
if !params.is_empty() { if !params.is_empty() {
assert_eq!(&s[0..1], "["); assert!(&s[0..1] == "[");
let mut p = Vec::new(); let mut p = Vec::new();
while &s[0..1] != "]" { while &s[0..1] != "]" {
let result = self.internal_parse(&s[1..], mapping); let result = self.internal_parse(&s[1..], mapping);
@ -339,21 +339,23 @@ fn test_recursive_subst() {
let int = *env.type_mapping.get("int").unwrap(); let int = *env.type_mapping.get("int").unwrap();
let foo_id = *env.type_mapping.get("Foo").unwrap(); let foo_id = *env.type_mapping.get("Foo").unwrap();
let foo_ty = env.unifier.get_ty(foo_id); let foo_ty = env.unifier.get_ty(foo_id);
let mapping: HashMap<_, _>;
with_fields(&mut env.unifier, foo_id, |_unifier, fields| { with_fields(&mut env.unifier, foo_id, |_unifier, fields| {
fields.insert("rec".into(), (foo_id, true)); fields.insert("rec".into(), (foo_id, true));
}); });
let TypeEnum::TObj { params, .. } = &*foo_ty else { if let TypeEnum::TObj { params, .. } = &*foo_ty {
mapping = params.iter().map(|(id, _)| (*id, int)).collect();
} else {
unreachable!() unreachable!()
}; }
let mapping = params.iter().map(|(id, _)| (*id, int)).collect();
let instantiated = env.unifier.subst(foo_id, &mapping).unwrap(); let instantiated = env.unifier.subst(foo_id, &mapping).unwrap();
let instantiated_ty = env.unifier.get_ty(instantiated); let instantiated_ty = env.unifier.get_ty(instantiated);
if let TypeEnum::TObj { fields, .. } = &*instantiated_ty {
let TypeEnum::TObj { fields, .. } = &*instantiated_ty else { assert!(env.unifier.unioned(fields.get(&"a".into()).unwrap().0, int));
assert!(env.unifier.unioned(fields.get(&"rec".into()).unwrap().0, instantiated));
} else {
unreachable!() unreachable!()
}; }
assert!(env.unifier.unioned(fields.get(&"a".into()).unwrap().0, int));
assert!(env.unifier.unioned(fields.get(&"rec".into()).unwrap().0, instantiated));
} }
#[test] #[test]
@ -363,7 +365,7 @@ fn test_virtual() {
let fun = env.unifier.add_ty(TypeEnum::TFunc(FunSignature { let fun = env.unifier.add_ty(TypeEnum::TFunc(FunSignature {
args: vec![], args: vec![],
ret: int, ret: int,
vars: VarMap::new(), vars: HashMap::new(),
})); }));
let bar = env.unifier.add_ty(TypeEnum::TObj { let bar = env.unifier.add_ty(TypeEnum::TObj {
obj_id: DefinitionId(5), obj_id: DefinitionId(5),
@ -371,7 +373,7 @@ fn test_virtual() {
.iter() .iter()
.cloned() .cloned()
.collect::<HashMap<StrRef, _>>(), .collect::<HashMap<StrRef, _>>(),
params: VarMap::new(), params: HashMap::new(),
}); });
let v0 = env.unifier.get_dummy_var().0; let v0 = env.unifier.get_dummy_var().0;
let v1 = env.unifier.get_dummy_var().0; let v1 = env.unifier.get_dummy_var().0;

View File

@ -2,7 +2,7 @@
name = "nac3ld" name = "nac3ld"
version = "0.1.0" version = "0.1.0"
authors = ["M-Labs"] authors = ["M-Labs"]
edition = "2021" edition = "2018"
[dependencies] [dependencies]
byteorder = { version = "1.5", default-features = false } byteorder = { version = "1.4", default-features = false }

View File

@ -27,33 +27,20 @@ pub const DW_EH_PE_indirect: u8 = 0x80;
pub struct DwarfReader<'a> { pub struct DwarfReader<'a> {
pub slice: &'a [u8], pub slice: &'a [u8],
pub virt_addr: u32, pub virt_addr: u32,
base_slice: &'a [u8],
base_virt_addr: u32,
} }
impl<'a> DwarfReader<'a> { impl<'a> DwarfReader<'a> {
pub fn new(slice: &[u8], virt_addr: u32) -> DwarfReader { pub fn new(slice: &[u8], virt_addr: u32) -> DwarfReader {
DwarfReader { slice, virt_addr, base_slice: slice, base_virt_addr: virt_addr } DwarfReader { slice, virt_addr }
} }
/// Creates a new instance from another instance of [DwarfReader], optionally removing any pub fn offset(&mut self, offset: i32) {
/// offsets previously applied to the other instance.
pub fn from_reader(other: &DwarfReader<'a>, reset_offset: bool) -> DwarfReader<'a> {
if reset_offset {
DwarfReader::new(other.base_slice, other.base_virt_addr)
} else {
DwarfReader::new(other.slice, other.virt_addr)
}
}
pub fn offset(&mut self, offset: u32) {
self.slice = &self.slice[offset as usize..]; self.slice = &self.slice[offset as usize..];
self.virt_addr = self.virt_addr.wrapping_add(offset); self.virt_addr = self.virt_addr.wrapping_add(offset as u32);
} }
/// ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable Length Data" of the // ULEB128 and SLEB128 encodings are defined in Section 7.6 - "Variable
/// [DWARF-4 Manual](https://dwarfstd.org/doc/DWARF4.pdf). // Length Data".
pub fn read_uleb128(&mut self) -> u64 { pub fn read_uleb128(&mut self) -> u64 {
let mut shift: usize = 0; let mut shift: usize = 0;
let mut result: u64 = 0; let mut result: u64 = 0;
@ -83,7 +70,7 @@ impl<'a> DwarfReader<'a> {
} }
// sign-extend // sign-extend
if shift < u64::BITS && (byte & 0x40) != 0 { if shift < u64::BITS && (byte & 0x40) != 0 {
result |= (!0u64) << shift; result |= (!0 as u64) << shift;
} }
result as i64 result as i64
} }
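As a worked example of the ULEB128 format handled above, here is a self-contained decoder sketch (independent of `DwarfReader`), using the classic value 624485, which encodes as the bytes 0xE5 0x8E 0x26:

```
fn read_uleb128(bytes: &[u8]) -> (u64, usize) {
    let (mut result, mut shift, mut read) = (0u64, 0u32, 0usize);
    for &byte in bytes {
        read += 1;
        result |= u64::from(byte & 0x7F) << shift;  // low 7 bits carry the payload
        if byte & 0x80 == 0 {
            break;  // high bit clear marks the last byte
        }
        shift += 7;
    }
    (result, read)
}

fn main() {
    // 624485 = 0x65 + (0x0E << 7) + (0x26 << 14)
    assert_eq!(read_uleb128(&[0xE5, 0x8E, 0x26]), (624485, 3));
}
```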
@ -213,69 +200,38 @@ fn round_up(unrounded: usize, align: usize) -> Result<usize, ()> {
} }
} }
/// Minimalistic structure to store everything needed for parsing FDEs to synthesize `.eh_frame_hdr` // Minimalistic structure to store everything needed for parsing FDEs to synthesize
/// section. // .eh_frame_hdr section. Since we are only linking 1 object file, there should only be 1 call
/// // frame information (CFI) record, so there should be only 1 common information entry (CIE).
/// Refer to [The Linux Standard Base Core Specification, Generic Part](https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html) // So the class parses the only CIE on init, cache the encoding info, then parse the FDE on
/// for more information. // iterations based on the cached encoding format.
pub struct EH_Frame<'a> { pub struct EH_Frame<'a> {
reader: DwarfReader<'a>, // It refers to the augmentation data that corresponds to 'R' in the augmentation string
pub fde_pointer_encoding: u8,
pub fde_reader: DwarfReader<'a>,
pub fde_sz: usize,
} }
impl<'a> EH_Frame<'a> { impl<'a> EH_Frame<'a> {
/// Creates an [EH_Frame] using the bytes in the `.eh_frame` section and its address in the ELF
/// file.
pub fn new(eh_frame_slice: &[u8], eh_frame_addr: u32) -> Result<EH_Frame, ()> { pub fn new(eh_frame_slice: &[u8], eh_frame_addr: u32) -> Result<EH_Frame, ()> {
Ok(EH_Frame { reader: DwarfReader::new(eh_frame_slice, eh_frame_addr) }) let mut cie_reader = DwarfReader::new(eh_frame_slice, eh_frame_addr);
} let eh_frame_size = eh_frame_slice.len();
/// Returns an [Iterator] over all Call Frame Information (CFI) records.
pub fn cfi_records(&self) -> CFI_Records<'a> {
let reader = DwarfReader::from_reader(&self.reader, true);
let len = reader.slice.len();
CFI_Records {
reader,
available: len,
}
}
}
/// A single Call Frame Information (CFI) record.
///
/// From the [specification](https://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html):
///
/// > Each CFI record contains a Common Information Entry (CIE) record followed by 1 or more Frame
/// Description Entry (FDE) records.
pub struct CFI_Record<'a> {
// It refers to the augmentation data that corresponds to 'R' in the augmentation string
fde_pointer_encoding: u8,
fde_reader: DwarfReader<'a>,
}
impl<'a> CFI_Record<'a> {
pub fn from_reader(cie_reader: &mut DwarfReader<'a>) -> Result<CFI_Record<'a>, ()> {
let length = cie_reader.read_u32(); let length = cie_reader.read_u32();
let fde_reader = match length { let fde_reader = match length {
// eh_frame with 0 lengths means the CIE is terminated // eh_frame with 0 lengths means the CIE is terminated
0 => panic!("Cannot create an EH_Frame from a termination CIE"), // while length == u32::MAX means that the length is only representable with 64 bits,
// length == u32::MAX means that the length is only representable with 64 bits,
// which does not make sense in a system with 32-bit address. // which does not make sense in a system with 32-bit address.
0xFFFFFFFF => unimplemented!(), 0 | 0xFFFFFFFF => unimplemented!(),
_ => { _ => {
let mut fde_reader = DwarfReader::from_reader(cie_reader, false); let mut fde_reader = DwarfReader::new(cie_reader.slice, cie_reader.virt_addr);
fde_reader.offset(length); fde_reader.offset(length as i32);
fde_reader fde_reader
} }
}; };
let fde_sz = eh_frame_size - mem::size_of::<u32>() - length as usize;
// Routine check on the .eh_frame well-formedness, in terms of CIE ID & Version args. // Routine check on the .eh_frame well-formedness, in terms of CIE ID & Version args.
let cie_ptr = cie_reader.read_u32(); assert_eq!(cie_reader.read_u32(), 0);
assert_eq!(cie_ptr, 0);
assert_eq!(cie_reader.read_u8(), 1); assert_eq!(cie_reader.read_u8(), 1);
// Parse augmentation string // Parse augmentation string
@ -286,7 +242,7 @@ impl<'a> CFI_Record<'a> {
// Skip code/data alignment factors & return address register along the way as well // Skip code/data alignment factors & return address register along the way as well
// We only tackle the case where 'z' and 'R' are part of the augmentation string, otherwise // We only tackle the case where 'z' and 'R' are part of the augmentation string, otherwise
// we cannot get the addresses to make .eh_frame_hdr // we cannot get the addresses to make .eh_frame_hdr
let mut aug_data_reader = DwarfReader::from_reader(cie_reader, false); let mut aug_data_reader = DwarfReader::new(cie_reader.slice, cie_reader.virt_addr);
let mut aug_str_len = 0; let mut aug_str_len = 0;
loop { loop {
if aug_data_reader.read_u8() == b'\0' { if aug_data_reader.read_u8() == b'\0' {
@ -323,121 +279,43 @@ impl<'a> CFI_Record<'a> {
} }
assert_ne!(fde_pointer_encoding, DW_EH_PE_omit); assert_ne!(fde_pointer_encoding, DW_EH_PE_omit);
Ok(CFI_Record { Ok(EH_Frame { fde_pointer_encoding, fde_reader, fde_sz })
fde_pointer_encoding,
fde_reader,
})
} }
/// Returns a [DwarfReader] initialized to the first Frame Description Entry (FDE) of this CFI pub fn iterate_fde(&self, callback: &mut dyn FnMut(u32, u32)) -> Result<(), ()> {
/// record.
pub fn get_fde_reader(&self) -> DwarfReader<'a> {
DwarfReader::from_reader(&self.fde_reader, true)
}
/// Returns an [Iterator] over all Frame Description Entries (FDEs).
pub fn fde_records(&self) -> FDE_Records<'a> {
let reader = self.get_fde_reader();
let len = reader.slice.len();
FDE_Records {
pointer_encoding: self.fde_pointer_encoding,
reader,
available: len,
}
}
}
/// [Iterator] over Call Frame Information (CFI) records in an
/// [Exception Handling (EH) frame][EH_Frame].
pub struct CFI_Records<'a> {
reader: DwarfReader<'a>,
available: usize,
}
impl<'a> Iterator for CFI_Records<'a> {
type Item = CFI_Record<'a>;
fn next(&mut self) -> Option<Self::Item> {
loop {
if self.available == 0 {
return None;
}
let mut this_reader = DwarfReader::from_reader(&self.reader, false);
// Remove the length of the header and the content from the counter
let length = self.reader.read_u32();
let length = match length {
// eh_frame with 0-length means the CIE is terminated
0 => return None,
0xFFFFFFFF => unimplemented!("CIE entries larger than 4 bytes not supported"),
other => other,
} as usize;
// Remove the length of the header and the content from the counter
self.available -= length + mem::size_of::<u32>();
let mut next_reader = DwarfReader::from_reader(&self.reader, false);
next_reader.offset(length as u32);
let cie_ptr = self.reader.read_u32();
self.reader = next_reader;
// Skip this record if it is a FDE
if cie_ptr == 0 {
// Rewind back to the start of the CFI Record
return Some(CFI_Record::from_reader(&mut this_reader).ok().unwrap())
}
}
}
}
/// [Iterator] over Frame Description Entries (FDEs) in an
/// [Exception Handling (EH) frame][EH_Frame].
pub struct FDE_Records<'a> {
pointer_encoding: u8,
reader: DwarfReader<'a>,
available: usize,
}
impl<'a> Iterator for FDE_Records<'a> {
type Item = (u32, u32);
fn next(&mut self) -> Option<Self::Item> {
// Parse each FDE to obtain the starting address that the FDE applies to // Parse each FDE to obtain the starting address that the FDE applies to
// Send the FDE offset and the mentioned address to a callback that writes out the // Send the FDE offset and the mentioned address to a callback that writes out the
// .eh_frame_hdr section // .eh_frame_hdr section
let mut remaining_len = self.fde_sz;
let mut reader = DwarfReader::new(self.fde_reader.slice, self.fde_reader.virt_addr);
loop {
if remaining_len == 0 {
break;
}
if self.available == 0 { let fde_virt_addr = reader.virt_addr;
return None; let length = match reader.read_u32() {
0 | 0xFFFFFFFF => unimplemented!(),
other => other,
};
// Remove the length of the header and the content from the counter
remaining_len -= length as usize + mem::size_of::<u32>();
let mut next_fde_reader = DwarfReader::new(reader.slice, reader.virt_addr);
next_fde_reader.offset(length as i32);
// Skip CIE pointer offset
reader.read_u32();
// Parse PC Begin using the encoding scheme mentioned in the CIE
let pc_begin = read_encoded_pointer_with_pc(&mut reader, self.fde_pointer_encoding)?;
callback(pc_begin as u32, fde_virt_addr);
reader = next_fde_reader;
} }
// Remove the length of the header and the content from the counter Ok(())
let length = match self.reader.read_u32() {
// eh_frame with 0-length means the CIE is terminated
0 => return None,
0xFFFFFFFF => unimplemented!("CIE entries larger than 4 bytes not supported"),
other => other,
} as usize;
// Remove the length of the header and the content from the counter
self.available -= length + mem::size_of::<u32>();
let mut next_fde_reader = DwarfReader::from_reader(&self.reader, false);
next_fde_reader.offset(length as u32);
let cie_ptr = self.reader.read_u32();
let next_val = if cie_ptr != 0 {
let pc_begin = read_encoded_pointer_with_pc(&mut self.reader, self.pointer_encoding)
.expect("Failed to read PC Begin");
Some((pc_begin as u32, self.reader.virt_addr))
} else {
None
};
self.reader = next_fde_reader;
next_val
} }
} }
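Taken together, the two iterators are meant to be consumed roughly like this, mirroring how the linker feeds `.eh_frame_hdr` later in this diff (`eh_frame` and `eh_frame_hdr` are assumed to be an `EH_Frame` and an `EH_Frame_Hdr` already set up):

```
for cfi in eh_frame.cfi_records() {
    for (init_pos, virt_addr) in cfi.fde_records() {
        // each FDE yields (PC Begin, FDE address) for the binary search table
        eh_frame_hdr.add_fde(init_pos, virt_addr);
    }
}
```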
@ -448,32 +326,29 @@ pub struct EH_Frame_Hdr<'a> {
} }
impl<'a> EH_Frame_Hdr<'a> { impl<'a> EH_Frame_Hdr<'a> {
// Create an EH_Frame_Hdr object, and write out the fixed fields of .eh_frame_hdr to memory
/// Create an [EH_Frame_Hdr] object, and write out the fixed fields of `.eh_frame_hdr` to memory. // eh_frame_ptr_enc will be 0x1B (PC-relative, 4 bytes)
/// // table_enc will be 0x3B (Relative to the start of .eh_frame_hdr, 4 bytes)
/// Load address is not known at this point. // Load address is not known at this point.
pub fn new( pub fn new(
eh_frame_hdr_slice: &mut [u8], eh_frame_hdr_slice: &mut [u8],
eh_frame_hdr_addr: u32, eh_frame_hdr_addr: u32,
eh_frame_addr: u32, eh_frame_addr: u32,
) -> EH_Frame_Hdr { ) -> EH_Frame_Hdr {
let mut writer = DwarfWriter::new(eh_frame_hdr_slice); let mut writer = DwarfWriter::new(eh_frame_hdr_slice);
writer.write_u8(1);
writer.write_u8(0x1B);
writer.write_u8(0x03);
writer.write_u8(0x3B);
writer.write_u8(1); // version let eh_frame_offset =
writer.write_u8(0x1B); // eh_frame_ptr_enc - PC-relative 4-byte signed value (eh_frame_addr).wrapping_sub(eh_frame_hdr_addr + ((mem::size_of::<u8>() as u32) * 4));
writer.write_u8(0x03); // fde_count_enc - 4-byte unsigned value writer.write_u32(eh_frame_offset);
writer.write_u8(0x3B); // table_enc - .eh_frame_hdr section-relative 4-byte signed value writer.write_u32(0);
let eh_frame_offset = eh_frame_addr
.wrapping_sub(eh_frame_hdr_addr + writer.offset as u32 + ((mem::size_of::<u8>() as u32) * 4));
writer.write_u32(eh_frame_offset); // eh_frame_ptr
writer.write_u32(0); // `fde_count`, will be written in finalize_fde
EH_Frame_Hdr { fde_writer: writer, eh_frame_hdr_addr, fdes: Vec::new() } EH_Frame_Hdr { fde_writer: writer, eh_frame_hdr_addr, fdes: Vec::new() }
} }
/// The offset of the `fde_count` value relative to the start of the `.eh_frame_hdr` section in
/// bytes.
fn fde_count_offset() -> usize { fn fde_count_offset() -> usize {
8 8
} }
@ -507,17 +382,11 @@ impl<'a> EH_Frame_Hdr<'a> {
if entry_length == 0 || entry_length == 0xFFFFFFFF { if entry_length == 0 || entry_length == 0xFFFFFFFF {
unimplemented!() unimplemented!()
} }
if reader.read_u32() != 0 {
// This slot stores the CIE ID (for CIE)/CIE Pointer (for FDE).
// This value must be non-zero for FDEs.
let cie_ptr = reader.read_u32();
if cie_ptr != 0 {
fde_count += 1; fde_count += 1;
} }
reader.offset(entry_length as i32 - mem::size_of::<u32>() as i32)
reader.offset(entry_length - mem::size_of::<u32>() as u32)
} }
12 + fde_count * 8 12 + fde_count * 8
} }
} }
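For reference, the fixed header layout that `new`, `fde_count_offset` and the size computation above jointly imply (a summary of the code, not an addition to it):

```
// .eh_frame_hdr layout (offsets in bytes):
//   0   u8    version          = 1
//   1   u8    eh_frame_ptr_enc = 0x1B  (PC-relative, signed 4-byte)
//   2   u8    fde_count_enc    = 0x03  (unsigned 4-byte)
//   3   u8    table_enc        = 0x3B  (section-relative, signed 4-byte)
//   4   u32   eh_frame_ptr     (written by new())
//   8   u32   fde_count        (patched later; hence fde_count_offset() == 8)
//  12   fde_count pairs of u32 (initial location, FDE address)
//        -> total size = 12 + fde_count * 8
```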

View File

@ -10,7 +10,7 @@ pub const EI_MAG2: usize = 2;
pub const ELFMAG2: u8 = b'L'; pub const ELFMAG2: u8 = b'L';
pub const EI_MAG3: usize = 3; pub const EI_MAG3: usize = 3;
pub const ELFMAG3: u8 = b'F'; pub const ELFMAG3: u8 = b'F';
pub const ELFMAG: &[u8; 5usize] = b"\x7fELF\x00"; pub const ELFMAG: &'static [u8; 5usize] = b"\x7fELF\x00";
pub const SELFMAG: usize = 4; pub const SELFMAG: usize = 4;
pub const EI_CLASS: usize = 4; pub const EI_CLASS: usize = 4;
pub const ELFCLASSNONE: u8 = 0; pub const ELFCLASSNONE: u8 = 0;
@ -428,8 +428,8 @@ pub const VER_NDX_ELIMINATE: usize = 65281;
pub const VER_NEED_NONE: usize = 0; pub const VER_NEED_NONE: usize = 0;
pub const VER_NEED_CURRENT: usize = 1; pub const VER_NEED_CURRENT: usize = 1;
pub const VER_NEED_NUM: usize = 2; pub const VER_NEED_NUM: usize = 2;
pub const ELF_NOTE_SOLARIS: &[u8; 13usize] = b"SUNW Solaris\x00"; pub const ELF_NOTE_SOLARIS: &'static [u8; 13usize] = b"SUNW Solaris\x00";
pub const ELF_NOTE_GNU: &[u8; 4usize] = b"GNU\x00"; pub const ELF_NOTE_GNU: &'static [u8; 4usize] = b"GNU\x00";
pub const ELF_NOTE_PAGESIZE_HINT: usize = 1; pub const ELF_NOTE_PAGESIZE_HINT: usize = 1;
pub const NT_GNU_ABI_TAG: usize = 1; pub const NT_GNU_ABI_TAG: usize = 1;
pub const ELF_NOTE_ABI: usize = 1; pub const ELF_NOTE_ABI: usize = 1;

View File

@ -1,7 +1,7 @@
use dwarf::*; use dwarf::*;
use elf::*; use elf::*;
use std::collections::HashMap; use std::collections::HashMap;
use std::{mem, ptr, slice, str}; use std::{convert, mem, ptr, slice, str};
extern crate byteorder; extern crate byteorder;
use byteorder::{ByteOrder, LittleEndian}; use byteorder::{ByteOrder, LittleEndian};
@ -21,7 +21,7 @@ pub enum Error {
Lookup(&'static str), Lookup(&'static str),
} }
impl From<&'static str> for Error { impl convert::From<&'static str> for Error {
fn from(desc: &'static str) -> Error { fn from(desc: &'static str) -> Error {
Error::Parsing(desc) Error::Parsing(desc)
} }
@ -74,7 +74,7 @@ fn read_unaligned<T: Copy>(data: &[u8], offset: usize) -> Result<T, ()> {
if data.len() < offset + mem::size_of::<T>() { if data.len() < offset + mem::size_of::<T>() {
Err(()) Err(())
} else { } else {
let ptr = data.as_ptr().wrapping_add(offset) as *const T; let ptr = data.as_ptr().wrapping_offset(offset as isize) as *const T;
Ok(unsafe { ptr::read_unaligned(ptr) }) Ok(unsafe { ptr::read_unaligned(ptr) })
} }
} }
@ -83,7 +83,7 @@ pub fn get_ref_slice<T: Copy>(data: &[u8], offset: usize, len: usize) -> Result<
if data.len() < offset + mem::size_of::<T>() * len { if data.len() < offset + mem::size_of::<T>() * len {
Err(()) Err(())
} else { } else {
let ptr = data.as_ptr().wrapping_add(offset) as *const T; let ptr = data.as_ptr().wrapping_offset(offset as isize) as *const T;
Ok(unsafe { slice::from_raw_parts(ptr, len) }) Ok(unsafe { slice::from_raw_parts(ptr, len) })
} }
} }
@ -177,7 +177,7 @@ impl<'a> Linker<'a> {
} }
fn load_section(&mut self, shdr: &Elf32_Shdr, sh_name_str: &'a str, data: Vec<u8>) -> usize { fn load_section(&mut self, shdr: &Elf32_Shdr, sh_name_str: &'a str, data: Vec<u8>) -> usize {
let mut elf_shdr = *shdr; let mut elf_shdr = shdr.clone();
// Maintain alignment requirement specified in sh_addralign // Maintain alignment requirement specified in sh_addralign
let align = shdr.sh_addralign; let align = shdr.sh_addralign;
@ -207,7 +207,7 @@ impl<'a> Linker<'a> {
STN_UNDEF => None, STN_UNDEF => None,
sym_index => Some( sym_index => Some(
self.symtab self.symtab
.get(sym_index) .get(sym_index as usize)
.ok_or("symbol out of bounds of symbol table")?, .ok_or("symbol out of bounds of symbol table")?,
), ),
}; };
@ -240,7 +240,7 @@ impl<'a> Linker<'a> {
let get_target_section_index = || -> Result<usize, Error> { let get_target_section_index = || -> Result<usize, Error> {
self.section_map self.section_map
.get(&(target_section as usize)) .get(&(target_section as usize))
.copied() .map(|&index| index)
.ok_or(Error::Parsing("Cannot find section with matching sh_index")) .ok_or(Error::Parsing("Cannot find section with matching sh_index"))
}; };
@ -314,9 +314,13 @@ impl<'a> Linker<'a> {
R_RISCV_PCREL_LO12_I => { R_RISCV_PCREL_LO12_I => {
let expected_offset = sym_option.map_or(0, |sym| sym.st_value); let expected_offset = sym_option.map_or(0, |sym| sym.st_value);
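// The symbol attached to a PCREL_LO12 relocation points at the instruction that
// carries the paired PCREL_HI20 relocation, so the matching entry is looked up
// by that offset below.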
let indirect_reloc = relocs let indirect_reloc = if let Some(reloc) =
.iter() relocs.iter().find(|reloc| reloc.offset() == expected_offset)
.find(|reloc| reloc.offset() == expected_offset)?; {
reloc
} else {
return None;
};
Some(RelocInfo { Some(RelocInfo {
defined_val: { defined_val: {
let indirect_sym = let indirect_sym =
@ -552,9 +556,10 @@ impl<'a> Linker<'a> {
eh_frame_hdr_rec.shdr.sh_offset, eh_frame_hdr_rec.shdr.sh_offset,
eh_frame_rec.shdr.sh_offset, eh_frame_rec.shdr.sh_offset,
); );
eh_frame.cfi_records() let mut fde_callback = |init_pos, virt_addr| eh_frame_hdr.add_fde(init_pos, virt_addr);
.flat_map(|cfi| cfi.fde_records()) eh_frame
.for_each(&mut |(init_pos, virt_addr)| eh_frame_hdr.add_fde(init_pos, virt_addr)); .iterate_fde(&mut fde_callback)
.map_err(|()| "failed to add FDE to .eh_frame_hdr while iterating .eh_frame")?;
// Sort FDE entries in .eh_frame_hdr // Sort FDE entries in .eh_frame_hdr
eh_frame_hdr.finalize_fde(); eh_frame_hdr.finalize_fde();
@ -599,24 +604,23 @@ impl<'a> Linker<'a> {
// Section table for the .elf paired with the section name // Section table for the .elf paired with the section name
// To be formalized incrementally // To be formalized incrementally
// Very hashmap-like structure, but the order matters, so it is a vector // Very hashmap-like structure, but the order matters, so it is a vector
let elf_shdrs = vec![ let mut elf_shdrs = Vec::new();
SectionRecord { elf_shdrs.push(SectionRecord {
shdr: Elf32_Shdr { shdr: Elf32_Shdr {
sh_name: 0, sh_name: 0,
sh_type: 0, sh_type: 0,
sh_flags: 0, sh_flags: 0,
sh_addr: 0, sh_addr: 0,
sh_offset: 0, sh_offset: 0,
sh_size: 0, sh_size: 0,
sh_link: 0, sh_link: 0,
sh_info: 0, sh_info: 0,
sh_addralign: 0, sh_addralign: 0,
sh_entsize: 0, sh_entsize: 0,
},
name: "",
data: vec![0; 0],
}, },
]; name: "",
data: vec![0; 0],
});
let elf_sh_data_off = mem::size_of::<Elf32_Ehdr>() + mem::size_of::<Elf32_Phdr>() * 5; let elf_sh_data_off = mem::size_of::<Elf32_Ehdr>() + mem::size_of::<Elf32_Phdr>() * 5;
// Image of the linked dynamic library, to be formalized incrementally // Image of the linked dynamic library, to be formalized incrementally
@ -656,8 +660,8 @@ impl<'a> Linker<'a> {
linker.load_section( linker.load_section(
&text_shdr, &text_shdr,
".text", ".text",
data[text_shdr.sh_offset as usize (&data[text_shdr.sh_offset as usize
..text_shdr.sh_offset as usize + text_shdr.sh_size as usize] ..text_shdr.sh_offset as usize + text_shdr.sh_size as usize])
.to_vec(), .to_vec(),
); );
linker.section_map.insert(text_shdr_index, 1); linker.section_map.insert(text_shdr_index, 1);
@ -675,8 +679,8 @@ impl<'a> Linker<'a> {
let loaded_index = linker.load_section( let loaded_index = linker.load_section(
&arm_exidx_shdr, &arm_exidx_shdr,
".ARM.exidx", ".ARM.exidx",
data[arm_exidx_shdr.sh_offset as usize (&data[arm_exidx_shdr.sh_offset as usize
..arm_exidx_shdr.sh_offset as usize + arm_exidx_shdr.sh_size as usize] ..arm_exidx_shdr.sh_offset as usize + arm_exidx_shdr.sh_size as usize])
.to_vec(), .to_vec(),
); );
linker.section_map.insert(arm_exidx_shdr_index, loaded_index); linker.section_map.insert(arm_exidx_shdr_index, loaded_index);
@ -695,7 +699,7 @@ impl<'a> Linker<'a> {
let elf_shdrs_index = linker.load_section( let elf_shdrs_index = linker.load_section(
shdr, shdr,
str::from_utf8(section_name).unwrap(), str::from_utf8(section_name).unwrap(),
data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize].to_vec(), (&data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize]).to_vec(),
); );
linker.section_map.insert(i, elf_shdrs_index); linker.section_map.insert(i, elf_shdrs_index);
} }
@ -915,7 +919,7 @@ impl<'a> Linker<'a> {
dynsym_names.push((0, 0)); dynsym_names.push((0, 0));
for rela_dyn_sym_index in rela_dyn_sym_indices { for rela_dyn_sym_index in rela_dyn_sym_indices {
let mut sym = linker.symtab[rela_dyn_sym_index as usize]; let mut sym = linker.symtab[rela_dyn_sym_index as usize].clone();
let sym_name = name_starting_at_slice(strtab, sym.st_name as usize) let sym_name = name_starting_at_slice(strtab, sym.st_name as usize)
.map_err(|_| "cannot read symbol name from the original .strtab")?; .map_err(|_| "cannot read symbol name from the original .strtab")?;
let dynstr_start_index = dynstr.len(); let dynstr_start_index = dynstr.len();
@ -925,7 +929,7 @@ impl<'a> Linker<'a> {
let elf_shdr_index = linker let elf_shdr_index = linker
.section_map .section_map
.get(&(sym.st_shndx as usize)) .get(&(sym.st_shndx as usize))
.copied() .map(|&index| index)
.ok_or(Error::Parsing("Cannot find section with matching sh_index"))?; .ok_or(Error::Parsing("Cannot find section with matching sh_index"))?;
let elf_shdr_offset = linker.elf_shdrs[elf_shdr_index].shdr.sh_offset; let elf_shdr_offset = linker.elf_shdrs[elf_shdr_index].shdr.sh_offset;
sym.st_value += elf_shdr_offset; sym.st_value += elf_shdr_offset;
@ -952,7 +956,7 @@ impl<'a> Linker<'a> {
let modinit_shdr_index = linker let modinit_shdr_index = linker
.section_map .section_map
.get(&(modinit_sym.st_shndx as usize)) .get(&(modinit_sym.st_shndx as usize))
.copied() .map(|&index| index)
.ok_or(Error::Parsing("Cannot find section with matching sh_index"))?; .ok_or(Error::Parsing("Cannot find section with matching sh_index"))?;
let modinit_shdr = linker.elf_shdrs[modinit_shdr_index].shdr; let modinit_shdr = linker.elf_shdrs[modinit_shdr_index].shdr;
@ -1010,8 +1014,9 @@ impl<'a> Linker<'a> {
let mut hash_bucket: Vec<u32> = vec![0; dynsym.len()]; let mut hash_bucket: Vec<u32> = vec![0; dynsym.len()];
let mut hash_chain: Vec<u32> = vec![0; dynsym.len()]; let mut hash_chain: Vec<u32> = vec![0; dynsym.len()];
for (sym_index, (str_start, str_end)) in dynsym_names.iter().enumerate().take(dynsym.len()).skip(1) { for sym_index in 1..dynsym.len() {
let hash = elf_hash(&dynstr[*str_start..*str_end]); let (str_start, str_end) = dynsym_names[sym_index];
let hash = elf_hash(&dynstr[str_start..str_end]);
let mut hash_index = hash as usize % hash_bucket.len(); let mut hash_index = hash as usize % hash_bucket.len();
if hash_bucket[hash_index] == 0 { if hash_bucket[hash_index] == 0 {
@ -1100,7 +1105,7 @@ impl<'a> Linker<'a> {
let elf_shdrs_index = linker.load_section( let elf_shdrs_index = linker.load_section(
shdr, shdr,
str::from_utf8(section_name).unwrap(), str::from_utf8(section_name).unwrap(),
data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize] (&data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize])
.to_vec(), .to_vec(),
); );
linker.section_map.insert(i, elf_shdrs_index); linker.section_map.insert(i, elf_shdrs_index);
@ -1204,7 +1209,7 @@ impl<'a> Linker<'a> {
let elf_shdrs_index = linker.load_section( let elf_shdrs_index = linker.load_section(
shdr, shdr,
section_name, section_name,
data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize] (&data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize])
.to_vec(), .to_vec(),
); );
linker.section_map.insert(i, elf_shdrs_index); linker.section_map.insert(i, elf_shdrs_index);
@ -1258,7 +1263,7 @@ impl<'a> Linker<'a> {
let bss_elf_index = linker.load_section( let bss_elf_index = linker.load_section(
shdr, shdr,
section_name, section_name,
data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize] (&data[shdr.sh_offset as usize..(shdr.sh_offset + shdr.sh_size) as usize])
.to_vec(), .to_vec(),
); );
linker.section_map.insert(bss_section_index, bss_elf_index); linker.section_map.insert(bss_section_index, bss_elf_index);
@ -1459,7 +1464,7 @@ impl<'a> Linker<'a> {
// Update the EHDR // Update the EHDR
let ehdr_ptr = linker.image.as_mut_ptr() as *mut Elf32_Ehdr; let ehdr_ptr = linker.image.as_mut_ptr() as *mut Elf32_Ehdr;
unsafe { unsafe {
*ehdr_ptr = Elf32_Ehdr { (*ehdr_ptr) = Elf32_Ehdr {
e_ident: ehdr.e_ident, e_ident: ehdr.e_ident,
e_type: ET_DYN, e_type: ET_DYN,
e_machine: ehdr.e_machine, e_machine: ehdr.e_machine,

View File

@ -5,20 +5,20 @@ description = "Parser for python code."
authors = [ "RustPython Team", "M-Labs" ] authors = [ "RustPython Team", "M-Labs" ]
build = "build.rs" build = "build.rs"
license = "MIT" license = "MIT"
edition = "2021" edition = "2018"
[build-dependencies] [build-dependencies]
lalrpop = "0.20" lalrpop = "0.19"
[dependencies] [dependencies]
nac3ast = { path = "../nac3ast" } nac3ast = { path = "../nac3ast" }
lalrpop-util = "0.20" lalrpop-util = "0.19"
log = "0.4" log = "0.4"
unic-emoji-char = "0.9" unic-emoji-char = "0.9"
unic-ucd-ident = "0.9" unic-ucd-ident = "0.9"
unicode_names2 = "1.2" unicode_names2 = "0.5"
phf = { version = "0.11", features = ["macros"] } phf = { version = "0.11", features = ["macros"] }
ahash = "0.8" ahash = "0.7"
[dev-dependencies] [dev-dependencies]
insta = "=1.11.0" insta = "=1.11.0"

View File

@ -170,7 +170,7 @@ impl From<LalrpopError<Location, Tok, LexicalError>> for ParseError {
location: token.0, location: token.0,
} }
} }
LalrpopError::UnrecognizedEof { location, .. } => ParseError { LalrpopError::UnrecognizedEOF { location, .. } => ParseError {
error: ParseErrorType::Eof, error: ParseErrorType::Eof,
location, location,
}, },

View File

@ -486,8 +486,8 @@ where
} }
} }
match p { match p {
0xD800..=0xDFFF => Ok(char::REPLACEMENT_CHARACTER), 0xD800..=0xDFFF => Ok(std::char::REPLACEMENT_CHARACTER),
_ => char::from_u32(p).ok_or(unicode_error), _ => std::char::from_u32(p).ok_or(unicode_error),
} }
} }

View File

@ -2,18 +2,14 @@
name = "nac3standalone" name = "nac3standalone"
version = "0.1.0" version = "0.1.0"
authors = ["M-Labs"] authors = ["M-Labs"]
edition = "2021" edition = "2018"
[dependencies] [dependencies]
parking_lot = "0.12" parking_lot = "0.12"
nac3parser = { path = "../nac3parser" } nac3parser = { path = "../nac3parser" }
nac3core = { path = "../nac3core" } nac3core = { path = "../nac3core" }
[dependencies.clap]
version = "4.5"
features = ["derive"]
[dependencies.inkwell] [dependencies.inkwell]
version = "0.4" git = "https://github.com/TheDan64/inkwell.git"
default-features = false default-features = false
features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"] features = ["llvm14-0", "target-x86", "target-arm", "target-riscv", "no-libffi-linking"]

View File

@ -1,25 +0,0 @@
#!/usr/bin/env bash
set -e
if [ -z "$1" ]; then
echo "Requires at least one argument"
exit 1
fi
declare -a nac3args
while [ $# -gt 1 ]; do
nac3args+=("$1")
shift
done
demo="$1"
echo -n "Checking $demo... "
./interpret_demo.py "$demo" > interpreted.log
./run_demo.sh --out run.log "${nac3args[@]}" "$demo"
./run_demo.sh --lli --out run_lli.log "${nac3args[@]}" "$demo"
diff -Nau interpreted.log run.log
diff -Nau interpreted.log run_lli.log
echo "ok"
rm -f interpreted.log run.log run_lli.log

View File

@ -4,8 +4,12 @@ set -e
count=0 count=0
for demo in src/*.py; do for demo in src/*.py; do
./check_demo.sh "$@" "$demo" echo -n "checking $demo... "
((count += 1)) ./interpret_demo.py $demo > interpreted.log
./run_demo.sh $demo > run.log
diff -Nau interpreted.log run.log
echo "ok"
let "count+=1"
done done
echo "Ran $count demo checks - PASSED" echo "Ran $count demo checks - PASSED"

View File

@ -1,109 +0,0 @@
#include <inttypes.h>
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define usize size_t
double dbl_nan(void) {
return NAN;
}
double dbl_inf(void) {
return INFINITY;
}
void output_bool(bool x) {
puts(x ? "True" : "False");
}
void output_int32(int32_t x) {
printf("%"PRId32"\n", x);
}
void output_int64(int64_t x) {
printf("%"PRId64"\n", x);
}
void output_uint32(uint32_t x) {
printf("%"PRIu32"\n", x);
}
void output_uint64(uint64_t x) {
printf("%"PRIu64"\n", x);
}
void output_float64(double x) {
if (isnan(x)) {
puts("nan");
} else {
printf("%f\n", x);
}
}
void output_asciiart(int32_t x) {
static const char *chars = " .,-:;i+hHM$*#@ ";
if (x < 0) {
putchar('\n');
} else {
putchar(chars[x]);
}
}
struct cslice {
void *data;
usize len;
};
void output_int32_list(struct cslice *slice) {
const int32_t *data = (int32_t *) slice->data;
putchar('[');
for (usize i = 0; i < slice->len; ++i) {
if (i == slice->len - 1) {
printf("%d", data[i]);
} else {
printf("%d, ", data[i]);
}
}
putchar(']');
putchar('\n');
}
void output_str(struct cslice *slice) {
const char *data = (const char *) slice->data;
for (usize i = 0; i < slice->len; ++i) {
putchar(data[i]);
}
putchar('\n');
}
uint64_t dbg_stack_address(__attribute__((unused)) struct cslice *slice) {
int i;
void *ptr = (void *) &i;
return (uintptr_t) ptr;
}
uint32_t __nac3_personality(uint32_t state, uint32_t exception_object, uint32_t context) {
printf("__nac3_personality(state: %u, exception_object: %u, context: %u)\n", state, exception_object, context);
exit(101);
__builtin_unreachable();
}
uint32_t __nac3_raise(uint32_t state, uint32_t exception_object, uint32_t context) {
printf("__nac3_raise(state: %u, exception_object: %u, context: %u)\n", state, exception_object, context);
exit(101);
__builtin_unreachable();
}
void __nac3_end_catch(void) {}
extern int32_t run(void);
int main(void) {
run();
}

View File

@ -0,0 +1,90 @@
mod cslice {
// copied from https://github.com/dherman/cslice
use std::marker::PhantomData;
use std::slice;
#[repr(C)]
#[derive(Clone, Copy)]
pub struct CSlice<'a, T> {
base: *const T,
len: usize,
marker: PhantomData<&'a ()>,
}
impl<'a, T> AsRef<[T]> for CSlice<'a, T> {
fn as_ref(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.base, self.len) }
}
}
}
#[no_mangle]
pub extern "C" fn output_int32(x: i32) {
println!("{}", x);
}
#[no_mangle]
pub extern "C" fn output_int64(x: i64) {
println!("{}", x);
}
#[no_mangle]
pub extern "C" fn output_uint32(x: u32) {
println!("{}", x);
}
#[no_mangle]
pub extern "C" fn output_uint64(x: u64) {
println!("{}", x);
}
#[no_mangle]
pub extern "C" fn output_float64(x: f64) {
// use debug formatting to preserve the digits after the decimal point,
// matching Python's `print` function
println!("{:?}", x);
}
#[no_mangle]
pub extern "C" fn output_asciiart(x: i32) {
let chars = " .,-:;i+hHM$*#@ ";
if x < 0 {
println!("");
} else {
print!("{}", chars.chars().nth(x as usize).unwrap());
}
}
#[no_mangle]
pub extern "C" fn output_int32_list(x: &cslice::CSlice<i32>) {
print!("[");
let mut it = x.as_ref().iter().peekable();
while let Some(e) = it.next() {
if it.peek().is_none() {
print!("{}", e);
} else {
print!("{}, ", e);
}
}
println!("]");
}
#[no_mangle]
pub extern "C" fn __nac3_personality(_state: u32, _exception_object: u32, _context: u32) -> u32 {
unimplemented!();
}
#[no_mangle]
pub extern "C" fn __nac3_raise(_state: u32, _exception_object: u32, _context: u32) -> u32 {
unimplemented!();
}
extern "C" {
fn run() -> i32;
}
fn main() {
unsafe {
run();
}
}

View File

@ -3,14 +3,10 @@
import sys import sys
import importlib.util import importlib.util
import importlib.machinery import importlib.machinery
import math
import numpy as np
import numpy.typing as npt
import pathlib import pathlib
from numpy import int32, int64, uint32, uint64 from numpy import int32, int64, uint32, uint64
from scipy import special from typing import TypeVar, Generic
from typing import TypeVar, Generic, Literal, Union
T = TypeVar('T') T = TypeVar('T')
class Option(Generic[T]): class Option(Generic[T]):
@ -45,93 +41,26 @@ def Some(v: T) -> Option[T]:
none = Option(None) none = Option(None)
class _ConstGenericMarker:
pass
def ConstGeneric(name, constraint):
return TypeVar(name, _ConstGenericMarker, constraint)
N = TypeVar("N", bound=np.uint64)
class _NDArrayDummy(Generic[T, N]):
pass
# https://stackoverflow.com/questions/67803260/how-to-create-a-type-alias-with-a-throw-away-generic
NDArray = Union[npt.NDArray[T], _NDArrayDummy[T, N]]
def _bool(x):
if isinstance(x, np.ndarray):
return np.bool_(x)
else:
return bool(x)
def _float(x):
if isinstance(x, np.ndarray):
return np.float_(x)
else:
return float(x)
def round_away_zero(x):
if isinstance(x, np.ndarray):
return np.vectorize(round_away_zero)(x)
else:
if x >= 0.0:
return math.floor(x + 0.5)
else:
return math.ceil(x - 0.5)
def _floor(x):
if isinstance(x, np.ndarray):
return np.vectorize(_floor)(x)
else:
return math.floor(x)
def _ceil(x):
if isinstance(x, np.ndarray):
return np.vectorize(_ceil)(x)
else:
return math.ceil(x)
def patch(module): def patch(module):
def dbl_nan():
return np.nan
def dbl_inf():
return np.inf
def output_asciiart(x): def output_asciiart(x):
if x < 0: if x < 0:
sys.stdout.write("\n") sys.stdout.write("\n")
else: else:
sys.stdout.write(" .,-:;i+hHM$*#@ "[x]) sys.stdout.write(" .,-:;i+hHM$*#@ "[x])
def output_float(x):
print("%f" % x)
def dbg_stack_address(_):
return 0
def extern(fun): def extern(fun):
name = fun.__name__ name = fun.__name__
if name == "dbl_nan": if name == "output_asciiart":
return dbl_nan
elif name == "dbl_inf":
return dbl_inf
elif name == "output_asciiart":
return output_asciiart return output_asciiart
elif name == "output_float64":
return output_float
elif name in { elif name in {
"output_bool",
"output_int32", "output_int32",
"output_int64", "output_int64",
"output_int32_list", "output_int32_list",
"output_uint32", "output_uint32",
"output_uint64", "output_uint64",
"output_str", "output_float64"
}: }:
return print return print
elif name == "dbg_stack_address":
return dbg_stack_address
else: else:
raise NotImplementedError raise NotImplementedError
@ -139,92 +68,13 @@ def patch(module):
module.int64 = int64 module.int64 = int64
module.uint32 = uint32 module.uint32 = uint32
module.uint64 = uint64 module.uint64 = uint64
module.bool = _bool
module.float = _float
module.TypeVar = TypeVar module.TypeVar = TypeVar
module.ConstGeneric = ConstGeneric
module.Generic = Generic module.Generic = Generic
module.Literal = Literal
module.extern = extern module.extern = extern
module.Option = Option module.Option = Option
module.Some = Some module.Some = Some
module.none = none module.none = none
# Builtin Math functions
module.round = round_away_zero
module.round64 = round_away_zero
module.np_round = np.round
module.floor = _floor
module.floor64 = _floor
module.np_floor = np.floor
module.ceil = _ceil
module.ceil64 = _ceil
module.np_ceil = np.ceil
# NumPy ndarray functions
module.ndarray = NDArray
module.np_ndarray = np.ndarray
module.np_empty = np.empty
module.np_zeros = np.zeros
module.np_ones = np.ones
module.np_full = np.full
module.np_eye = np.eye
module.np_identity = np.identity
# NumPy Math functions
module.np_isnan = np.isnan
module.np_isinf = np.isinf
module.np_min = np.min
module.np_minimum = np.minimum
module.np_max = np.max
module.np_maximum = np.maximum
module.np_sin = np.sin
module.np_cos = np.cos
module.np_exp = np.exp
module.np_exp2 = np.exp2
module.np_log = np.log
module.np_log10 = np.log10
module.np_log2 = np.log2
module.np_fabs = np.fabs
module.np_trunc = np.trunc
module.np_sqrt = np.sqrt
module.np_rint = np.rint
module.np_tan = np.tan
module.np_arcsin = np.arcsin
module.np_arccos = np.arccos
module.np_arctan = np.arctan
module.np_sinh = np.sinh
module.np_cosh = np.cosh
module.np_tanh = np.tanh
module.np_arcsinh = np.arcsinh
module.np_arccosh = np.arccosh
module.np_arctanh = np.arctanh
module.np_expm1 = np.expm1
module.np_cbrt = np.cbrt
module.np_arctan2 = np.arctan2
module.np_copysign = np.copysign
module.np_fmax = np.fmax
module.np_fmin = np.fmin
module.np_ldexp = np.ldexp
module.np_hypot = np.hypot
module.np_nextafter = np.nextafter
# SciPy Math Functions
module.sp_spec_erf = special.erf
module.sp_spec_erfc = special.erfc
module.sp_spec_gamma = special.gamma
module.sp_spec_gammaln = special.gammaln
module.sp_spec_j0 = special.j0
module.sp_spec_j1 = special.j1
# NumPy NDArray Functions
module.np_ndarray = np.ndarray
module.np_empty = np.empty
module.np_zeros = np.zeros
module.np_ones = np.ones
module.np_full = np.full
module.np_eye = np.eye
module.np_identity = np.identity
def file_import(filename, prefix="file_import_"): def file_import(filename, prefix="file_import_"):
filename = pathlib.Path(filename) filename = pathlib.Path(filename)

View File

@ -7,72 +7,14 @@ if [ -z "$1" ]; then
exit 1 exit 1
fi fi
declare -a nac3args if [ -e ../../target/release/nac3standalone ]; then
while [ $# -ge 1 ]; do
case "$1" in
--help)
echo "Usage: run_demo.sh [--help] [--out OUTFILE] [--lli] [--debug] -- [NAC3ARGS...]"
exit
;;
--out)
shift
outfile="$1"
;;
--lli)
use_lli=1
;;
--debug)
debug=1
;;
--)
shift
break
;;
*)
break
;;
esac
shift
done
while [ $# -ge 1 ]; do
nac3args+=("$1")
shift
done
if [ -n "$debug" ] && [ -e ../../target/debug/nac3standalone ]; then
nac3standalone=../../target/debug/nac3standalone
elif [ -e ../../target/release/nac3standalone ]; then
nac3standalone=../../target/release/nac3standalone nac3standalone=../../target/release/nac3standalone
else else
# used by Nix builds # used by Nix builds
nac3standalone=../../target/x86_64-unknown-linux-gnu/release/nac3standalone nac3standalone=../../target/x86_64-unknown-linux-gnu/release/nac3standalone
fi fi
rm -f ./*.o ./*.bc demo rm -f *.o
if [ -z "$use_lli" ]; then $nac3standalone $1
$nac3standalone "${nac3args[@]}" rustc -o demo demo.rs -Crelocation-model=static -Clink-arg=./module.o
./demo
clang -c -std=gnu11 -Wall -Wextra -O3 -o demo.o demo.c
clang -lm -o demo module.o demo.o
if [ -z "$outfile" ]; then
./demo
else
./demo > "$outfile"
fi
else
$nac3standalone --emit-llvm "${nac3args[@]}"
clang -c -std=gnu11 -Wall -Wextra -O3 -emit-llvm -o demo.bc demo.c
shopt -s nullglob
llvm-link -o nac3out.bc module*.bc main.bc
shopt -u nullglob
if [ -z "$outfile" ]; then
lli --extra-module demo.bc --extra-module irrt.bc nac3out.bc
else
lli --extra-module demo.bc --extra-module irrt.bc nac3out.bc > "$outfile"
fi
fi

View File

@ -1,30 +0,0 @@
# Different cases for using boolean variables in boolean contexts.
# Tests whether all boolean variables (expressed as i8s) are lowered into i1s before being used in branching instructions (`br`)
def bfunc(b: bool) -> bool:
return not b
def run() -> int32:
b1 = True
b2 = False
if b1:
pass
if not b2:
pass
while b2:
pass
l = [i for i in range(10) if b2]
b_and = True and False
b_or = True or False
b_and = b1 and b2
b_or = b1 or b2
bfunc(b1)
return 0

View File

@ -23,8 +23,8 @@ class A:
def get_a(self) -> int32: def get_a(self) -> int32:
return self.a return self.a
# def get_b(self) -> B: def get_b(self) -> B:
# return self.b return self.b
def run() -> int32: def run() -> int32:

View File

@ -1,50 +0,0 @@
A = ConstGeneric("A", int32)
B = ConstGeneric("B", uint32)
T = TypeVar("T")
class ConstGenericClass(Generic[A]):
def __init__(self):
pass
class ConstGeneric2Class(Generic[A, B]):
def __init__(self):
pass
class HybridGenericClass2(Generic[A, T]):
pass
class HybridGenericClass3(Generic[T, A, B]):
pass
def make_generic_2() -> ConstGenericClass[Literal[2]]:
return ...
def make_generic2_1_2() -> ConstGeneric2Class[Literal[1], Literal[2]]:
return ...
def make_hybrid_class_2_int32() -> HybridGenericClass2[Literal[2], int32]:
return ...
def make_hybrid_class_i32_0_1() -> HybridGenericClass3[int32, Literal[0], Literal[1]]:
return ...
def consume_generic_2(instance: ConstGenericClass[Literal[2]]):
pass
def consume_generic2_1_2(instance: ConstGeneric2Class[Literal[1], Literal[2]]):
pass
def consume_hybrid_class_2_i32(instance: HybridGenericClass2[Literal[2], int32]):
pass
def consume_hybrid_class_i32_0_1(instance: HybridGenericClass3[int32, Literal[0], Literal[1]]):
pass
def f():
consume_generic_2(make_generic_2())
consume_generic2_1_2(make_generic2_1_2())
consume_hybrid_class_2_i32(make_hybrid_class_2_int32())
consume_hybrid_class_i32_0_1(make_hybrid_class_i32_0_1())
def run() -> int32:
return 0

View File

@ -1,8 +0,0 @@
def f():
return
return
def run() -> int32:
f()
return 0

View File

@ -1,83 +0,0 @@
@extern
def output_bool(x: bool):
...
@extern
def output_int32(x: int32):
...
@extern
def output_int64(x: int64):
...
@extern
def output_uint32(x: uint32):
...
@extern
def output_uint64(x: uint64):
...
@extern
def output_float64(x: float):
...
@extern
def output_int32_list(x: list[int32]):
...
@extern
def output_asciiart(x: int32):
...
@extern
def output_str(x: str):
...
def test_output_bool():
output_bool(True)
output_bool(False)
def test_output_int32():
output_int32(-128)
def test_output_int64():
output_int64(int64(-256))
def test_output_uint32():
output_uint32(uint32(128))
def test_output_uint64():
output_uint64(uint64(256))
def test_output_float64():
output_float64(0.0)
output_float64(1.0)
output_float64(-1.0)
output_float64(128.0)
output_float64(-128.0)
output_float64(16.25)
output_float64(-16.25)
def test_output_asciiart():
for i in range(17):
output_asciiart(i)
output_asciiart(0)
def test_output_int32_list():
output_int32_list([0, 1, 3, 5, 10])
def test_output_str_family():
output_str("hello world")
def run() -> int32:
test_output_bool()
test_output_int32()
test_output_int64()
test_output_uint32()
test_output_uint64()
test_output_float64()
test_output_asciiart()
test_output_int32_list()
test_output_str_family()
return 0

View File

@ -1,17 +0,0 @@
@extern
def output_int32(x: int32):
...
@extern
def output_int32_list(x: list[int32]):
...
def run() -> int32:
bl = [True, False]
bl1 = bl[:]
bl1[1:] = [True]
output_int32_list([int32(b) for b in bl1])
output_int32_list([int32(b) for b in bl1])
return 0

View File

@ -1,12 +1,9 @@
# For Loop using an increasing range() expression as its iterable
@extern @extern
def output_int32(x: int32): def output_int32(x: int32):
... ...
def run() -> int32: def run() -> int32:
i = 0 for _ in range(10):
for i in range(10): output_int32(_)
output_int32(i) _ = 0
output_int32(i)
return 0 return 0

View File

@ -1,21 +0,0 @@
@extern
def output_int32(x: int32):
...
def run() -> int32:
for i in range(4):
output_int32(i)
if i < 2:
continue
else:
break
n = [0, 1, 2, 3]
for i in n:
output_int32(i)
if i < 2:
continue
else:
break
return 0

View File

@ -1,12 +0,0 @@
# For Loop using a decreasing range() expression as its iterable
@extern
def output_int32(x: int32):
...
def run() -> int32:
i = 0
for i in range(10, 0, -1):
output_int32(i)
output_int32(i)
return 0

View File

@ -1,17 +0,0 @@
# For Loop using a list as its iterable
@extern
def output_int32(x: int32):
...
def run() -> int32:
l = [0, 1, 2, 3, 4]
# i: int32 # declaration-without-initializer not yet supported
i = 0 # i must be declared before the loop; this is not necessary in Python
for i in l:
output_int32(i)
i = 0
output_int32(i)
output_int32(i)
return 0

View File

@ -1,14 +0,0 @@
# For Loop using a range() expression as its iterable, additionally reassigning the target on each iteration
@extern
def output_int32(x: int32):
...
def run() -> int32:
i = 0
for i in range(10):
output_int32(i)
i = 0
output_int32(i)
output_int32(i)
return 0

View File

@ -1,33 +0,0 @@
# Break within try statement within a loop
# Taken from https://book.pythontips.com/en/latest/for_-_else.html
@extern
def output_int32(x: int32):
...
@extern
def output_float64(x: float):
...
@extern
def output_str(x: str):
...
def run() -> int32:
for n in range(2, 10):
for x in range(2, n):
try:
if n % x == 0:
output_int32(n)
output_str(" equals ")
output_int32(x)
output_str(" * ")
output_float64(n / x)
except: # Assume this is intended to catch x == 0
break
else:
# loop fell through without finding a factor
output_int32(n)
output_str(" is a prime number")
return 0

View File

@ -1,274 +0,0 @@
@extern
def output_bool(x: bool):
...
@extern
def output_int32(x: int32):
...
@extern
def output_int64(x: int64):
...
@extern
def output_float64(x: float):
...
@extern
def dbl_nan() -> float:
...
@extern
def dbl_inf() -> float:
...
def dbl_pi() -> float:
return 3.1415926535897932384626433
def dbl_e() -> float:
return 2.71828182845904523536028747135266249775724709369995
def test_round():
for x in [-1.5, -0.5, 0.5, 1.5]:
output_int32(round(x))
def test_round64():
for x in [-1.5, -0.5, 0.5, 1.5]:
output_int64(round64(x))
def test_np_round():
for x in [-1.5, -0.5, 0.5, 1.5, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_round(x))
def test_np_isnan():
for x in [dbl_nan(), 0.0, dbl_inf()]:
output_bool(np_isnan(x))
def test_np_isinf():
for x in [dbl_inf(), -dbl_inf(), 0.0, dbl_nan()]:
output_bool(np_isinf(x))
def test_np_sin():
pi = dbl_pi()
for x in [-pi, -pi / 2.0, -pi / 4.0, 0.0, pi / 4.0, pi / 2.0, pi, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_sin(x))
def test_np_cos():
pi = dbl_pi()
for x in [-pi, -pi / 2.0, -pi / 4.0, 0.0, pi / 4.0, pi / 2.0, pi, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_cos(x))
def test_np_exp():
for x in [0.0, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_exp(x))
def test_np_exp2():
for x in [0.0, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_exp2(x))
def test_np_log():
e = dbl_e()
for x in [1.0, e, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_log(x))
def test_np_log10():
for x in [1.0, 10.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_log10(x))
def test_np_log2():
for x in [1.0, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_log2(x))
def test_np_fabs():
for x in [-1.0, 0.0, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_fabs(x))
def test_floor():
for x in [-1.5, -0.5, 0.5, 1.5]:
output_int32(floor(x))
def test_floor64():
for x in [-1.5, -0.5, 0.5, 1.5]:
output_int64(floor64(x))
def test_np_floor():
for x in [-1.5, -0.5, 0.5, 1.5, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_floor(x))
def test_ceil():
for x in [-1.5, -0.5, 0.5, 1.5]:
output_int32(ceil(x))
def test_ceil64():
for x in [-1.5, -0.5, 0.5, 1.5]:
output_int64(ceil64(x))
def test_np_ceil():
for x in [-1.5, -0.5, 0.5, 1.5, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_ceil(x))
def test_np_sqrt():
for x in [1.0, 2.0, 4.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_sqrt(x))
def test_np_rint():
for x in [-1.5, -0.5, 0.5, 1.5, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_rint(x))
def test_np_tan():
pi = dbl_pi()
for x in [-pi, -pi / 2.0, -pi / 4.0, 0.0, pi / 4.0, pi / 2.0, pi, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_tan(x))
def test_np_arcsin():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_arcsin(x))
def test_np_arccos():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_arccos(x))
def test_np_arctan():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_arctan(x))
def test_np_sinh():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_sinh(x))
def test_np_cosh():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_cosh(x))
def test_np_tanh():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_tanh(x))
def test_np_arcsinh():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_arcsinh(x))
def test_np_arccosh():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_arccosh(x))
def test_np_arctanh():
for x in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_arctanh(x))
def test_np_expm1():
for x in [0.0, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_expm1(x))
def test_np_cbrt():
for x in [1.0, 8.0, 27.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_cbrt(x))
def test_sp_spec_erf():
for x in [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(sp_spec_erf(x))
def test_sp_spec_erfc():
for x in [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(sp_spec_erfc(x))
def test_sp_spec_gamma():
for x in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(sp_spec_gamma(x))
def test_sp_spec_gammaln():
for x in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(sp_spec_gammaln(x))
def test_sp_spec_j0():
for x in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(sp_spec_j0(x))
def test_sp_spec_j1():
for x in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0]:
output_float64(sp_spec_j1(x))
def test_np_arctan2():
for x1 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
for x2 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_arctan2(x1, x2))
def test_np_copysign():
for x1 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
for x2 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_copysign(x1, x2))
def test_np_fmax():
for x1 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
for x2 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_fmax(x1, x2))
def test_np_fmin():
for x1 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
for x2 in [-1.0, -0.5, 0.0, 0.5, 1.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_fmin(x1, x2))
def test_np_ldexp():
for x1 in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
for x2 in [-2, -1, 0, 1, 2]:
output_float64(np_ldexp(x1, x2))
def test_np_hypot():
for x1 in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
for x2 in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_hypot(x1, x2))
def test_np_nextafter():
for x1 in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
for x2 in [-2.0, -1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5, 2.0, dbl_inf(), -dbl_inf(), dbl_nan()]:
output_float64(np_nextafter(x1, x2))
def run() -> int32:
test_round()
test_round64()
test_np_round()
test_np_isnan()
test_np_isinf()
test_np_sin()
test_np_cos()
test_np_exp()
test_np_exp2()
test_np_log()
test_np_log10()
test_np_log2()
test_np_fabs()
test_floor()
test_floor64()
test_np_floor()
test_ceil()
test_ceil64()
test_np_ceil()
test_np_sqrt()
test_np_rint()
test_np_tan()
test_np_arcsin()
test_np_arccos()
test_np_arctan()
test_np_sinh()
test_np_cosh()
test_np_tanh()
test_np_arcsinh()
test_np_arccosh()
test_np_arctanh()
test_np_expm1()
test_np_cbrt()
test_sp_spec_erf()
test_sp_spec_erfc()
test_sp_spec_gamma()
test_sp_spec_gammaln()
test_sp_spec_j0()
test_sp_spec_j1()
test_np_arctan2()
test_np_copysign()
test_np_fmax()
test_np_fmin()
test_np_ldexp()
test_np_hypot()
test_np_nextafter()
return 0

File diff suppressed because it is too large Load Diff

View File

@ -1,184 +0,0 @@
@extern
def output_bool(x: bool):
...
@extern
def output_int32(x: int32):
...
@extern
def output_int64(x: int64):
...
@extern
def output_uint32(x: uint32):
...
@extern
def output_uint64(x: uint64):
...
@extern
def output_float64(x: float):
...
def u32_min() -> uint32:
return uint32(0)
def u32_max() -> uint32:
return ~uint32(0)
def i32_min() -> int32:
return int32(1 << 31)
def i32_max() -> int32:
return int32(~(1 << 31))
def u64_min() -> uint64:
return uint64(0)
def u64_max() -> uint64:
return ~uint64(0)
def i64_min() -> int64:
return int64(1) << 63
def i64_max() -> int64:
return ~(int64(1) << 63)
def test_u32_bnot():
output_uint32(~uint32(0))
def test_u64_bnot():
output_uint64(~uint64(0))
def test_conv_from_i32():
for x in [
i32_min(),
i32_min() + 1,
-1,
0,
1,
i32_max() - 1,
i32_max()
]:
output_int64(int64(x))
output_uint32(uint32(x))
output_uint64(uint64(x))
output_float64(float(x))
def test_conv_from_u32():
for x in [
u32_min(),
u32_min() + uint32(1),
u32_max() - uint32(1),
u32_max()
]:
output_uint64(uint64(x))
output_int32(int32(x))
output_int64(int64(x))
output_float64(float(x))
def test_conv_from_i64():
for x in [
i64_min(),
i64_min() + int64(1),
int64(-1),
int64(0),
int64(1),
i64_max() - int64(1),
i64_max()
]:
output_int32(int32(x))
output_uint64(uint64(x))
output_uint32(uint32(x))
output_float64(float(x))
def test_conv_from_u64():
for x in [
u64_min(),
u64_min() + uint64(1),
u64_max() - uint64(1),
u64_max()
]:
output_uint32(uint32(x))
output_int64(int64(x))
output_int32(int32(x))
output_float64(float(x))
def test_f64toi32():
for x in [
float(i32_min()) - 1.0,
float(i32_min()),
float(i32_min()) + 1.0,
-1.5,
-0.5,
0.5,
1.5,
float(i32_max()) - 1.0,
float(i32_max()),
float(i32_max()) + 1.0
]:
output_int32(int32(x))
def test_f64toi64():
for x in [
float(i64_min()),
float(i64_min()) + 1.0,
-1.5,
-0.5,
0.5,
1.5,
# 2^53 is the highest integral power of two up to which uint64 and float values have a one-to-one correspondence
float(uint64(2) ** uint64(52)) - 1.0,
float(uint64(2) ** uint64(52)),
float(uint64(2) ** uint64(52)) + 1.0,
]:
output_int64(int64(x))
def test_f64tou32():
for x in [
-1.5,
float(u32_min()) - 1.0,
-0.5,
float(u32_min()),
0.5,
float(u32_min()) + 1.0,
1.5,
float(u32_max()) - 1.0,
float(u32_max()),
float(u32_max()) + 1.0
]:
output_uint32(uint32(x))
def test_f64tou64():
for x in [
-1.5,
float(u64_min()) - 1.0,
-0.5,
float(u64_min()),
0.5,
float(u64_min()) + 1.0,
1.5,
# 2^53 is the highest integral power of two up to which uint64 and float values have a one-to-one correspondence
float(uint64(2) ** uint64(52)) - 1.0,
float(uint64(2) ** uint64(52)),
float(uint64(2) ** uint64(52)) + 1.0,
]:
output_uint64(uint64(x))
def run() -> int32:
test_u32_bnot()
test_u64_bnot()
test_conv_from_i32()
test_conv_from_u32()
test_conv_from_i64()
test_conv_from_u64()
test_f64toi32()
test_f64toi64()
test_f64tou32()
test_f64tou64()
return 0

View File

@ -1,9 +1,5 @@
from __future__ import annotations from __future__ import annotations
@extern
def output_bool(x: bool):
...
@extern @extern
def output_int32(x: int32): def output_int32(x: int32):
... ...
@ -21,27 +17,14 @@ def output_float64(x: float):
... ...
def run() -> int32: def run() -> int32:
test_bool()
test_int32() test_int32()
test_uint32() test_uint32()
test_int64() test_int64()
test_uint64() test_uint64()
# test_A() test_A()
# test_B() test_B()
return 0 return 0
def test_bool():
t = True
f = False
output_bool(not t)
output_bool(not f)
output_int32(~t)
output_int32(~f)
output_int32(+t)
output_int32(+f)
output_int32(-t)
output_int32(-f)
def test_int32(): def test_int32():
a = 17 a = 17
b = 3 b = 3
@ -54,9 +37,7 @@ def test_int32():
output_int32(a ^ b) output_int32(a ^ b)
output_int32(a & b) output_int32(a & b)
output_int32(a << b) output_int32(a << b)
output_int32(a << uint32(b))
output_int32(a >> b) output_int32(a >> b)
output_int32(a >> uint32(b))
output_float64(a / b) output_float64(a / b)
a += b a += b
output_int32(a) output_int32(a)
@ -93,9 +74,7 @@ def test_uint32():
output_uint32(a ^ b) output_uint32(a ^ b)
output_uint32(a & b) output_uint32(a & b)
output_uint32(a << b) output_uint32(a << b)
output_uint32(a << int32(b))
output_uint32(a >> b) output_uint32(a >> b)
output_uint32(a >> int32(b))
output_float64(a / b) output_float64(a / b)
a += b a += b
output_uint32(a) output_uint32(a)
@ -129,10 +108,8 @@ def test_int64():
output_int64(a | b) output_int64(a | b)
output_int64(a ^ b) output_int64(a ^ b)
output_int64(a & b) output_int64(a & b)
output_int64(a << int32(b)) output_int64(a << b)
output_int64(a << uint32(b)) output_int64(a >> b)
output_int64(a >> int32(b))
output_int64(a >> uint32(b))
output_float64(a / b) output_float64(a / b)
a += b a += b
output_int64(a) output_int64(a)
@ -150,9 +127,9 @@ def test_int64():
output_int64(a) output_int64(a)
a &= b a &= b
output_int64(a) output_int64(a)
a <<= int32(b) a <<= b
output_int64(a) output_int64(a)
a >>= int32(b) a >>= b
output_int64(a) output_int64(a)
def test_uint64(): def test_uint64():
@ -166,8 +143,8 @@ def test_uint64():
output_uint64(a | b) output_uint64(a | b)
output_uint64(a ^ b) output_uint64(a ^ b)
output_uint64(a & b) output_uint64(a & b)
output_uint64(a << uint32(b)) output_uint64(a << b)
output_uint64(a >> uint32(b)) output_uint64(a >> b)
output_float64(a / b) output_float64(a / b)
a += b a += b
output_uint64(a) output_uint64(a)
@ -185,97 +162,96 @@ def test_uint64():
output_uint64(a) output_uint64(a)
a &= b a &= b
output_uint64(a) output_uint64(a)
a <<= uint32(b) a <<= b
output_uint64(a) output_uint64(a)
a >>= uint32(b) a >>= b
output_uint64(a) output_uint64(a)
# FIXME Fix returning objects of non-primitive types; Currently this is disabled in the function checker class A:
# class A: a: int32
# a: int32 def __init__(self, a: int32):
# def __init__(self, a: int32): self.a = a
# self.a = a
# def __add__(self, other: A) -> A:
# def __add__(self, other: A) -> A: output_int32(self.a + other.a)
# output_int32(self.a + other.a) return A(self.a + other.a)
# return A(self.a + other.a)
# def __sub__(self, other: A) -> A:
# def __sub__(self, other: A) -> A: output_int32(self.a - other.a)
# output_int32(self.a - other.a) return A(self.a - other.a)
# return A(self.a - other.a)
# def test_A():
# def test_A(): a = A(17)
# a = A(17) b = A(3)
# b = A(3)
# c = a + b
# c = a + b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(c.a)
# # output_int32(c.a)
# a += b
# a += b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(a.a)
# # output_int32(a.a)
# a = A(17)
# a = A(17) b = A(3)
# b = A(3) d = a - b
# d = a - b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(c.a)
# # output_int32(c.a)
# a -= b
# a -= b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(a.a)
# # output_int32(a.a)
# a = A(17)
# a = A(17) b = A(3)
# b = A(3) a.__add__(b)
# a.__add__(b) a.__sub__(b)
# a.__sub__(b)
#
# class B:
# class B: a: int32
# a: int32 def __init__(self, a: int32):
# def __init__(self, a: int32): self.a = a
# self.a = a
# def __add__(self, other: B) -> B:
# def __add__(self, other: B) -> B: output_int32(self.a + other.a)
# output_int32(self.a + other.a) return B(self.a + other.a)
# return B(self.a + other.a)
# def __sub__(self, other: B) -> B:
# def __sub__(self, other: B) -> B: output_int32(self.a - other.a)
# output_int32(self.a - other.a) return B(self.a - other.a)
# return B(self.a - other.a)
# def __iadd__(self, other: B) -> B:
# def __iadd__(self, other: B) -> B: output_int32(self.a + other.a + 24)
# output_int32(self.a + other.a + 24) return B(self.a + other.a + 24)
# return B(self.a + other.a + 24)
# def __isub__(self, other: B) -> B:
# def __isub__(self, other: B) -> B: output_int32(self.a - other.a - 24)
# output_int32(self.a - other.a - 24) return B(self.a - other.a - 24)
# return B(self.a - other.a - 24)
# def test_B():
# def test_B(): a = B(17)
# a = B(17) b = B(3)
# b = B(3)
# c = a + b
# c = a + b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(c.a)
# # output_int32(c.a)
# a += b
# a += b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(a.a)
# # output_int32(a.a)
# a = B(17)
# a = B(17) b = B(3)
# b = B(3) d = a - b
# d = a - b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(c.a)
# # output_int32(c.a)
# a -= b
# a -= b # fail due to alloca in __add__ function
# # fail due to alloca in __add__ function # output_int32(a.a)
# # output_int32(a.a)
# a = B(17)
# a = B(17) b = B(3)
# b = B(3) a.__add__(b)
# a.__add__(b) a.__sub__(b)
# a.__sub__(b)

View File

@ -1,15 +0,0 @@
@extern
def output_bool(x: bool):
...
@extern
def dbg_stack_address(x: str) -> uint64:
...
def run() -> int32:
a = dbg_stack_address("a")
b = dbg_stack_address("b")
output_bool(a == b)
return 0

View File

@ -1,26 +0,0 @@
def run() -> int32:
# Numeric Primitives
b: bool = False
i32: int32 = 0
i64: int64 = int64(0)
u32: uint32 = uint32(0)
u64: uint64 = uint64(0)
f64: float = 0.0
# String
s: str = ""
# List
l_i32: list[int32] = []
l_f64: list[float] = []
l_str: list[str] = []
# Option
o_some: Option[int32] = Some(0)
o_none: Option[int32] = none
# Tuple
t_i32_i32: tuple[int32, int32] = (0, 0)
t_i32_f64: tuple[int32, float] = (0, 0.0)
return 0

View File

@ -10,7 +10,6 @@ use nac3core::{
use nac3parser::ast::{self, StrRef}; use nac3parser::ast::{self, StrRef};
use parking_lot::{Mutex, RwLock}; use parking_lot::{Mutex, RwLock};
use std::{collections::HashMap, sync::Arc}; use std::{collections::HashMap, sync::Arc};
use std::collections::HashSet;
pub struct ResolverInternal { pub struct ResolverInternal {
pub id_to_type: Mutex<HashMap<StrRef, Type>>, pub id_to_type: Mutex<HashMap<StrRef, Type>>,
@ -51,22 +50,19 @@ impl SymbolResolver for Resolver {
_: &PrimitiveStore, _: &PrimitiveStore,
str: StrRef, str: StrRef,
) -> Result<Type, String> { ) -> Result<Type, String> {
self.0.id_to_type.lock().get(&str).copied().ok_or(format!("cannot get type of {str}")) self.0.id_to_type.lock().get(&str).cloned().ok_or(format!("cannot get type of {}", str))
} }
fn get_symbol_value<'ctx>( fn get_symbol_value<'ctx, 'a>(
&self, &self,
_: StrRef, _: StrRef,
_: &mut CodeGenContext<'ctx, '_>, _: &mut CodeGenContext<'ctx, 'a>,
) -> Option<ValueEnum<'ctx>> { ) -> Option<ValueEnum<'ctx>> {
unimplemented!() unimplemented!()
} }
fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, HashSet<String>> { fn get_identifier_def(&self, id: StrRef) -> Result<DefinitionId, String> {
self.0.id_to_def.lock().get(&id).copied() self.0.id_to_def.lock().get(&id).cloned().ok_or_else(|| "Undefined identifier".to_string())
.ok_or_else(|| HashSet::from([
format!("Undefined identifier `{id}`"),
]))
} }
fn get_string_id(&self, s: &str) -> i32 { fn get_string_id(&self, s: &str) -> i32 {
@ -74,8 +70,7 @@ impl SymbolResolver for Resolver {
if let Some(id) = str_store.get(s) { if let Some(id) = str_store.get(s) {
*id *id
} else { } else {
let id = i32::try_from(str_store.len()) let id = str_store.len() as i32;
.expect("Symbol resolver string store size exceeds max capacity (i32::MAX)");
str_store.insert(s.to_string(), id); str_store.insert(s.to_string(), id);
id id
} }

View File

@ -1,165 +1,73 @@
use clap::Parser;
use inkwell::{ use inkwell::{
memory_buffer::MemoryBuffer, memory_buffer::MemoryBuffer,
passes::PassBuilderOptions, passes::{PassManager, PassManagerBuilder},
support::is_multithreaded,
targets::*, targets::*,
OptimizationLevel, OptimizationLevel,
}; };
use parking_lot::{Mutex, RwLock}; use parking_lot::{Mutex, RwLock};
use std::{collections::HashMap, fs, path::Path, sync::Arc}; use std::{borrow::Borrow, collections::HashMap, env, fs, path::Path, sync::Arc};
use std::collections::HashSet;
use nac3core::{ use nac3core::{
codegen::{ codegen::{
concrete_type::ConcreteTypeStore, irrt::load_irrt, CodeGenLLVMOptions, concrete_type::ConcreteTypeStore, irrt::load_irrt, CodeGenTask, DefaultCodeGenerator,
CodeGenTargetMachineOptions, CodeGenTask, DefaultCodeGenerator, WithCall, WorkerRegistry, WithCall, WorkerRegistry,
}, },
symbol_resolver::SymbolResolver, symbol_resolver::SymbolResolver,
toplevel::{ toplevel::{
composer::{ComposerConfig, TopLevelComposer}, composer::TopLevelComposer, helper::parse_parameter_default_value, type_annotation::*,
helper::parse_parameter_default_value,
type_annotation::*,
TopLevelDef, TopLevelDef,
}, },
typecheck::{ typecheck::{
type_inferencer::PrimitiveStore, type_inferencer::PrimitiveStore,
typedef::{FunSignature, Type, Unifier, VarMap}, typedef::{FunSignature, Type, Unifier},
}, },
}; };
use nac3parser::{ use nac3parser::{
ast::{Constant, Expr, ExprKind, StmtKind, StrRef}, ast::{Expr, ExprKind, StmtKind},
parser, parser,
}; };
mod basic_symbol_resolver; mod basic_symbol_resolver;
use basic_symbol_resolver::*; use basic_symbol_resolver::*;
/// Command-line argument parser definition.
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct CommandLineArgs {
/// The name of the input file.
file_name: String,
/// The number of threads allocated to processing the source file. If 0 is passed to this
/// parameter, all available threads will be used for compilation.
#[arg(short = 'T', default_value_t = 1)]
threads: u32,
/// The level to optimize the LLVM IR.
#[arg(short = 'O', default_value_t = 2, value_parser = clap::value_parser!(u32).range(0..=3))]
opt_level: u32,
/// Whether to emit LLVM IR at the end of every module.
///
/// If multithreaded compilation is also enabled, each thread will emit its own module.
#[arg(long, default_value_t = false)]
emit_llvm: bool,
/// The target triple to compile for.
#[arg(long)]
triple: Option<String>,
/// The target CPU to compile for.
#[arg(long)]
mcpu: Option<String>,
/// Additional target features to enable/disable, specified using the `+`/`-` prefixes.
#[arg(long)]
target_features: Option<String>,
}
fn handle_typevar_definition( fn handle_typevar_definition(
var: &Expr, var: &Expr,
resolver: &(dyn SymbolResolver + Send + Sync), resolver: &(dyn SymbolResolver + Send + Sync),
def_list: &[Arc<RwLock<TopLevelDef>>], def_list: &[Arc<RwLock<TopLevelDef>>],
unifier: &mut Unifier, unifier: &mut Unifier,
primitives: &PrimitiveStore, primitives: &PrimitiveStore,
) -> Result<Type, HashSet<String>> { ) -> Result<Type, String> {
let ExprKind::Call { func, args, .. } = &var.node else { if let ExprKind::Call { func, args, .. } = &var.node {
return Err(HashSet::from([ if matches!(&func.node, ExprKind::Name { id, .. } if id == &"TypeVar".into()) {
format!(
"expression {var:?} cannot be handled as a generic parameter in global scope"
),
]))
};
match &func.node {
ExprKind::Name { id, .. } if id == &"TypeVar".into() => {
let ExprKind::Constant { value: Constant::Str(ty_name), .. } = &args[0].node else {
return Err(HashSet::from([
format!("Expected string constant for first parameter of `TypeVar`, got {:?}", &args[0].node),
]))
};
let generic_name: StrRef = ty_name.to_string().into();
let constraints = args let constraints = args
.iter() .iter()
.skip(1) .skip(1)
.map(|x| -> Result<Type, HashSet<String>> { .map(|x| -> Result<Type, String> {
let ty = parse_ast_to_type_annotation_kinds( let ty = parse_ast_to_type_annotation_kinds(
resolver, resolver,
def_list, def_list,
unifier, unifier,
primitives, primitives,
x, x,
HashMap::default(), Default::default(),
)?; )?;
get_type_from_type_annotation_kinds( get_type_from_type_annotation_kinds(
def_list, unifier, &ty, &mut None def_list, unifier, primitives, &ty, &mut None
) )
}) })
.collect::<Result<Vec<_>, _>>()?; .collect::<Result<Vec<_>, _>>()?;
let loc = func.location; Ok(unifier.get_fresh_var_with_range(&constraints, None, None).0)
} else {
if constraints.len() == 1 { Err(format!(
return Err(HashSet::from([ "expression {:?} cannot be handled as a TypeVar in global scope",
format!("A single constraint is not allowed (at {loc})"), var
])) ))
}
Ok(unifier.get_fresh_var_with_range(&constraints, Some(generic_name), Some(loc)).0)
} }
} else {
ExprKind::Name { id, .. } if id == &"ConstGeneric".into() => { Err(format!(
if args.len() != 2 { "expression {:?} cannot be handled as a TypeVar in global scope",
return Err(HashSet::from([ var
format!("Expected 2 arguments for `ConstGeneric`, got {}", args.len()), ))
]))
}
let ExprKind::Constant { value: Constant::Str(ty_name), .. } = &args[0].node else {
return Err(HashSet::from([
format!(
"Expected string constant for first parameter of `ConstGeneric`, got {:?}",
&args[0].node
),
]))
};
let generic_name: StrRef = ty_name.to_string().into();
let ty = parse_ast_to_type_annotation_kinds(
resolver,
def_list,
unifier,
primitives,
&args[1],
HashMap::default(),
)?;
let constraint = get_type_from_type_annotation_kinds(
def_list, unifier, &ty, &mut None
)?;
let loc = func.location;
Ok(unifier.get_fresh_const_generic_var(constraint, Some(generic_name), Some(loc)).0)
}
_ => Err(HashSet::from([
format!(
"expression {var:?} cannot be handled as a generic parameter in global scope"
),
]))
} }
} }
@ -176,7 +84,7 @@ fn handle_assignment_pattern(
match &targets[0].node { match &targets[0].node {
ExprKind::Name { id, .. } => { ExprKind::Name { id, .. } => {
if let Ok(var) = handle_typevar_definition( if let Ok(var) = handle_typevar_definition(
value, value.borrow(),
resolver, resolver,
def_list, def_list,
unifier, unifier,
@ -185,12 +93,12 @@ fn handle_assignment_pattern(
internal_resolver.add_id_type(*id, var); internal_resolver.add_id_type(*id, var);
Ok(()) Ok(())
} else if let Ok(val) = } else if let Ok(val) =
parse_parameter_default_value(value, resolver) parse_parameter_default_value(value.borrow(), resolver)
{ {
internal_resolver.add_module_global(*id, val); internal_resolver.add_module_global(*id, val);
Ok(()) Ok(())
} else { } else {
Err(format!("fails to evaluate this expression `{:?}` as a constant or generic parameter at {}", Err(format!("fails to evaluate this expression `{:?}` as a constant or TypeVar at {}",
targets[0].node, targets[0].node,
targets[0].location, targets[0].location,
)) ))
@ -216,7 +124,14 @@ fn handle_assignment_pattern(
} else { } else {
match &value.node { match &value.node {
ExprKind::List { elts, .. } | ExprKind::Tuple { elts, .. } => { ExprKind::List { elts, .. } | ExprKind::Tuple { elts, .. } => {
if elts.len() == targets.len() { if elts.len() != targets.len() {
Err(format!(
"number of elements to unpack does not match (expect {}, found {}) at {}",
targets.len(),
elts.len(),
value.location
))
} else {
for (tar, val) in targets.iter().zip(elts) { for (tar, val) in targets.iter().zip(elts) {
handle_assignment_pattern( handle_assignment_pattern(
std::slice::from_ref(tar), std::slice::from_ref(tar),
@ -229,13 +144,6 @@ fn handle_assignment_pattern(
)?; )?;
} }
Ok(()) Ok(())
} else {
Err(format!(
"number of elements to unpack does not match (expect {}, found {}) at {}",
targets.len(),
elts.len(),
value.location
))
} }
} }
_ => Err(format!( _ => Err(format!(
@ -247,74 +155,36 @@ fn handle_assignment_pattern(
} }
fn main() { fn main() {
const SIZE_T: u32 = usize::BITS; let file_name = env::args().nth(1).unwrap();
let threads: u32 = env::args().nth(2).map(|s| str::parse(&s).unwrap()).unwrap_or(1);
let cli = CommandLineArgs::parse();
let CommandLineArgs {
file_name,
threads,
opt_level,
emit_llvm,
triple,
mcpu,
target_features,
} = cli;
Target::initialize_all(&InitializationConfig::default()); Target::initialize_all(&InitializationConfig::default());
let host_target_machine = CodeGenTargetMachineOptions::from_host();
let triple = triple.unwrap_or(host_target_machine.triple.clone());
let mcpu = mcpu
.map(|arg| if arg == "native" { host_target_machine.cpu.clone() } else { arg })
.unwrap_or_default();
let target_features = target_features.unwrap_or_default();
let threads = if is_multithreaded() {
if threads == 0 {
std::thread::available_parallelism()
.map(|threads| threads.get() as u32)
.unwrap_or(1u32)
} else {
threads
}
} else {
if threads != 1 {
println!("Warning: Number of threads specified in command-line but multithreading is disabled in LLVM at build time! Defaulting to single-threaded compilation");
}
1
};
let opt_level = match opt_level {
0 => OptimizationLevel::None,
1 => OptimizationLevel::Less,
2 => OptimizationLevel::Default,
// For -O<n> where n>3, both Clang and GCC default to O3
_ => OptimizationLevel::Aggressive,
};
let program = match fs::read_to_string(file_name.clone()) { let program = match fs::read_to_string(file_name.clone()) {
Ok(program) => program, Ok(program) => program,
Err(err) => { Err(err) => {
println!("Cannot open input file: {err}"); println!("Cannot open input file: {}", err);
return; return;
} }
}; };
let primitive: PrimitiveStore = TopLevelComposer::make_primitives(SIZE_T).0; let primitive: PrimitiveStore = TopLevelComposer::make_primitives().0;
let (mut composer, builtins_def, builtins_ty) = let (mut composer, builtins_def, builtins_ty) =
TopLevelComposer::new(vec![], ComposerConfig::default(), SIZE_T); TopLevelComposer::new(vec![], Default::default());
let internal_resolver: Arc<ResolverInternal> = ResolverInternal { let internal_resolver: Arc<ResolverInternal> = ResolverInternal {
id_to_type: builtins_ty.into(), id_to_type: builtins_ty.into(),
id_to_def: builtins_def.into(), id_to_def: builtins_def.into(),
class_names: Mutex::default(), class_names: Default::default(),
module_globals: Mutex::default(), module_globals: Default::default(),
str_store: Mutex::default(), str_store: Default::default(),
}.into(); }.into();
let resolver = let resolver =
Arc::new(Resolver(internal_resolver.clone())) as Arc<dyn SymbolResolver + Send + Sync>; Arc::new(Resolver(internal_resolver.clone())) as Arc<dyn SymbolResolver + Send + Sync>;
let parser_result = parser::parse_program(&program, file_name.into()).unwrap(); let parser_result = parser::parse_program(&program, file_name.into()).unwrap();
for stmt in parser_result { for stmt in parser_result.into_iter() {
match &stmt.node { match &stmt.node {
StmtKind::Assign { targets, value, .. } => { StmtKind::Assign { targets, value, .. } => {
let def_list = composer.extract_def_list(); let def_list = composer.extract_def_list();
@ -329,7 +199,7 @@ fn main() {
unifier, unifier,
primitives, primitives,
) { ) {
eprintln!("{err}"); eprintln!("{}", err);
return; return;
} }
}, },
@ -338,7 +208,7 @@ fn main() {
if module == &Some("__future__".into()) && names.len() == 1 && names[0].name == "annotations".into() => (), if module == &Some("__future__".into()) && names.len() == 1 && names[0].name == "annotations".into() => (),
_ => { _ => {
let (name, def_id, ty) = let (name, def_id, ty) =
composer.register_top_level(stmt, Some(resolver.clone()), "__main__", true).unwrap(); composer.register_top_level(stmt, Some(resolver.clone()), "__main__".into(), true).unwrap();
internal_resolver.add_id_def(name, def_id); internal_resolver.add_id_def(name, def_id);
if let Some(ty) = ty { if let Some(ty) = ty {
internal_resolver.add_id_type(name, ty); internal_resolver.add_id_type(name, ty);
@ -347,7 +217,7 @@ fn main() {
} }
} }
let signature = FunSignature { args: vec![], ret: primitive.int32, vars: VarMap::new() }; let signature = FunSignature { args: vec![], ret: primitive.int32, vars: HashMap::new() };
let mut store = ConcreteTypeStore::new(); let mut store = ConcreteTypeStore::new();
let mut cache = HashMap::new(); let mut cache = HashMap::new();
let signature = store.from_signature(&mut composer.unifier, &primitive, &signature, &mut cache); let signature = store.from_signature(&mut composer.unifier, &primitive, &signature, &mut cache);
@ -364,26 +234,16 @@ fn main() {
.unwrap_or_else(|_| panic!("cannot find run() entry point")) .unwrap_or_else(|_| panic!("cannot find run() entry point"))
.0] .0]
.write(); .write();
let TopLevelDef::Function { instance_to_stmt, instance_to_symbol, .. } = &mut *instance else { if let TopLevelDef::Function { instance_to_stmt, instance_to_symbol, .. } = &mut *instance {
instance_to_symbol.insert("".to_string(), "run".to_string());
instance_to_stmt[""].clone()
} else {
unreachable!() unreachable!()
}; }
instance_to_symbol.insert(String::new(), "run".to_string());
instance_to_stmt[""].clone()
};
let llvm_options = CodeGenLLVMOptions {
opt_level,
target: CodeGenTargetMachineOptions {
triple,
cpu: mcpu,
features: target_features,
reloc_mode: RelocMode::PIC,
..host_target_machine
},
}; };
let task = CodeGenTask { let task = CodeGenTask {
subst: Vec::default(), subst: Default::default(),
symbol_name: "run".to_string(), symbol_name: "run".to_string(),
body: instance.body, body: instance.body,
signature, signature,
@ -394,7 +254,7 @@ fn main() {
id: 0, id: 0,
}; };
let membuffers: Arc<Mutex<Vec<Vec<u8>>>> = Arc::default(); let membuffers: Arc<Mutex<Vec<Vec<u8>>>> = Default::default();
let membuffer = membuffers.clone(); let membuffer = membuffers.clone();
let f = Arc::new(WithCall::new(Box::new(move |module| { let f = Arc::new(WithCall::new(Box::new(move |module| {
@ -403,9 +263,9 @@ fn main() {
membuffer.lock().push(buffer); membuffer.lock().push(buffer);
}))); })));
let threads = (0..threads) let threads = (0..threads)
.map(|i| Box::new(DefaultCodeGenerator::new(format!("module{i}"), SIZE_T))) .map(|i| Box::new(DefaultCodeGenerator::new(format!("module{}", i), 64)))
.collect(); .collect();
let (registry, handles) = WorkerRegistry::create_workers(threads, top_level, &llvm_options, &f); let (registry, handles) = WorkerRegistry::create_workers(threads, top_level, f);
registry.add_task(task); registry.add_task(task);
registry.wait_tasks_complete(handles); registry.wait_tasks_complete(handles);
@ -414,27 +274,14 @@ fn main() {
let main = context let main = context
.create_module_from_ir(MemoryBuffer::create_from_memory_range(&buffers[0], "main")) .create_module_from_ir(MemoryBuffer::create_from_memory_range(&buffers[0], "main"))
.unwrap(); .unwrap();
if emit_llvm { for buffer in buffers.iter().skip(1) {
main.write_bitcode_to_path(Path::new("main.bc"));
}
for (idx, buffer) in buffers.iter().skip(1).enumerate() {
let other = context let other = context
.create_module_from_ir(MemoryBuffer::create_from_memory_range(buffer, "main")) .create_module_from_ir(MemoryBuffer::create_from_memory_range(buffer, "main"))
.unwrap(); .unwrap();
if emit_llvm {
other.write_bitcode_to_path(Path::new(&format!("module{idx}.bc")));
}
main.link_in_module(other).unwrap(); main.link_in_module(other).unwrap();
} }
main.link_in_module(load_irrt(&context)).unwrap();
let irrt = load_irrt(&context);
if emit_llvm {
irrt.write_bitcode_to_path(Path::new("irrt.bc"));
}
main.link_in_module(irrt).unwrap();
let mut function_iter = main.get_first_function(); let mut function_iter = main.get_first_function();
while let Some(func) = function_iter { while let Some(func) = function_iter {
@ -444,18 +291,25 @@ fn main() {
function_iter = func.get_next_function(); function_iter = func.get_next_function();
} }
let target_machine = llvm_options.target let builder = PassManagerBuilder::create();
.create_target_machine(llvm_options.opt_level) builder.set_optimization_level(OptimizationLevel::Aggressive);
let passes = PassManager::create(());
builder.set_inliner_with_threshold(255);
builder.populate_module_pass_manager(&passes);
passes.run_on(&main);
let triple = TargetMachine::get_default_triple();
let target = Target::from_triple(&triple).expect("couldn't create target from target triple");
let target_machine = target
.create_target_machine(
&triple,
"",
"",
OptimizationLevel::Default,
RelocMode::Default,
CodeModel::Default,
)
.expect("couldn't create target machine"); .expect("couldn't create target machine");
let pass_options = PassBuilderOptions::create();
pass_options.set_merge_functions(true);
let passes = format!("default<O{}>", opt_level as u32);
let result = main.run_passes(passes.as_str(), &target_machine, pass_options);
if let Err(err) = result {
panic!("Failed to run optimization for module `main`: {}", err.to_string());
}
target_machine target_machine
.write_to_file(&main, FileType::Object, Path::new("module.o")) .write_to_file(&main, FileType::Object, Path::new("module.o"))
.expect("couldn't write module to file"); .expect("couldn't write module to file");

View File

@ -112,7 +112,7 @@ in stdenv.mkDerivation (rec {
"-DLLVM_HOST_TRIPLE=${stdenv.hostPlatform.config}" "-DLLVM_HOST_TRIPLE=${stdenv.hostPlatform.config}"
"-DLLVM_DEFAULT_TARGET_TRIPLE=${stdenv.hostPlatform.config}" "-DLLVM_DEFAULT_TARGET_TRIPLE=${stdenv.hostPlatform.config}"
"-DLLVM_ENABLE_UNWIND_TABLES=OFF" "-DLLVM_ENABLE_UNWIND_TABLES=OFF"
"-DLLVM_ENABLE_THREADS=ON" "-DLLVM_ENABLE_THREADS=OFF"
"-DLLVM_INCLUDE_BENCHMARKS=OFF" "-DLLVM_INCLUDE_BENCHMARKS=OFF"
"-DLLVM_BUILD_TOOLS=OFF" "-DLLVM_BUILD_TOOLS=OFF"
"-DLLVM_TARGETS_TO_BUILD=X86;ARM;RISCV" "-DLLVM_TARGETS_TO_BUILD=X86;ARM;RISCV"

View File

@ -1,16 +1,16 @@
pkgbase="mingw-w64-nac3artiq" pkgbase="mingw-w64-nac3artiq"
pkgname="mingw-w64-clang-x86_64-nac3artiq" pkgname="mingw-w64-x86_64-nac3artiq"
pkgver=1.0 pkgver=1.0
pkgrel=1 pkgrel=1
pkgdesc="New ARTIQ compiler 3" pkgdesc="New ARTIQ compiler 3"
arch=("any") arch=("any")
mingw_arch=("clang64") mingw_arch=("mingw64")
url="https://m-labs.hk" url="https://m-labs.hk"
license=("LGPL") license=("LGPL")
source=("nac3artiq.pyd") source=("nac3artiq.pyd")
noextract=("nac3artiq.pyd") noextract=("nac3artiq.pyd")
sha256sums=("SKIP") sha256sums=("SKIP")
depends=("mingw-w64-clang-x86_64-python") depends=("mingw-w64-x86_64-python")
prepare() { prepare() {
true true
@ -21,6 +21,6 @@ build() {
} }
package() { package() {
mkdir -p $pkgdir/clang64/lib/python3.11/site-packages mkdir -p $pkgdir/mingw64/lib/python3.10/site-packages
cp ${srcdir}/nac3artiq.pyd $pkgdir/clang64/lib/python3.11/site-packages cp ${srcdir}/nac3artiq.pyd $pkgdir/mingw64/lib/python3.10/site-packages
} }

View File

@ -21,11 +21,11 @@ let
text = text =
'' ''
implementation=CPython implementation=CPython
version=3.11 version=3.10
shared=true shared=true
abi3=false abi3=false
lib_name=python3.11 lib_name=python3.10
lib_dir=${msys2-env}/clang64/lib lib_dir=${msys2-env}/mingw64/lib
pointer_width=64 pointer_width=64
build_flags=WITH_THREAD build_flags=WITH_THREAD
suppress_build_script_link_lines=false suppress_build_script_link_lines=false
@ -61,11 +61,11 @@ in rec {
'' ''
export HOME=`mktemp -d` export HOME=`mktemp -d`
export WINEDEBUG=-all export WINEDEBUG=-all
export WINEPATH=Z:${msys2-env}/clang64/bin export WINEPATH=Z:${msys2-env}/mingw64/bin
${silenceFontconfig} ${silenceFontconfig}
mkdir build mkdir build
cd build cd build
wine64 cmake .. -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_UNWIND_TABLES=OFF -DLLVM_ENABLE_THREADS=ON -DLLVM_TARGETS_TO_BUILD=X86\;ARM\;RISCV -DLLVM_LINK_LLVM_DYLIB=OFF -DLLVM_ENABLE_FFI=OFF -DFFI_INCLUDE_DIR=fck-cmake -DFFI_LIBRARY_DIR=fck-cmake -DLLVM_ENABLE_LIBXML2=OFF -DLLVM_INCLUDE_BENCHMARKS=OFF -DLLVM_ENABLE_PROJECTS=clang -DCMAKE_INSTALL_PREFIX=Z:$out wine64 cmake .. -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_UNWIND_TABLES=OFF -DLLVM_ENABLE_THREADS=OFF -DLLVM_TARGETS_TO_BUILD=X86\;ARM\;RISCV -DLLVM_LINK_LLVM_DYLIB=OFF -DLLVM_ENABLE_FFI=OFF -DFFI_INCLUDE_DIR=fck-cmake -DFFI_LIBRARY_DIR=fck-cmake -DLLVM_ENABLE_LIBXML2=OFF -DLLVM_INCLUDE_BENCHMARKS=OFF -DLLVM_ENABLE_PROJECTS=clang -DCMAKE_INSTALL_PREFIX=Z:$out
''; '';
buildPhase = buildPhase =
'' ''
@ -77,28 +77,23 @@ in rec {
''; '';
dontFixup = true; dontFixup = true;
}; };
llvm-tools-irrt = pkgs.runCommandNoCC "llvm-tools-irrt" {}
''
mkdir -p $out/bin
ln -s ${llvm-nac3}/bin/clang.exe $out/bin/clang-irrt.exe
ln -s ${llvm-nac3}/bin/llvm-as.exe $out/bin/llvm-as-irrt.exe
'';
nac3artiq = pkgs.rustPlatform.buildRustPackage { nac3artiq = pkgs.rustPlatform.buildRustPackage {
name = "nac3artiq-msys2"; name = "nac3artiq-msys2";
src = ../../.; src = ../../.;
cargoLock = { cargoLock = {
lockFile = ../../Cargo.lock; lockFile = ../../Cargo.lock;
outputHashes = {
"inkwell-0.1.0" = "sha256-+ih3SO0n6YmZ/mcf+rLDwPAy/1MEZ/A+tI4pM1pUhvU=";
};
}; };
nativeBuildInputs = [ pkgs.wineWowPackages.stable ]; nativeBuildInputs = [ pkgs.wineWowPackages.stable ];
buildPhase = buildPhase =
'' ''
export HOME=`mktemp -d` export HOME=`mktemp -d`
export WINEDEBUG=-all export WINEDEBUG=-all
export WINEPATH=Z:${msys2-env}/clang64/bin\;Z:${llvm-nac3}/bin\;Z:${llvm-tools-irrt}/bin export WINEPATH=Z:${msys2-env}/mingw64/bin\;Z:${llvm-nac3}/bin
${silenceFontconfig} ${silenceFontconfig}
export PYO3_CONFIG_FILE=Z:${pyo3-mingw-config} export PYO3_CONFIG_FILE=Z:${pyo3-mingw-config}
export CC=clang
export LLVM_SYS_140_PREFIX=Z:${llvm-nac3}
wine64 cargo build --release -p nac3artiq wine64 cargo build --release -p nac3artiq
''; '';
installPhase = installPhase =
@ -107,7 +102,6 @@ in rec {
cp target/release/nac3artiq.dll $out/nac3artiq.pyd cp target/release/nac3artiq.dll $out/nac3artiq.pyd
echo file binary-dist $out/nac3artiq.pyd >> $out/nix-support/hydra-build-products echo file binary-dist $out/nac3artiq.pyd >> $out/nix-support/hydra-build-products
''; '';
doCheck = false; # https://git.m-labs.hk/M-Labs/nac3/issues/358
checkPhase = checkPhase =
'' ''
wine64 cargo test --release wine64 cargo test --release
@ -135,7 +129,7 @@ in rec {
wine-msys2 = pkgs.writeShellScriptBin "wine-msys2" wine-msys2 = pkgs.writeShellScriptBin "wine-msys2"
'' ''
export WINEDEBUG=-all export WINEDEBUG=-all
export WINEPATH=Z:${msys2-env}/clang64/bin\;Z:${llvm-nac3}/bin\;Z:${llvm-tools-irrt}/bin export WINEPATH=Z:${msys2-env}/mingw64/bin\;Z:${llvm-nac3}/bin
export PYO3_CONFIG_FILE=Z:${pyo3-mingw-config} export PYO3_CONFIG_FILE=Z:${pyo3-mingw-config}
exec ${pkgs.wineWowPackages.stable}/bin/wine64 cmd exec ${pkgs.wineWowPackages.stable}/bin/wine64 cmd
''; '';
@ -143,7 +137,7 @@ in rec {
'' ''
export HOME=`mktemp -d` export HOME=`mktemp -d`
export WINEDEBUG=-all export WINEDEBUG=-all
export WINEPATH=Z:${msys2-env}/clang64/bin export WINEPATH=Z:${msys2-env}/mingw64/bin
${silenceFontconfig} ${silenceFontconfig}
exec ${pkgs.wineWowPackages.stable}/bin/wine64 $@ exec ${pkgs.wineWowPackages.stable}/bin/wine64 $@
''; '';
View File
@ -7,21 +7,18 @@ MSYS2DIR=`pwd`/msys2
mkdir -p $MSYS2DIR/var/lib/pacman $MSYS2DIR/msys/etc mkdir -p $MSYS2DIR/var/lib/pacman $MSYS2DIR/msys/etc
curl -L https://mirror.msys2.org/msys/x86_64/pacman-mirrors-20220205-1-any.pkg.tar.zst | tar xvf - -C $MSYS2DIR --zstd curl -L https://mirror.msys2.org/msys/x86_64/pacman-mirrors-20220205-1-any.pkg.tar.zst | tar xvf - -C $MSYS2DIR --zstd
curl -L https://raw.githubusercontent.com/msys2/MSYS2-packages/master/pacman/pacman.conf | sed "s|SigLevel = Required|SigLevel = Never|g" | sed "s|/etc/pacman.d|$MSYS2DIR/etc/pacman.d|g" > $MSYS2DIR/etc/pacman.conf curl -L https://raw.githubusercontent.com/msys2/MSYS2-packages/master/pacman/pacman.conf | grep -v SigLevel | sed s\|/etc/pacman.d\|$MSYS2DIR/etc/pacman.d\|g > $MSYS2DIR/etc/pacman.conf
fakeroot pacman --root $MSYS2DIR --config $MSYS2DIR/etc/pacman.conf -Syy fakeroot pacman --root $MSYS2DIR --config $MSYS2DIR/etc/pacman.conf -Syy
pacman --root $MSYS2DIR --config $MSYS2DIR/etc/pacman.conf --cachedir $MSYS2DIR/msys/cache -Sp mingw-w64-clang-x86_64-rust mingw-w64-clang-x86_64-cmake mingw-w64-clang-x86_64-ninja mingw-w64-clang-x86_64-python3.11 mingw-w64-clang-x86_64-python-numpy mingw-w64-clang-x86_64-python-setuptools > $MSYS2DIR/packages.txt pacman --root $MSYS2DIR --config $MSYS2DIR/etc/pacman.conf --cachedir $MSYS2DIR/msys/cache -Sp mingw-w64-x86_64-rust mingw-w64-x86_64-cmake mingw-w64-x86_64-ninja mingw-w64-x86_64-python3.10 mingw-w64-x86_64-python-numpy mingw-w64-x86_64-python-setuptools > $MSYS2DIR/packages.txt
echo "{ pkgs } : [" > msys2_packages.nix echo "{ pkgs } : [" > msys2_packages.nix
while read package; do while read package; do
basename=${package##*/} hash=$(nix-prefetch-url $package)
name=${basename//\~/}
hash=$(nix-prefetch-url $package --name $name)
echo " echo "
(pkgs.fetchurl { (pkgs.fetchurl {
url = \"$package\"; url = \"$package\";
sha256 = \"$hash\"; sha256 = \"$hash\";
name = \"$name\";
})" >> msys2_packages.nix })" >> msys2_packages.nix
done < $MSYS2DIR/packages.txt done < $MSYS2DIR/packages.txt
echo "]" >> msys2_packages.nix echo "]" >> msys2_packages.nix
View File
@ -1,350 +1,302 @@
{ pkgs } : [ { pkgs } : [
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libunwind-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libiconv-1.17-1-any.pkg.tar.zst";
sha256 = "0ksz7xz1lbwsmdr9sa1444k0dlfkbd8k11pq7w08ir7r1wjy6fid"; sha256 = "1pb1x5wrlmmpjdpzsc7rs5xk6ydlsd5mval0fwrqq54jf6dxdzpz";
name = "mingw-w64-clang-x86_64-libunwind-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libc++-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-zlib-1.2.12-1-any.pkg.tar.zst";
sha256 = "0r8skyjqv4cpkqif0niakx4hdpkscil1zf6mzj34pqna0j5gdnq2"; sha256 = "1b461ic5s3hjk3y70ldik82ny08rdywn1zfqa8d2jyyvnh4dya77";
name = "mingw-w64-clang-x86_64-libc++-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libffi-3.4.6-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-binutils-2.38-4-any.pkg.tar.zst";
sha256 = "1q6gms980985bp087rnnpvz2fwfakgm5266izfk3b1mbp620s1yv"; sha256 = "18cgs1cvhr8hrq46g2av9as589wxn76rrshhzvx8max8iqzwprm3";
name = "mingw-w64-clang-x86_64-libffi-3.4.6-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libiconv-1.17-4-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-headers-git-10.0.0.r59.gaacb650be-1-any.pkg.tar.zst";
sha256 = "1g2bkhgf60dywccxw911ydyigf3m25yqfh81m5099swr7mjsmzyf"; sha256 = "0gq38zb880ar0xj62ddcggw8cqg7h6g1yw0x422i8cgak6x8qasp";
name = "mingw-w64-clang-x86_64-libiconv-1.17-4-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-gettext-runtime-0.22.5-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-crt-git-10.0.0.r59.gaacb650be-1-any.pkg.tar.zst";
sha256 = "0ll6ci6d3mc7g04q0xixjc209bh8r874dqbczgns69jsad3wg6mi"; sha256 = "1safighnniwmjrklrig41m1kj1b40lrzaiv48xzf26ljb45fy6lq";
name = "mingw-w64-clang-x86_64-gettext-runtime-0.22.5-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-xz-5.6.1-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-gmp-6.2.1-3-any.pkg.tar.zst";
sha256 = "14p4xxaxjjy6j1ingji82xhai1mc1gls5ali6z40fbb2ylxkaggs"; sha256 = "170640c8j81gl67kp85kr8kmg5axsl1vqwn9g7cx6vcr638qax9c";
name = "mingw-w64-clang-x86_64-xz-5.6.1-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-zlib-1.3.1-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-isl-0.25-1-any.pkg.tar.zst";
sha256 = "06i9xjsskf4ddb2ph4h31md5c7imj9mzjhd4lc4q44j8dmpc1w5p"; sha256 = "0hky9gmd6iz1s3irmp9fk2j10cpqrrw8l810riwr58ynj3i10j2k";
name = "mingw-w64-clang-x86_64-zlib-1.3.1-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libxml2-2.12.6-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-mpfr-4.1.0.p13-1-any.pkg.tar.zst";
sha256 = "177b3rmsknqq6hf0zqwva71s3avh20ca7vzznp2ls2z5qm8vhhlp"; sha256 = "17klcf17mddd7hsrak920zglqh00drqjdh6dxh3v3c4y62xj1qr6";
name = "mingw-w64-clang-x86_64-libxml2-2.12.6-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-zstd-1.5.5-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-mpc-1.2.1-1-any.pkg.tar.zst";
sha256 = "07739wmwgxf0d6db4p8w302a6jwcm01aafr1s8jvcl5k1h5a1m2m"; sha256 = "0761i6aga4982v6mw1hgqrrqznki0c8v93xkpf5fqmsjysfncscc";
name = "mingw-w64-clang-x86_64-zstd-1.5.5-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-llvm-libs-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libwinpthread-git-10.0.0.r59.gaacb650be-1-any.pkg.tar.zst";
sha256 = "0ibiy01v16naik9pj32ch7a9pkbw4yrn3gyq7p0y6kcc63fkjazy"; sha256 = "0a9niq05s7ny0y1x625xy9p3dzakw5l4w8djajv09lkxqx36yp40";
name = "mingw-w64-clang-x86_64-llvm-libs-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-llvm-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-libs-12.1.0-3-any.pkg.tar.zst";
sha256 = "1hcfz6nb6svmmcqzfrdi96az2x7mzj0cispdv2ssbgn7nkf19pi0"; sha256 = "0gxifzjl9v72z5fbr89j47j2b7l7ba9cf4xf49wb3khppqb2q9by";
name = "mingw-w64-clang-x86_64-llvm-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-clang-libs-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-windows-default-manifest-6.4-4-any.pkg.tar.zst";
sha256 = "1k17d18g7rmq2ph4kq1mf84vs8133jzf52nkv6syh39ypjga67wa"; sha256 = "1ylipf8k9j7bgmwndkib2l29mds394i7jcij7a6ciag4kynlhsvi";
name = "mingw-w64-clang-x86_64-clang-libs-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-compiler-rt-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-winpthreads-git-10.0.0.r59.gaacb650be-1-any.pkg.tar.zst";
sha256 = "1w2j0vs888haz9shjr1l8dc4j957sk1p0377zzipkbqnzqwjf1z8"; sha256 = "1mhy806hdx27w3fzpb4zv9ia0c2r6n53ljcpkpcanwbqc3hhmj9f";
name = "mingw-w64-clang-x86_64-compiler-rt-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-headers-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-zstd-1.5.2-2-any.pkg.tar.zst";
sha256 = "18csfwlk2h9pr4411crx1b41qjzn5jgbssm3h109nzwbdizkp62h"; sha256 = "1f14wbc1yvjgv3rbwhv75391l55gcm0as6ipba20vw8phz4ax8ds";
name = "mingw-w64-clang-x86_64-headers-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-crt-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-12.1.0-3-any.pkg.tar.zst";
sha256 = "03l1zkrxgxxssp430xcv2gch1d03rbnbk1c0vgiqxigcs8lljh2g"; sha256 = "0j2p4516r7r9igcnfjcxyzzgppy60hx76gp78lqk0331aj1c5d1d";
name = "mingw-w64-clang-x86_64-crt-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-lld-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-c-ares-1.18.1-1-any.pkg.tar.zst";
sha256 = "1ai4gl7ybpk9n10jmbpf3zzfa893m1krj5qhf44ajln0jabdfnbn"; sha256 = "13j7zx9773k0gx3wbqq38jkcndyjpnm7dfb85i8q2dda27g4iq2m";
name = "mingw-w64-clang-x86_64-lld-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libwinpthread-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-brotli-1.0.9-5-any.pkg.tar.zst";
sha256 = "1svhjzwhvl4ldl439jhgfy47g05y2af1cjqvydgijn1dd4g8y8vq"; sha256 = "044n36p4s2n73fxvac55cqqw6di19v4m92v2h0qnphazj6wcg1d0";
name = "mingw-w64-clang-x86_64-libwinpthread-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-winpthreads-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-expat-2.4.8-1-any.pkg.tar.zst";
sha256 = "0jxdhkl256vnr13xf1x3fyjrdf764zg70xcs3gki3rg109f0a6xk"; sha256 = "1qkw4k61ddaflns5ms0xh0czbx99wxhs0dfbk8sv8by2rkshl51k";
name = "mingw-w64-clang-x86_64-winpthreads-git-11.0.0.r655.gdbfdf8025-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-clang-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-gettext-0.21-3-any.pkg.tar.zst";
sha256 = "0ahfic7vdfv96k5v7fdkgk1agk28l833xjn2igrmbvqg96ak0w6n"; sha256 = "1gy7fmn6jc13ipnyyq44gyhv8rvz5cy7gz1dm3wrna80hjnzli5v";
name = "mingw-w64-clang-x86_64-clang-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-c-ares-1.27.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libunistring-1.0-1-any.pkg.tar.zst";
sha256 = "06y3sgqv6a0gr3dsbzs36jrj8adklssgjqi2ms5clsyq6ay4f91r"; sha256 = "1qks1gm8jscnn93sr7n1azkzcq4a8fybsikpqcf920m9b66cym4k";
name = "mingw-w64-clang-x86_64-c-ares-1.27.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-brotli-1.1.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libidn2-2.3.3-1-any.pkg.tar.zst";
sha256 = "113mha41q53cx0hw13cq1xdf7zbsd58sh8cl1cd7xzg1q69n60w2"; sha256 = "1m3qgnhgf0g389kglrai26x4k64gs2cy9b3mjwlkw5xcs2r3smww";
name = "mingw-w64-clang-x86_64-brotli-1.1.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libunistring-1.1-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libpsl-0.21.1-4-any.pkg.tar.zst";
sha256 = "16myvbg33q5s7jl30w5qd8n8f1r05335ms8r61234vn52n32l2c4"; sha256 = "083nng8zis1v2lshnqymxnalprr8g6gdwf84il5ys1ga90pi6bbn";
name = "mingw-w64-clang-x86_64-libunistring-1.1-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libidn2-2.3.7-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libtasn1-4.18.0-1-any.pkg.tar.zst";
sha256 = "07k8zh5nb2s82md7lz22r8gim8214rhlg586lywck3zcla98jv1w"; sha256 = "0lr1c33d2mkm51kq027bxcj2735vk3nndmn8g02d2v73h6akl48k";
name = "mingw-w64-clang-x86_64-libidn2-2.3.7-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libpsl-0.21.5-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libffi-3.3-4-any.pkg.tar.zst";
sha256 = "1mpx77q5g8pj45s8wgc52c4ww2r93080p6d559p56f558a3cl317"; sha256 = "0iswfiql785ngavdz3qdxahj6wn531j5cwij945gbr9q6wbav0bi";
name = "mingw-w64-clang-x86_64-libpsl-0.21.5-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libtasn1-4.19.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-p11-kit-0.24.1-2-any.pkg.tar.zst";
sha256 = "19m59mjxww26ah2gk9c0i512fmqpyaj6r5na564kmg6wpwvkihcj"; sha256 = "1rx1sjhda4g42qsvmw9nvpdk14ag67sxgfiydivg55hxqjxsvk9p";
name = "mingw-w64-clang-x86_64-libtasn1-4.19.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-p11-kit-0.25.3-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-ca-certificates-20211016-3-any.pkg.tar.zst";
sha256 = "1jrwkc4lvw5hm5rqmi5gqh7mfkbqfa5gi81zjij0krnl0gaxw3c8"; sha256 = "02x6dnbbyjm6mcl6ii61bc5rkwg3qsbaqd2lyzsp5732hxjcmmq4";
name = "mingw-w64-clang-x86_64-p11-kit-0.25.3-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-ca-certificates-20240203-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-openssl-1.1.1.q-1-any.pkg.tar.zst";
sha256 = "1q5nxhsk04gidz66ai5wgd4dr04lfyakkfja9p0r5hrgg4ppqqjg"; sha256 = "0rfb3z9jd0y6xjhv4qx1qqyyqgnzzlchbm07icpb4slgwjbm7cjg";
name = "mingw-w64-clang-x86_64-ca-certificates-20240203-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-openssl-3.2.1-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libssh2-1.10.0-1-any.pkg.tar.zst";
sha256 = "0ix2r4ll09m2z5vz2k94gmwfs0pp3ipvjdimwzx7v6xhcs2l25lz"; sha256 = "1f27an41hxrfs9jifq0708c484ps3zmb582gmsy7xn5idg3wk03d";
name = "mingw-w64-clang-x86_64-openssl-3.2.1-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libssh2-1.11.0-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-jansson-2.14-2-any.pkg.tar.zst";
sha256 = "0l2m823gm1rvnjmqm5ads17mxz1bhpzai5ixyhnkpzrsjxd1ygy5"; sha256 = "0hwvcyp7mcvljii87mv0d467whr5j8i8rjkkam7r784qrp9i49ds";
name = "mingw-w64-clang-x86_64-libssh2-1.11.0-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-nghttp2-1.60.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-jemalloc-5.2.1-2-any.pkg.tar.zst";
sha256 = "0wxw8266hf4qd2m4zpgb1wvlrnaksmcrs0kh5y9zpf2y5sy8f2bq"; sha256 = "04lf1b1sdbb8ncbimbb9q0lv7qlc3s814p2001zsxa2dhdc4xdri";
name = "mingw-w64-clang-x86_64-nghttp2-1.60.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-curl-8.6.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-xz-5.2.5-3-any.pkg.tar.zst";
sha256 = "1racc7cyzj22kink9w8m8jv73ji5hfg6r6d1ka9dqmvcbx04r8p0"; sha256 = "099j96iv49b2xddfaq7a69l0j818hw7cxyas6g7cm7iw3crsykfr";
name = "mingw-w64-clang-x86_64-curl-8.6.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-rust-1.76.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libxml2-2.9.14-4-any.pkg.tar.zst";
sha256 = "0ny3bvwvn5wmqrxzhdfw34akr0kj0m7rg9lg3w5yibqz2mkqhk11"; sha256 = "1d6v37k0hiznlv0qnr25cpjgwa7rphahiwcrc7jf44qwdmbdasrv";
name = "mingw-w64-clang-x86_64-rust-1.76.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-pkgconf-1~2.1.1-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-nghttp2-1.48.0-1-any.pkg.tar.zst";
sha256 = "00kxqg9ds4q74lxrzjh8z0858smqbi1j9r06s0zjadsql0ln98cq"; sha256 = "023lnhncm697sdbgnrzvc56c9lzcn29dsrl1m58hsxxjb7rrcrlf";
name = "mingw-w64-clang-x86_64-pkgconf-12.1.1-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-expat-2.6.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-curl-7.84.0-2-any.pkg.tar.zst";
sha256 = "0kj1vzjh3qh7d2g47avlgk7a6j4nc62111hy1m63jwq0alc01k38"; sha256 = "0j6b3arlcsyk5fn2nr7x92j2pqkn26zyrg1zy3pc0qcd3q8hlbr0";
name = "mingw-w64-clang-x86_64-expat-2.6.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-jsoncpp-1.9.5-3-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-rust-1.62.1-1-any.pkg.tar.zst";
sha256 = "1a8mdn4ram9pgqpx5fwxmhcmzc6bh1fq1s4m37xh0d8p6fpncv10"; sha256 = "1bxnjgf1vx1qyf0nzmmc6s096jbw7354fkb1khhmldi15yb2f8h8";
name = "mingw-w64-clang-x86_64-jsoncpp-1.9.5-3-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-bzip2-1.0.8-3-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-pkgconf-1.8.0-2-any.pkg.tar.zst";
sha256 = "1n8zf2kk1xj7wiszp6mjchy1yzpalddbj0cj17qm625ags2vzflm"; sha256 = "18gzbyc949rvaisdxrf4lyx349xigzpp4dk5a9jj9ghn8zfa1wlg";
name = "mingw-w64-clang-x86_64-bzip2-1.0.8-3-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libb2-0.98.1-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-jsoncpp-1.9.4-2-any.pkg.tar.zst";
sha256 = "0555dvb2xs6695sz5ndrx6y0cz3qa5cg0m5v8q1md13ssg76vlh6"; sha256 = "0xqckav97gsaazdfn4395jz0ma0i3snvs1g4ghb7s5jsxbwrhr82";
name = "mingw-w64-clang-x86_64-libb2-0.98.1-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-lz4-1.9.4-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-bzip2-1.0.8-2-any.pkg.tar.zst";
sha256 = "0nn7cy25j53q5ckkx4n4f77w00xdwwf5wjswm374shvvs58nlln0"; sha256 = "1kqg3aw439cdyhnf02rlfr1pw1n8v9xxvq2alhn7aw6nd8qhw7z5";
name = "mingw-w64-clang-x86_64-lz4-1.9.4-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libtre-git-r177.07e66d0-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libb2-0.98.1-2-any.pkg.tar.zst";
sha256 = "0fc9hxsdks1xy5fv0rcna433hlzf6jhs77hg0hfzkzhn06f9alp4"; sha256 = "1nj669rn1i6fxrwmsqmr9n49p34wxvhn0xlsn9spr6aq1hz73b41";
name = "mingw-w64-clang-x86_64-libtre-git-r177.07e66d0-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libsystre-1.0.1-5-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-lz4-1.9.3-1-any.pkg.tar.zst";
sha256 = "05qsn8fkks4f93jkas43s47axqqgx5m64b45p462si3nlb8cjirq"; sha256 = "0fxvabi93cxfybbn49hlr3wgzs4p7fw5shfa055222apkxnncm92";
name = "mingw-w64-clang-x86_64-libsystre-1.0.1-5-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libarchive-3.7.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libtre-git-r128.6fb7206-2-any.pkg.tar.xz";
sha256 = "1p84yh6yzkdpmr02vyvgz16x5gycckah25jkdc2py09l7iw96bmw"; sha256 = "0dp3ca83j8jlx32gml2qvqpwp5b42q8r98gf6hyiki45d910wb7x";
name = "mingw-w64-clang-x86_64-libarchive-3.7.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-libuv-1.48.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libsystre-1.0.1-4-any.pkg.tar.xz";
sha256 = "0kfzanvx7hg7bvy35h2z2vcfxvwn44sikd36mvzhkv6c3c6y84sn"; sha256 = "037gkzaaj8kp5nspcbc8ll64s9b3mj8d6m663lk1za94bq2axff1";
name = "mingw-w64-clang-x86_64-libuv-1.48.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-ninja-1.11.1-3-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libarchive-3.6.1-2-any.pkg.tar.zst";
sha256 = "13wjfmyfr952n3ydpldjlwx1nla5xpyvr96ng8pfbyw4z900v5ms"; sha256 = "1wgv99pxk2pv4kr5cs111k7813bvlykphirksz8pr62kv8a1n47s";
name = "mingw-w64-clang-x86_64-ninja-1.11.1-3-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-rhash-1.4.4-3-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-libuv-1.42.0-3-any.pkg.tar.zst";
sha256 = "1ysbxirpfr0yf7pvyps75lnwc897w2a2kcid3nb4j6ilw6n64jmc"; sha256 = "0mg4j5lqmxlhgrs9bnkb1bhj3mfpvjvvkzpjyy87y2m2k11ffbja";
name = "mingw-w64-clang-x86_64-rhash-1.4.4-3-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-cmake-3.29.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-rhash-1.4.2-1-any.pkg.tar.zst";
sha256 = "0l79lf6zihn0k8hz93qnjnq259y45yq19235g9c444jc2w093si1"; sha256 = "0yhv8pra83cs0mk0n40w0k12z32slxs88h1p9z2ixvyigf8w86ml";
name = "mingw-w64-clang-x86_64-cmake-3.29.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-mpdecimal-4.0.0-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-ninja-1.11.0-1-any.pkg.tar.zst";
sha256 = "0hrhbjgi0g3jqpw8himshqw6vazm5sxhsfmyg386nbrxwnfgl1gb"; sha256 = "0s4zwj4cwzql5l7yx3rj6c8s9jkhjvqqfv5rg0a2grp4abcmv51m";
name = "mingw-w64-clang-x86_64-mpdecimal-4.0.0-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-ncurses-6.4.20231217-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-cmake-3.24.0-1-any.pkg.tar.zst";
sha256 = "00046d52zsr8zjifl7h22jfihhh53h20ipvbqmvf9myssw2fwjza"; sha256 = "0aykg8g07jnsf549ws293ykgsxy2czbnv2yjix1dwilwc9a11w86";
name = "mingw-w64-clang-x86_64-ncurses-6.4.20231217-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-termcap-1.3.1-7-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-mpdecimal-2.5.1-1-any.pkg.tar.zst";
sha256 = "17ha468qavwin800cc3b7c3xdggwk2gakasfxg7jdx7616d99l0n"; sha256 = "0cpyacmciyzbsar1aka5y592g2gpa4i6a58j3bjdmfjdnpm0j08a";
name = "mingw-w64-clang-x86_64-termcap-1.3.1-7-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-readline-8.2.010-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-ncurses-6.3-5-any.pkg.tar.zst";
sha256 = "1s47pd5iz8y3hspsxn4pnp0v3m05ccia40v5nfvx0rmwgvcaz82v"; sha256 = "029z63bw9pwhamw1zi75fr112pxk934nh08by2l54lwdais0vjq8";
name = "mingw-w64-clang-x86_64-readline-8.2.010-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-tcl-8.6.13-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-termcap-1.3.1-6-any.pkg.tar.zst";
sha256 = "0paaqwk0sfy2zxwlxkmxf2bqq46lyg0sx7cqgzknvazwx8xa2z4x"; sha256 = "1wgbzj53vmv1vm3igjan635j5ims4x19s2y6mgvvc46zgndc2bvq";
name = "mingw-w64-clang-x86_64-tcl-8.6.13-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-sqlite3-3.45.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-readline-8.1.002-2-any.pkg.tar.zst";
sha256 = "1icvw3f08cgi94p0177i46v72wgpsxw95p6kd0sm2w3vj0qlqbcw"; sha256 = "136fp0cymxqzgs4s8dmal1f4v6ns2mw8jn4cbfihxqb2cmf9yil8";
name = "mingw-w64-clang-x86_64-sqlite3-3.45.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-tk-8.6.12-2-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-tcl-8.6.11-5-any.pkg.tar.zst";
sha256 = "0pi74q91vl6vw8vvmmwnvrgai3b1aanp0zhca5qsmv8ljh2wdgzx"; sha256 = "0f3p6x55d0370khpp77xpr1dwhfhrlb8b1wjxxb96y0x67q1casm";
name = "mingw-w64-clang-x86_64-tk-8.6.12-2-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-tzdata-2024a-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-sqlite3-3.39.1-1-any.pkg.tar.zst";
sha256 = "1lsfn3759cyf56zlmfvgy6ihs4iks6zhlnrbfmnq5wml02k936ji"; sha256 = "0nabw7iy5za5hdpflkgn1s1v93786h9zz6sxzjxm23wymfk1yxlg";
name = "mingw-w64-clang-x86_64-tzdata-2024a-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-python-3.11.8-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-tk-8.6.11.1-2-any.pkg.tar.zst";
sha256 = "0djpf4k8s25nys6nrm2x2v134lcgzhhbjs37ihkg0b3sxmmc3b0p"; sha256 = "0awr7hzxliyvrkh0ywrga69lcnl5g41i7d4w4azhdwk7i60i1s40";
name = "mingw-w64-clang-x86_64-python-3.11.8-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-openmp-18.1.2-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-tzdata-2022a-1-any.pkg.tar.zst";
sha256 = "1v9wm3ja3a7a7yna2bpqky481qf244wc98kfdl7l03k7rkvvydpl"; sha256 = "0z1q4359q5vfs77a9wnhmf2i9y3ldfmpijjgzqv4za1grmyj6whd";
name = "mingw-w64-clang-x86_64-openmp-18.1.2-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-openblas-0.3.26-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-python-3.10.5-3-any.pkg.tar.zst";
sha256 = "0kdr72y5lc9dl9s1bjrw8g21qmv2iwd1xvn1r21170i277wsmqiv"; sha256 = "1198p71k30c6kspi8mx6kmsk48fdblfr75291s0gmbmdgba7gfw4";
name = "mingw-w64-clang-x86_64-openblas-0.3.26-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-python-numpy-1.26.4-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-gcc-libgfortran-12.1.0-3-any.pkg.tar.zst";
sha256 = "00h0ap954cjwlsc3p01fjwy7s3nlzs90v0kmnrzxm0rljmvn4jkf"; sha256 = "11mawrmxp4habwsvbmfsalb136m4dmzlrjy3pcwp7rq8wxx2vnah";
name = "mingw-w64-clang-x86_64-python-numpy-1.26.4-1-any.pkg.tar.zst";
}) })
(pkgs.fetchurl { (pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/clang64/mingw-w64-clang-x86_64-python-setuptools-69.1.1-1-any.pkg.tar.zst"; url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-openblas-0.3.20-3-any.pkg.tar.zst";
sha256 = "1mc56anasj0v92nlg84m3pa7dbqgjakxw0b4ibqlrr9cq0xzsg4b"; sha256 = "07d8cp8in2nbh6dsyis9cvy83y16gz5wfq5fp0fddgh1ak8ihyn2";
name = "mingw-w64-clang-x86_64-python-setuptools-69.1.1-1-any.pkg.tar.zst"; })
(pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-python-numpy-1.23.1-1-any.pkg.tar.zst";
sha256 = "05by2nm402jkvzaxcz7g3vmh93qmh6f2ddhambmpn4778np6n9bz";
})
(pkgs.fetchurl {
url = "https://mirror.msys2.org/mingw/mingw64/mingw-w64-x86_64-python-setuptools-63.2.0-2-any.pkg.tar.zst";
sha256 = "0280dajh9rvvg3zl4qrgbap6i6n3lxn172kscn6728ifhn3ap3bh";
}) })
] ]
View File
@ -2,7 +2,7 @@
name = "runkernel" name = "runkernel"
version = "0.1.0" version = "0.1.0"
authors = ["M-Labs"] authors = ["M-Labs"]
edition = "2021" edition = "2018"
[dependencies] [dependencies]
libloading = "0.8" libloading = "0.7"