use crate::debug::{DwarfSectionRelocTarget, ModuleMemoryOffset};
use crate::func_environ::FuncEnvironment;
use crate::{array_call_signature, native_call_signature, DEBUG_ASSERT_TRAP_CODE};
use crate::{builder::LinkOptions, value_type, wasm_call_signature};
use anyhow::{Context as _, Result};
use cranelift_codegen::ir::{
self, InstBuilder, MemFlags, UserExternalName, UserExternalNameRef, UserFuncName, Value,
};
use cranelift_codegen::isa::{OwnedTargetIsa, TargetIsa};
use cranelift_codegen::print_errors::pretty_error;
use cranelift_codegen::Context;
use cranelift_codegen::{CompiledCode, MachStackMap};
use cranelift_entity::{EntityRef, PrimaryMap};
use cranelift_frontend::FunctionBuilder;
use cranelift_wasm::{
DefinedFuncIndex, FuncIndex, FuncTranslator, MemoryIndex, OwnedMemoryIndex, WasmFuncType,
WasmType,
};
use object::write::{Object, StandardSegment, SymbolId};
use object::{RelocationEncoding, RelocationKind, SectionKind};
use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::mem;
use std::path;
use std::sync::{Arc, Mutex};
use wasmparser::{FuncValidatorAllocations, FunctionBody};
use wasmtime_cranelift_shared::{CompiledFunction, ModuleTextBuilder};
use wasmtime_environ::{
AddressMapSection, CacheStore, CompileError, FlagValue, FunctionBodyData, FunctionLoc,
ModuleTranslation, ModuleTypes, PtrSize, StackMapInformation, TrapEncodingBuilder, Tunables,
VMOffsets, WasmFunctionInfo,
};
#[cfg(feature = "component-model")]
mod component;
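
/// Bookkeeping for the incremental compilation cache: how many lookups hit
/// the cache (`num_hits`) versus how many functions were freshly compiled
/// and then inserted (`num_cached`).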
struct IncrementalCacheContext {
#[cfg(feature = "incremental-cache")]
cache_store: Arc<dyn CacheStore>,
num_hits: usize,
num_cached: usize,
}
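
/// Reusable state for compiling a single function: the Wasm-to-CLIF
/// translator, the Cranelift codegen `Context`, optional incremental-cache
/// bookkeeping, and the validator's allocations. Contexts are pooled on the
/// `Compiler` and reused across function compilations.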
struct CompilerContext {
func_translator: FuncTranslator,
codegen_context: Context,
incremental_cache_ctx: Option<IncrementalCacheContext>,
validator_allocations: FuncValidatorAllocations,
}
impl Default for CompilerContext {
fn default() -> Self {
Self {
func_translator: FuncTranslator::new(),
codegen_context: Context::new(),
incremental_cache_ctx: None,
validator_allocations: Default::default(),
}
}
}
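
/// A compiler that compiles a WebAssembly module with Cranelift, translating
/// the Wasm to Cranelift IR, optimizing it, and then lowering it to machine
/// code.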
pub(crate) struct Compiler {
tunables: Tunables,
contexts: Mutex<Vec<CompilerContext>>,
isa: OwnedTargetIsa,
linkopts: LinkOptions,
cache_store: Option<Arc<dyn CacheStore>>,
clif_dir: Option<path::PathBuf>,
}
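
// On drop, log aggregate incremental-cache statistics gathered from all of
// the pooled contexts, if caching was enabled.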
impl Drop for Compiler {
fn drop(&mut self) {
if self.cache_store.is_none() {
return;
}
let mut num_hits = 0;
let mut num_cached = 0;
for ctx in self.contexts.lock().unwrap().iter() {
if let Some(ref cache_ctx) = ctx.incremental_cache_ctx {
num_hits += cache_ctx.num_hits;
num_cached += cache_ctx.num_cached;
}
}
        let total = num_hits + num_cached;
        if total > 0 {
log::trace!(
"Incremental compilation cache stats: {}/{} = {}% (hits/lookup)\ncached: {}",
num_hits,
total,
(num_hits as f32) / (total as f32) * 100.0,
num_cached
);
}
}
}
impl Compiler {
pub(crate) fn new(
tunables: Tunables,
isa: OwnedTargetIsa,
cache_store: Option<Arc<dyn CacheStore>>,
linkopts: LinkOptions,
clif_dir: Option<path::PathBuf>,
) -> Compiler {
Compiler {
contexts: Default::default(),
tunables,
isa,
linkopts,
cache_store,
clif_dir,
}
}
}
impl wasmtime_environ::Compiler for Compiler {
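    // Compiles the Wasm function body at `func_index`: translates it to
    // Cranelift IR (validating as it goes), optionally dumps the CLIF, and
    // then compiles it down to machine code.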
fn compile_function(
&self,
translation: &ModuleTranslation<'_>,
func_index: DefinedFuncIndex,
input: FunctionBodyData<'_>,
types: &ModuleTypes,
) -> Result<(WasmFunctionInfo, Box<dyn Any + Send>), CompileError> {
let isa = &*self.isa;
let module = &translation.module;
let func_index = module.func_index(func_index);
let sig = translation.module.functions[func_index].signature;
let wasm_func_ty = &types[sig];
let mut compiler = self.function_compiler();
let context = &mut compiler.cx.codegen_context;
context.func.signature = wasm_call_signature(isa, wasm_func_ty, &self.tunables);
context.func.name = UserFuncName::User(UserExternalName {
namespace: 0,
index: func_index.as_u32(),
});
if self.tunables.generate_native_debuginfo {
context.func.collect_debug_info();
}
let mut func_env = FuncEnvironment::new(isa, translation, types, &self.tunables);
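        // Set up the stack limit: the `VMRuntimeLimits` pointer sits at a
        // fixed offset in the vmctx and never changes (readonly), while the
        // stack limit it points to may be updated at runtime (not readonly).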
let vmctx = context
.func
.create_global_value(ir::GlobalValueData::VMContext);
let interrupts_ptr = context.func.create_global_value(ir::GlobalValueData::Load {
base: vmctx,
offset: i32::try_from(func_env.offsets.vmctx_runtime_limits())
.unwrap()
.into(),
global_type: isa.pointer_type(),
readonly: true,
});
let stack_limit = context.func.create_global_value(ir::GlobalValueData::Load {
base: interrupts_ptr,
offset: i32::try_from(func_env.offsets.ptr.vmruntime_limits_stack_limit())
.unwrap()
.into(),
global_type: isa.pointer_type(),
readonly: false,
});
context.func.stack_limit = Some(stack_limit);
let FunctionBodyData { validator, body } = input;
let mut validator =
validator.into_validator(mem::take(&mut compiler.cx.validator_allocations));
compiler.cx.func_translator.translate_body(
&mut validator,
body.clone(),
&mut context.func,
&mut func_env,
)?;
if let Some(path) = &self.clif_dir {
use std::io::Write;
let mut path = path.to_path_buf();
path.push(format!("wasm_func_{}", func_index.as_u32()));
path.set_extension("clif");
let mut output = std::fs::File::create(path).unwrap();
write!(output, "{}", context.func.display()).unwrap();
}
let (info, func) = compiler.finish_with_info(Some((&body, &self.tunables)))?;
let timing = cranelift_codegen::timing::take_current();
log::debug!("{:?} translated in {:?}", func_index, timing.total());
log::trace!("{:?} timing info\n{}", func_index, timing);
Ok((info, Box::new(func)))
}
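
    // Builds a trampoline with the "array" calling convention (vmctx, caller
    // vmctx, pointer to a values array, and its capacity) which forwards to
    // the Wasm calling convention of the indicated defined function.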
fn compile_array_to_wasm_trampoline(
&self,
translation: &ModuleTranslation<'_>,
types: &ModuleTypes,
def_func_index: DefinedFuncIndex,
) -> Result<Box<dyn Any + Send>, CompileError> {
let func_index = translation.module.func_index(def_func_index);
let sig = translation.module.functions[func_index].signature;
let wasm_func_ty = &types[sig];
let isa = &*self.isa;
let pointer_type = isa.pointer_type();
let wasm_call_sig = wasm_call_signature(isa, wasm_func_ty, &self.tunables);
let array_call_sig = array_call_signature(isa);
let mut compiler = self.function_compiler();
let func = ir::Function::with_name_signature(Default::default(), array_call_sig);
let (mut builder, block0) = compiler.builder(func);
let (vmctx, caller_vmctx, values_vec_ptr, values_vec_len) = {
let params = builder.func.dfg.block_params(block0);
(params[0], params[1], params[2], params[3])
};
let mut args = self.load_values_from_array(
wasm_func_ty.params(),
&mut builder,
values_vec_ptr,
values_vec_len,
);
args.insert(0, caller_vmctx);
args.insert(0, vmctx);
debug_assert_vmctx_kind(isa, &mut builder, vmctx, wasmtime_environ::VMCONTEXT_MAGIC);
let offsets = VMOffsets::new(isa.pointer_bytes(), &translation.module);
let vm_runtime_limits_offset = offsets.vmctx_runtime_limits();
save_last_wasm_entry_sp(
&mut builder,
pointer_type,
&offsets.ptr,
vm_runtime_limits_offset,
vmctx,
);
let call = declare_and_call(&mut builder, wasm_call_sig, func_index.as_u32(), &args);
let results = builder.func.dfg.inst_results(call).to_vec();
self.store_values_to_array(
&mut builder,
wasm_func_ty.returns(),
&results,
values_vec_ptr,
values_vec_len,
);
builder.ins().return_(&[]);
builder.finalize();
Ok(Box::new(compiler.finish()?))
}
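
    // Builds a trampoline from the native calling convention into the Wasm
    // calling convention of the indicated defined function, recording the
    // entry stack pointer along the way.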
fn compile_native_to_wasm_trampoline(
&self,
translation: &ModuleTranslation<'_>,
types: &ModuleTypes,
def_func_index: DefinedFuncIndex,
) -> Result<Box<dyn Any + Send>, CompileError> {
let func_index = translation.module.func_index(def_func_index);
let sig = translation.module.functions[func_index].signature;
let wasm_func_ty = &types[sig];
let isa = &*self.isa;
let pointer_type = isa.pointer_type();
let wasm_call_sig = wasm_call_signature(isa, wasm_func_ty, &self.tunables);
let native_call_sig = native_call_signature(isa, wasm_func_ty);
let mut compiler = self.function_compiler();
let func = ir::Function::with_name_signature(Default::default(), native_call_sig);
let (mut builder, block0) = compiler.builder(func);
let args = builder.func.dfg.block_params(block0).to_vec();
let vmctx = args[0];
debug_assert_vmctx_kind(isa, &mut builder, vmctx, wasmtime_environ::VMCONTEXT_MAGIC);
let offsets = VMOffsets::new(isa.pointer_bytes(), &translation.module);
let vm_runtime_limits_offset = offsets.vmctx_runtime_limits();
save_last_wasm_entry_sp(
&mut builder,
pointer_type,
&offsets.ptr,
vm_runtime_limits_offset,
vmctx,
);
let ret = NativeRet::classify(pointer_type, wasm_func_ty);
let wasm_args = ret.native_args(&args);
let call = declare_and_call(&mut builder, wasm_call_sig, func_index.as_u32(), wasm_args);
let results = builder.func.dfg.inst_results(call).to_vec();
ret.native_return(&mut builder, block0, &results);
builder.finalize();
Ok(Box::new(compiler.finish()?))
}
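
    // Builds a trampoline in the Wasm calling convention that calls out to a
    // host function with the native calling convention, first saving the
    // exit FP and PC so the runtime can later capture a Wasm backtrace.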
fn compile_wasm_to_native_trampoline(
&self,
wasm_func_ty: &WasmFuncType,
) -> Result<Box<dyn Any + Send>, CompileError> {
let isa = &*self.isa;
let pointer_type = isa.pointer_type();
let wasm_call_sig = wasm_call_signature(isa, wasm_func_ty, &self.tunables);
let native_call_sig = native_call_signature(isa, wasm_func_ty);
let mut compiler = self.function_compiler();
let func = ir::Function::with_name_signature(Default::default(), wasm_call_sig);
let (mut builder, block0) = compiler.builder(func);
let mut args = builder.func.dfg.block_params(block0).to_vec();
let callee_vmctx = args[0];
let caller_vmctx = args[1];
let ret = NativeRet::classify(pointer_type, wasm_func_ty);
debug_assert_vmctx_kind(
isa,
&mut builder,
caller_vmctx,
wasmtime_environ::VMCONTEXT_MAGIC,
);
let ptr = isa.pointer_bytes();
let limits = builder.ins().load(
pointer_type,
MemFlags::trusted(),
caller_vmctx,
i32::try_from(ptr.vmcontext_runtime_limits()).unwrap(),
);
save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &ptr, limits);
let slot = match &ret {
NativeRet::Bare => None,
NativeRet::Retptr { size, .. } => Some(builder.func.create_sized_stack_slot(
ir::StackSlotData::new(ir::StackSlotKind::ExplicitSlot, *size),
)),
};
if let Some(slot) = slot {
args.push(builder.ins().stack_addr(pointer_type, slot, 0));
}
        // Load the host function's `native_call` pointer out of the
        // `VMFuncRef` stored in the callee's `VMNativeCallHostFuncContext`.
        let callee = builder.ins().load(
            pointer_type,
            MemFlags::trusted(),
            callee_vmctx,
            ptr.vmnative_call_host_func_context_func_ref() + ptr.vm_func_ref_native_call(),
        );
let callee_signature = builder.func.import_signature(native_call_sig);
let call = builder.ins().call_indirect(callee_signature, callee, &args);
let mut results = builder.func.dfg.inst_results(call).to_vec();
if let NativeRet::Retptr { offsets, .. } = ret {
let slot = *args.last().unwrap();
assert_eq!(offsets.len(), wasm_func_ty.returns().len() - 1);
for (ty, offset) in wasm_func_ty.returns()[1..].iter().zip(offsets) {
let ty = crate::value_type(isa, *ty);
results.push(
builder
.ins()
.load(ty, MemFlags::trusted(), slot, offset as i32),
);
}
}
builder.ins().return_(&results);
builder.finalize();
Ok(Box::new(compiler.finish()?))
}
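
    // Appends every compiled function to the object's text section and
    // records its location, along with address-map and trap metadata.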
fn append_code(
&self,
obj: &mut Object<'static>,
funcs: &[(String, Box<dyn Any + Send>)],
resolve_reloc: &dyn Fn(usize, FuncIndex) -> usize,
) -> Result<Vec<(SymbolId, FunctionLoc)>> {
let mut builder =
ModuleTextBuilder::new(obj, self, self.isa.text_section_builder(funcs.len()));
if self.linkopts.force_jump_veneers {
builder.force_veneers();
}
let mut addrs = AddressMapSection::default();
let mut traps = TrapEncodingBuilder::default();
let mut ret = Vec::with_capacity(funcs.len());
for (i, (sym, func)) in funcs.iter().enumerate() {
let func = func
.downcast_ref::<CompiledFunction<CompiledFuncEnv>>()
.unwrap();
let (sym, range) = builder.append_func(&sym, func, |idx| resolve_reloc(i, idx));
if self.tunables.generate_address_map {
let addr = func.address_map();
addrs.push(range.clone(), &addr.instructions);
}
traps.push(range.clone(), &func.traps().collect::<Vec<_>>());
builder.append_padding(self.linkopts.padding_between_functions);
let info = FunctionLoc {
start: u32::try_from(range.start).unwrap(),
length: u32::try_from(range.end - range.start).unwrap(),
};
ret.push((sym, info));
}
builder.finish();
if self.tunables.generate_address_map {
addrs.append_to(obj);
}
traps.append_to(obj);
Ok(ret)
}
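
    // Emits both trampolines needed for a host function defined with the
    // array calling convention: one entered from Wasm and one entered from
    // native code.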
fn emit_trampolines_for_array_call_host_func(
&self,
ty: &WasmFuncType,
host_fn: usize,
obj: &mut Object<'static>,
) -> Result<(FunctionLoc, FunctionLoc)> {
        let wasm_to_array = self.wasm_to_array_trampoline(ty, host_fn)?;
        let native_to_array = self.native_to_array_trampoline(ty, host_fn)?;
        let mut builder = ModuleTextBuilder::new(obj, self, self.isa.text_section_builder(2));
        let (_, wasm_to_array) =
            builder.append_func("wasm_to_array", &wasm_to_array, |_| unreachable!());
        let (_, native_to_array) =
            builder.append_func("native_to_array", &native_to_array, |_| unreachable!());
let wasm_to_array = FunctionLoc {
start: u32::try_from(wasm_to_array.start).unwrap(),
length: u32::try_from(wasm_to_array.end - wasm_to_array.start).unwrap(),
};
let native_to_array = FunctionLoc {
start: u32::try_from(native_to_array.start).unwrap(),
length: u32::try_from(native_to_array.end - native_to_array.start).unwrap(),
};
builder.finish();
Ok((wasm_to_array, native_to_array))
}
fn triple(&self) -> &target_lexicon::Triple {
self.isa.triple()
}
fn flags(&self) -> BTreeMap<String, FlagValue> {
wasmtime_cranelift_shared::clif_flags_to_wasmtime(self.isa.flags().iter())
}
fn isa_flags(&self) -> BTreeMap<String, FlagValue> {
wasmtime_cranelift_shared::clif_flags_to_wasmtime(self.isa.isa_flags())
}
fn is_branch_protection_enabled(&self) -> bool {
self.isa.is_branch_protection_enabled()
}
#[cfg(feature = "component-model")]
fn component_compiler(&self) -> &dyn wasmtime_environ::component::ComponentCompiler {
self
}
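
    // Emits DWARF debug sections for the module, wiring up relocations
    // against the compiled functions and the other debug sections.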
fn append_dwarf(
&self,
obj: &mut Object<'_>,
translation: &ModuleTranslation<'_>,
funcs: &PrimaryMap<DefinedFuncIndex, (SymbolId, &(dyn Any + Send))>,
) -> Result<()> {
let ofs = VMOffsets::new(
self.isa
.triple()
.architecture
.pointer_width()
.unwrap()
.bytes(),
&translation.module,
);
let memory_offset = if ofs.num_imported_memories > 0 {
ModuleMemoryOffset::Imported(ofs.vmctx_vmmemory_import(MemoryIndex::new(0)))
} else if ofs.num_defined_memories > 0 {
assert_eq!(
ofs.num_defined_memories, ofs.num_owned_memories,
"the memory base pointer may be incorrect due to sharing memory"
);
ModuleMemoryOffset::Defined(
ofs.vmctx_vmmemory_definition_base(OwnedMemoryIndex::new(0)),
)
} else {
ModuleMemoryOffset::None
};
let functions_info = funcs
.iter()
.map(|(_, (_, func))| {
let f: &CompiledFunction<CompiledFuncEnv> = func.downcast_ref().unwrap();
f.metadata()
})
.collect();
let dwarf_sections = crate::debug::emit_dwarf(
&*self.isa,
&translation.debuginfo,
&functions_info,
&memory_offset,
)
.with_context(|| "failed to emit DWARF debug information")?;
let (debug_bodies, debug_relocs): (Vec<_>, Vec<_>) = dwarf_sections
.iter()
.map(|s| ((s.name, &s.body), (s.name, &s.relocs)))
.unzip();
let mut dwarf_sections_ids = HashMap::new();
for (name, body) in debug_bodies {
let segment = obj.segment_name(StandardSegment::Debug).to_vec();
let section_id = obj.add_section(segment, name.as_bytes().to_vec(), SectionKind::Debug);
dwarf_sections_ids.insert(name, section_id);
obj.append_section_data(section_id, &body, 1);
}
for (name, relocs) in debug_relocs {
let section_id = *dwarf_sections_ids.get(name).unwrap();
for reloc in relocs {
let target_symbol = match reloc.target {
DwarfSectionRelocTarget::Func(index) => funcs[DefinedFuncIndex::new(index)].0,
DwarfSectionRelocTarget::Section(name) => {
obj.section_symbol(dwarf_sections_ids[name])
}
};
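                // `object` expects the relocation size in bits, while
                // `reloc.size` is in bytes.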
obj.add_relocation(
section_id,
object::write::Relocation {
offset: u64::from(reloc.offset),
size: reloc.size << 3,
kind: RelocationKind::Absolute,
encoding: RelocationEncoding::Generic,
symbol: target_symbol,
addend: i64::from(reloc.addend),
},
)?;
}
}
Ok(())
}
fn create_systemv_cie(&self) -> Option<gimli::write::CommonInformationEntry> {
self.isa.create_systemv_cie()
}
}
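
// Support for the `incremental-cache` feature: adapts Wasmtime's
// `CacheStore` to Cranelift's cache-store trait and consults the cache
// before compiling.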
#[cfg(feature = "incremental-cache")]
mod incremental_cache {
use super::*;
struct CraneliftCacheStore(Arc<dyn CacheStore>);
impl cranelift_codegen::incremental_cache::CacheKvStore for CraneliftCacheStore {
fn get(&self, key: &[u8]) -> Option<std::borrow::Cow<[u8]>> {
self.0.get(key)
}
fn insert(&mut self, key: &[u8], val: Vec<u8>) {
self.0.insert(key, val);
}
}
pub(super) fn compile_maybe_cached<'a>(
context: &'a mut Context,
isa: &dyn TargetIsa,
cache_ctx: Option<&mut IncrementalCacheContext>,
) -> Result<(&'a CompiledCode, Vec<u8>), CompileError> {
let cache_ctx = match cache_ctx {
Some(ctx) => ctx,
None => return compile_uncached(context, isa),
};
let mut cache_store = CraneliftCacheStore(cache_ctx.cache_store.clone());
let (compiled_code, from_cache) = context
.compile_with_cache(isa, &mut cache_store, &mut Default::default())
.map_err(|error| CompileError::Codegen(pretty_error(&error.func, error.inner)))?;
if from_cache {
cache_ctx.num_hits += 1;
} else {
cache_ctx.num_cached += 1;
}
Ok((compiled_code, compiled_code.code_buffer().to_vec()))
}
}
#[cfg(feature = "incremental-cache")]
use incremental_cache::*;
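
// Without the `incremental-cache` feature, compilation always goes straight
// to `compile_uncached`.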
#[cfg(not(feature = "incremental-cache"))]
fn compile_maybe_cached<'a>(
context: &'a mut Context,
isa: &dyn TargetIsa,
_cache_ctx: Option<&mut IncrementalCacheContext>,
) -> Result<(&'a CompiledCode, Vec<u8>), CompileError> {
compile_uncached(context, isa)
}
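
// Compiles the function in `context` from scratch, returning the compiled
// code alongside a copy of the emitted machine-code bytes.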
fn compile_uncached<'a>(
context: &'a mut Context,
isa: &dyn TargetIsa,
) -> Result<(&'a CompiledCode, Vec<u8>), CompileError> {
let mut code_buf = Vec::new();
let compiled_code = context
.compile_and_emit(isa, &mut code_buf, &mut Default::default())
.map_err(|error| CompileError::Codegen(pretty_error(&error.func, error.inner)))?;
Ok((compiled_code, code_buf))
}
impl Compiler {
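    // Creates a trampoline from the native calling convention to a host
    // function at address `host_fn` which uses the array calling convention.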
fn native_to_array_trampoline(
&self,
ty: &WasmFuncType,
host_fn: usize,
) -> Result<CompiledFunction<CompiledFuncEnv>, CompileError> {
let isa = &*self.isa;
let pointer_type = isa.pointer_type();
let native_call_sig = native_call_signature(isa, ty);
let array_call_sig = array_call_signature(isa);
let mut compiler = self.function_compiler();
let func = ir::Function::with_name_signature(Default::default(), native_call_sig);
let (mut builder, block0) = compiler.builder(func);
let args = builder.func.dfg.block_params(block0).to_vec();
let ret = NativeRet::classify(pointer_type, ty);
let wasm_args = &ret.native_args(&args)[2..];
let (values_vec_ptr, values_vec_len) =
self.allocate_stack_array_and_spill_args(ty, &mut builder, wasm_args);
let values_vec_len = builder
.ins()
.iconst(pointer_type, i64::from(values_vec_len));
let callee_args = [args[0], args[1], values_vec_ptr, values_vec_len];
let new_sig = builder.import_signature(array_call_sig);
let callee_value = builder.ins().iconst(pointer_type, host_fn as i64);
builder
.ins()
.call_indirect(new_sig, callee_value, &callee_args);
let results =
self.load_values_from_array(ty.returns(), &mut builder, values_vec_ptr, values_vec_len);
ret.native_return(&mut builder, block0, &results);
builder.finalize();
compiler.finish()
}
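
    // Creates a trampoline from the Wasm calling convention to a host
    // function at address `host_fn` which uses the array calling convention,
    // saving the exit FP and PC for backtraces first.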
fn wasm_to_array_trampoline(
&self,
ty: &WasmFuncType,
host_fn: usize,
) -> Result<CompiledFunction<CompiledFuncEnv>, CompileError> {
let isa = &*self.isa;
let pointer_type = isa.pointer_type();
let wasm_call_sig = wasm_call_signature(isa, ty, &self.tunables);
let array_call_sig = array_call_signature(isa);
let mut compiler = self.function_compiler();
let func = ir::Function::with_name_signature(Default::default(), wasm_call_sig);
let (mut builder, block0) = compiler.builder(func);
let args = builder.func.dfg.block_params(block0).to_vec();
let caller_vmctx = args[1];
debug_assert_vmctx_kind(
isa,
&mut builder,
caller_vmctx,
wasmtime_environ::VMCONTEXT_MAGIC,
);
let ptr_size = isa.pointer_bytes();
let limits = builder.ins().load(
pointer_type,
MemFlags::trusted(),
caller_vmctx,
ptr_size.vmcontext_runtime_limits(),
);
save_last_wasm_exit_fp_and_pc(&mut builder, pointer_type, &ptr_size, limits);
let (values_vec_ptr, values_vec_len) =
self.allocate_stack_array_and_spill_args(ty, &mut builder, &args[2..]);
let values_vec_len = builder
.ins()
.iconst(pointer_type, i64::from(values_vec_len));
let block_params = builder.func.dfg.block_params(block0);
let callee_args = [
block_params[0],
block_params[1],
values_vec_ptr,
values_vec_len,
];
let new_sig = builder.import_signature(array_call_sig);
let callee_value = builder.ins().iconst(pointer_type, host_fn as i64);
builder
.ins()
.call_indirect(new_sig, callee_value, &callee_args);
let results =
self.load_values_from_array(ty.returns(), &mut builder, values_vec_ptr, values_vec_len);
builder.ins().return_(&results);
builder.finalize();
compiler.finish()
}
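
    // Allocates a stack slot large enough for the larger of `ty`'s parameter
    // and result lists (one 16-byte slot per value), spills `args` into it,
    // and returns the slot's address along with its capacity in values.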
fn allocate_stack_array_and_spill_args(
&self,
ty: &WasmFuncType,
builder: &mut FunctionBuilder,
args: &[ir::Value],
) -> (Value, u32) {
let isa = &*self.isa;
let pointer_type = isa.pointer_type();
let value_size = mem::size_of::<u128>();
let values_vec_len = cmp::max(ty.params().len(), ty.returns().len());
let values_vec_byte_size = u32::try_from(value_size * values_vec_len).unwrap();
let values_vec_len = u32::try_from(values_vec_len).unwrap();
let slot = builder.func.create_sized_stack_slot(ir::StackSlotData::new(
ir::StackSlotKind::ExplicitSlot,
values_vec_byte_size,
));
let values_vec_ptr = builder.ins().stack_addr(pointer_type, slot, 0);
{
let values_vec_len = builder
.ins()
.iconst(ir::types::I32, i64::try_from(values_vec_len).unwrap());
self.store_values_to_array(builder, ty.params(), args, values_vec_ptr, values_vec_len);
}
(values_vec_ptr, values_vec_len)
}
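
    // Stores `values` into consecutive 16-byte slots of the array, using
    // little-endian byte order regardless of the host's endianness.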
fn store_values_to_array(
&self,
builder: &mut FunctionBuilder,
types: &[WasmType],
values: &[Value],
values_vec_ptr: Value,
values_vec_capacity: Value,
) {
debug_assert_enough_capacity_for_length(builder, types.len(), values_vec_capacity);
let mut mflags = MemFlags::trusted();
mflags.set_endianness(ir::Endianness::Little);
let value_size = mem::size_of::<u128>();
for (i, val) in values.iter().copied().enumerate() {
builder
.ins()
.store(mflags, val, values_vec_ptr, (i * value_size) as i32);
}
}
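
    // Loads one value of each type in `types` from consecutive 16-byte slots
    // of the array, again in little-endian byte order.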
fn load_values_from_array(
&self,
types: &[WasmType],
builder: &mut FunctionBuilder,
values_vec_ptr: Value,
values_vec_capacity: Value,
) -> Vec<ir::Value> {
let isa = &*self.isa;
let value_size = mem::size_of::<u128>();
debug_assert_enough_capacity_for_length(builder, types.len(), values_vec_capacity);
let mut mflags = MemFlags::trusted();
mflags.set_endianness(ir::Endianness::Little);
let mut results = Vec::new();
for (i, r) in types.iter().enumerate() {
let load = builder.ins().load(
value_type(isa, *r),
mflags,
values_vec_ptr,
(i * value_size) as i32,
);
results.push(load);
}
results
}
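
    // Checks a `CompilerContext` out of the shared pool, or creates a fresh
    // one (with cache bookkeeping if a cache store is configured).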
fn function_compiler(&self) -> FunctionCompiler<'_> {
let saved_context = self.contexts.lock().unwrap().pop();
FunctionCompiler {
compiler: self,
cx: saved_context
.map(|mut ctx| {
ctx.codegen_context.clear();
ctx
})
.unwrap_or_else(|| CompilerContext {
#[cfg(feature = "incremental-cache")]
incremental_cache_ctx: self.cache_store.as_ref().map(|cache_store| {
IncrementalCacheContext {
cache_store: cache_store.clone(),
num_hits: 0,
num_cached: 0,
}
}),
..Default::default()
}),
}
}
}
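
/// Resolver for `UserExternalNameRef`s in compiled functions, mapping each
/// reference back to the `(namespace, index)` pair it was declared with.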
pub struct CompiledFuncEnv {
map: PrimaryMap<UserExternalNameRef, UserExternalName>,
}
impl wasmtime_cranelift_shared::CompiledFuncEnv for CompiledFuncEnv {
fn resolve_user_external_name_ref(&self, external: ir::UserExternalNameRef) -> (u32, u32) {
let UserExternalName { index, namespace } = self.map[external];
(namespace, index)
}
}
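
/// A single-function compilation in progress: the owning `Compiler` plus a
/// context checked out of its pool, which is returned when compilation
/// finishes.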
struct FunctionCompiler<'a> {
compiler: &'a Compiler,
cx: CompilerContext,
}
impl FunctionCompiler<'_> {
fn builder(&mut self, func: ir::Function) -> (FunctionBuilder<'_>, ir::Block) {
self.cx.codegen_context.func = func;
let mut builder = FunctionBuilder::new(
&mut self.cx.codegen_context.func,
self.cx.func_translator.context(),
);
let block0 = builder.create_block();
builder.append_block_params_for_function_params(block0);
builder.switch_to_block(block0);
builder.seal_block(block0);
(builder, block0)
}
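
    // Finishes a trampoline, which never carries Wasm-level metadata such as
    // stack maps (hence the assert).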
fn finish(self) -> Result<CompiledFunction<CompiledFuncEnv>, CompileError> {
let (info, func) = self.finish_with_info(None)?;
assert!(info.stack_maps.is_empty());
Ok(func)
}
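
    // Runs Cranelift compilation (possibly via the incremental cache) and
    // packages the result as a `CompiledFunction`, attaching address maps,
    // value-label ranges, unwind info, and stack maps as appropriate.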
fn finish_with_info(
mut self,
body_and_tunables: Option<(&FunctionBody<'_>, &Tunables)>,
) -> Result<(WasmFunctionInfo, CompiledFunction<CompiledFuncEnv>), CompileError> {
let context = &mut self.cx.codegen_context;
let isa = &*self.compiler.isa;
let (_, _code_buf) =
compile_maybe_cached(context, isa, self.cx.incremental_cache_ctx.as_mut())?;
let compiled_code = context.compiled_code().unwrap();
let preferred_alignment = if body_and_tunables.is_some() {
self.compiler.isa.function_alignment().preferred
} else {
1
};
let alignment = compiled_code.buffer.alignment.max(preferred_alignment);
let env = CompiledFuncEnv {
map: context.func.params.user_named_funcs().clone(),
};
let mut compiled_function =
CompiledFunction::new(compiled_code.buffer.clone(), env, alignment);
if let Some((body, tunables)) = body_and_tunables {
let data = body.get_binary_reader();
let offset = data.original_position();
let len = data.bytes_remaining();
compiled_function.set_address_map(
offset as u32,
len as u32,
tunables.generate_address_map,
);
}
if body_and_tunables
.map(|(_, t)| t.generate_native_debuginfo)
.unwrap_or(false)
{
compiled_function.set_value_labels_ranges(compiled_code.value_labels_ranges.clone());
}
if isa.flags().unwind_info() {
let unwind = compiled_code
.create_unwind_info(isa)
.map_err(|error| CompileError::Codegen(pretty_error(&context.func, error)))?;
if let Some(unwind_info) = unwind {
compiled_function.set_unwind_info(unwind_info);
}
}
let stack_maps = mach_stack_maps_to_stack_maps(compiled_code.buffer.stack_maps());
compiled_function
.set_sized_stack_slots(std::mem::take(&mut context.func.sized_stack_slots));
self.compiler.contexts.lock().unwrap().push(self.cx);
Ok((
WasmFunctionInfo {
start_srcloc: compiled_function.metadata().address_map.start_srcloc,
stack_maps: stack_maps.into(),
},
compiled_function,
))
}
}
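
/// Converts Cranelift's stack maps into Wasmtime's representation, sorted by
/// the code offset at which each map applies.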
fn mach_stack_maps_to_stack_maps(mach_stack_maps: &[MachStackMap]) -> Vec<StackMapInformation> {
let mut stack_maps = Vec::new();
for &MachStackMap {
offset_end,
ref stack_map,
..
} in mach_stack_maps
{
let stack_map = wasmtime_environ::StackMap::new(
stack_map.mapped_words(),
stack_map.as_slice().iter().map(|a| a.0),
);
stack_maps.push(StackMapInformation {
code_offset: offset_end,
stack_map,
});
}
stack_maps.sort_unstable_by_key(|info| info.code_offset);
stack_maps
}
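
/// Declares `func_index` as a colocated imported function in namespace 0 and
/// emits a direct call to it.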
fn declare_and_call(
builder: &mut FunctionBuilder,
signature: ir::Signature,
func_index: u32,
args: &[ir::Value],
) -> ir::Inst {
let name = ir::ExternalName::User(builder.func.declare_imported_user_function(
ir::UserExternalName {
namespace: 0,
index: func_index,
},
));
let signature = builder.func.import_signature(signature);
let callee = builder.func.dfg.ext_funcs.push(ir::ExtFuncData {
name,
signature,
colocated: true,
});
    builder.ins().call(callee, args)
}
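
/// In debug builds, emits a trap if `capacity` is less than `length`.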
fn debug_assert_enough_capacity_for_length(
builder: &mut FunctionBuilder,
length: usize,
capacity: ir::Value,
) {
if cfg!(debug_assertions) {
let enough_capacity = builder.ins().icmp_imm(
ir::condcodes::IntCC::UnsignedGreaterThanOrEqual,
capacity,
ir::immediates::Imm64::new(length.try_into().unwrap()),
);
builder
.ins()
.trapz(enough_capacity, ir::TrapCode::User(DEBUG_ASSERT_TRAP_CODE));
}
}
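
/// In debug builds, emits a trap unless the 32-bit magic value at the start
/// of `vmctx` matches `expected_vmctx_magic`.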
fn debug_assert_vmctx_kind(
isa: &dyn TargetIsa,
builder: &mut FunctionBuilder,
vmctx: ir::Value,
expected_vmctx_magic: u32,
) {
if cfg!(debug_assertions) {
let magic = builder.ins().load(
ir::types::I32,
MemFlags::trusted().with_endianness(isa.endianness()),
vmctx,
0,
);
let is_expected_vmctx = builder.ins().icmp_imm(
ir::condcodes::IntCC::Equal,
magic,
i64::from(expected_vmctx_magic),
);
builder.ins().trapz(
is_expected_vmctx,
ir::TrapCode::User(DEBUG_ASSERT_TRAP_CODE),
);
}
}
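
/// Records the current stack pointer in the `VMRuntimeLimits` pointed to by
/// `vmctx`, marking the most recent entry from the host into Wasm.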
fn save_last_wasm_entry_sp(
builder: &mut FunctionBuilder,
pointer_type: ir::Type,
ptr_size: &impl PtrSize,
vm_runtime_limits_offset: u32,
vmctx: Value,
) {
let limits = builder.ins().load(
pointer_type,
MemFlags::trusted(),
vmctx,
i32::try_from(vm_runtime_limits_offset).unwrap(),
);
let sp = builder.ins().get_stack_pointer(pointer_type);
builder.ins().store(
MemFlags::trusted(),
sp,
limits,
ptr_size.vmruntime_limits_last_wasm_entry_sp(),
);
}
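
/// Records the caller's frame pointer and return address in `limits` at the
/// point Wasm exits to the host: the trampoline's saved FP slot holds the
/// Wasm frame's FP, and the trampoline's return address is the Wasm PC.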
fn save_last_wasm_exit_fp_and_pc(
builder: &mut FunctionBuilder,
pointer_type: ir::Type,
ptr: &impl PtrSize,
limits: Value,
) {
let trampoline_fp = builder.ins().get_frame_pointer(pointer_type);
let wasm_fp = builder.ins().load(
pointer_type,
MemFlags::trusted(),
trampoline_fp,
0,
);
builder.ins().store(
MemFlags::trusted(),
wasm_fp,
limits,
ptr.vmruntime_limits_last_wasm_exit_fp(),
);
let wasm_pc = builder.ins().get_return_address(pointer_type);
builder.ins().store(
MemFlags::trusted(),
wasm_pc,
limits,
ptr.vmruntime_limits_last_wasm_exit_pc(),
);
}
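
/// How a native-calling-convention function returns its results: either
/// directly (zero or one result) or with the first result returned directly
/// and the rest stored through a caller-provided return pointer.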
enum NativeRet {
Bare,
Retptr { offsets: Vec<u32>, size: u32 },
}
impl NativeRet {
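    // Computes the layout of the return area for `ty`: each result after the
    // first gets a naturally-aligned offset, and the total size is rounded
    // up to the largest alignment seen.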
fn classify(pointer_type: ir::Type, ty: &WasmFuncType) -> NativeRet {
fn align_to(val: u32, align: u32) -> u32 {
(val + (align - 1)) & !(align - 1)
}
match ty.returns() {
[] | [_] => NativeRet::Bare,
other => {
let mut offset = 0;
let mut offsets = Vec::new();
let mut max_align = 1;
for ty in other[1..].iter() {
let size = match ty {
WasmType::I32 | WasmType::F32 => 4,
WasmType::I64 | WasmType::F64 => 8,
WasmType::Ref(_) => pointer_type.bytes(),
WasmType::V128 => 16,
};
offset = align_to(offset, size);
offsets.push(offset);
offset += size;
max_align = max_align.max(size);
}
NativeRet::Retptr {
offsets,
size: align_to(offset, max_align),
}
}
}
}
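
    // Strips the trailing return-pointer argument, if present, leaving the
    // arguments to forward to the Wasm callee.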
fn native_args<'a>(&self, args: &'a [ir::Value]) -> &'a [ir::Value] {
match self {
NativeRet::Bare => args,
NativeRet::Retptr { .. } => &args[..args.len() - 1],
}
}
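
    // Returns `results` per the classification: directly for `Bare`, or by
    // storing all but the first through the caller's return pointer.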
fn native_return(
&self,
builder: &mut FunctionBuilder<'_>,
block0: ir::Block,
results: &[ir::Value],
) {
match self {
NativeRet::Bare => {
                builder.ins().return_(results);
}
NativeRet::Retptr { offsets, .. } => {
let ptr = *builder.func.dfg.block_params(block0).last().unwrap();
let (first, rest) = results.split_first().unwrap();
assert_eq!(rest.len(), offsets.len());
for (arg, offset) in rest.iter().zip(offsets) {
builder
.ins()
.store(MemFlags::trusted(), *arg, ptr, *offset as i32);
}
builder.ins().return_(&[*first]);
}
}
}
}