4129 lines
170 KiB
Rust
4129 lines
170 KiB
Rust
use crate::call_frame::CallFrame;
|
|
use crate::verifier::Verifier;
|
|
use crate::vm_init_error::VmInitError;
|
|
use crate::{HostContext, NativeInterface};
|
|
use prometeu_bytecode::isa::core::CoreOpCode as OpCode;
|
|
use prometeu_bytecode::ProgramImage;
|
|
use prometeu_bytecode::Value;
|
|
use crate::roots::{RootVisitor, visit_value_for_roots};
|
|
use crate::heap::{Heap, CoroutineState};
|
|
use crate::object::ObjectKind;
|
|
use crate::scheduler::Scheduler;
|
|
use prometeu_bytecode::{
|
|
TRAP_BAD_RET_SLOTS, TRAP_DIV_ZERO, TRAP_INVALID_FUNC, TRAP_INVALID_SYSCALL, TRAP_OOB,
|
|
TRAP_STACK_UNDERFLOW, TRAP_TYPE, TrapInfo,
|
|
};
|
|
use prometeu_bytecode::HeapRef;
|
|
use prometeu_hal::syscalls::caps::ALL;
|
|
use prometeu_hal::vm_fault::VmFault;
|
|
|
|
/// Reason why the Virtual Machine stopped execution during a specific run.
/// This allows the system to decide if it should continue execution in the next tick
/// or if the frame is finalized.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum LogicalFrameEndingReason {
    /// Execution reached a `FRAME_SYNC` instruction, marking the end of the logical frame.
    FrameSync,
    /// The cycle budget for the current host tick was exhausted before reaching `FRAME_SYNC`.
    /// Execution can resume from the same PC on the next slice.
    BudgetExhausted,
    /// A `HALT` instruction was executed, terminating the program.
    Halted,
    /// The Program Counter (PC) reached the end of the available bytecode.
    EndOfRom,
    /// Execution hit a registered breakpoint (or executed an explicit `TRAP` opcode).
    Breakpoint,
    /// A runtime trap occurred (e.g., out-of-bounds access).
    Trap(TrapInfo),
    /// A fatal error occurred that cannot be recovered (e.g., stack underflow).
    Panic(String),
}
|
|
|
|
/// Internal error type produced by opcode helper closures (e.g. the arithmetic
/// closures passed to `binary_op`), later mapped to a `LogicalFrameEndingReason`.
pub(crate) enum OpError {
    /// Recoverable runtime trap: (trap code, human-readable message).
    Trap(u32, String),
    /// Fatal, non-recoverable VM error with a diagnostic message.
    Panic(String),
}
|
|
|
|
impl From<TrapInfo> for LogicalFrameEndingReason {
    /// Wraps trap details into the corresponding frame-ending reason.
    fn from(info: TrapInfo) -> Self {
        Self::Trap(info)
    }
}
|
|
|
|
/// A report detailing the results of an execution slice (run_budget).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct BudgetReport {
    /// Total virtual cycles consumed during this run.
    pub cycles_used: u64,
    /// Number of VM instructions executed.
    pub steps_executed: u32,
    /// The reason why this execution slice ended; callers use this to decide
    /// whether the program can continue in a subsequent slice.
    pub reason: LogicalFrameEndingReason,
}
|
|
|
|
pub struct VirtualMachine {
    /// Program Counter (PC): The absolute byte offset in ROM for the next instruction.
    pc: usize,
    /// Operand Stack: The primary workspace for all mathematical and logical operations.
    /// Also hosts each frame's parameters and locals, addressed relative to
    /// `CallFrame::stack_base`.
    operand_stack: Vec<Value>,
    /// Call Stack: Manages function call context (return addresses, frame limits).
    call_stack: Vec<CallFrame>,
    /// Global Variable Store: Variables that persist for the lifetime of the program.
    globals: Vec<Value>,
    /// The loaded executable (Bytecode + Constant Pool), that is the ROM translated.
    program: ProgramImage,
    /// Heap Memory: Dynamic allocation pool.
    heap: Heap,
    /// Total virtual cycles consumed since the VM started.
    cycles: u64,
    /// Stop flag: true if a `HALT` opcode was encountered.
    halted: bool,
    /// Set of ROM addresses used for software breakpoints in the debugger.
    breakpoints: std::collections::HashSet<usize>,
    /// GC: number of newly allocated live objects threshold to trigger a collection at safepoint.
    /// The GC only runs at safepoints (e.g., FRAME_SYNC). 0 disables automatic GC.
    gc_alloc_threshold: usize,
    /// GC: snapshot of live objects count after the last collection (or VM init).
    last_gc_live_count: usize,
    /// Capability flags granted to the currently running program/cart.
    /// Syscalls are capability-gated using `prometeu_hal::syscalls::SyscallMeta::caps`.
    capabilities: prometeu_hal::syscalls::CapFlags,
    /// Cooperative scheduler: set to true when `YIELD` opcode is executed.
    /// The runtime/scheduler should only act on this at safepoints (FRAME_SYNC).
    yield_requested: bool,
    /// Absolute wake tick requested by the currently running coroutine (when it executes `SLEEP`).
    ///
    /// Canonical rule (authoritative):
    /// - `SLEEP N` suspends the coroutine for exactly N full scheduler ticks AFTER the current
    ///   `FRAME_SYNC` completes. If `SLEEP` is executed during tick `T`, the coroutine must resume
    ///   in the frame whose end-of-frame tick will be `T + N + 1`.
    /// - Implementation detail: we compute `wake_tick = current_tick + duration + 1` at the time
    ///   `SLEEP` executes. The scheduler wakes sleeping coroutines when `current_tick >= wake_tick`.
    ///
    /// This definition is deterministic and eliminates off-by-one ambiguity.
    sleep_requested_until: Option<u64>,
    /// Logical tick counter advanced at each FRAME_SYNC boundary.
    current_tick: u64,
    /// Cooperative scheduler instance managing ready/sleeping queues.
    scheduler: Scheduler,
    /// Handle to the currently running coroutine (owns the active VM context).
    /// `None` means no coroutine is runnable this frame (e.g., all are sleeping).
    current_coro: Option<HeapRef>,
}
|
|
|
|
|
|
impl Default for VirtualMachine {
|
|
fn default() -> Self {
|
|
Self::new(vec![], vec![])
|
|
}
|
|
}
|
|
|
|
impl VirtualMachine {
|
|
/// Returns the current program counter.
|
|
pub fn pc(&self) -> usize { self.pc }
|
|
|
|
/// Returns true if there are no active call frames.
|
|
pub fn call_stack_is_empty(&self) -> bool { self.call_stack.is_empty() }
|
|
|
|
/// Returns up to `n` values from the top of the operand stack (top-first order).
|
|
pub fn operand_stack_top(&self, n: usize) -> Vec<Value> {
|
|
let len = self.operand_stack.len();
|
|
let start = len.saturating_sub(n);
|
|
self.operand_stack[start..].iter().rev().cloned().collect()
|
|
}
|
|
|
|
/// Returns true if the VM has executed a HALT and is not currently running.
|
|
pub fn is_halted(&self) -> bool { self.halted }
|
|
|
|
/// Adds a software breakpoint at the given PC.
|
|
pub fn insert_breakpoint(&mut self, pc: usize) { let _ = self.breakpoints.insert(pc); }
|
|
|
|
/// Removes a software breakpoint at the given PC, if present.
|
|
pub fn remove_breakpoint(&mut self, pc: usize) { let _ = self.breakpoints.remove(&pc); }
|
|
|
|
/// Returns the list of currently configured breakpoints.
|
|
pub fn breakpoints_list(&self) -> Vec<usize> { self.breakpoints.iter().cloned().collect() }
|
|
|
|
    // Test-only helpers for internal unit tests within this crate.
    /// Pushes a raw value onto the operand stack, bypassing instruction decoding.
    #[cfg(test)]
    pub(crate) fn push_operand_for_test(&mut self, v: Value) { self.operand_stack.push(v); }
|
|
/// Creates a new VM instance with the provided bytecode and constants.
|
|
pub fn new(rom: Vec<u8>, constant_pool: Vec<Value>) -> Self {
|
|
Self {
|
|
pc: 0,
|
|
operand_stack: Vec::new(),
|
|
call_stack: Vec::new(),
|
|
globals: Vec::new(),
|
|
program: ProgramImage::new(
|
|
rom,
|
|
constant_pool,
|
|
vec![],
|
|
None,
|
|
std::collections::HashMap::new(),
|
|
),
|
|
heap: Heap::new(),
|
|
cycles: 0,
|
|
halted: false,
|
|
breakpoints: std::collections::HashSet::new(),
|
|
gc_alloc_threshold: 1024, // conservative default; tests may override
|
|
last_gc_live_count: 0,
|
|
capabilities: ALL,
|
|
yield_requested: false,
|
|
sleep_requested_until: None,
|
|
current_tick: 0,
|
|
scheduler: Scheduler::new(),
|
|
current_coro: None,
|
|
}
|
|
}
|
|
|
|
    /// Resets the VM state and loads a new program.
    /// This is typically called by the Firmware when starting a new App/Cartridge.
    ///
    /// `entrypoint` may be empty (defaults to function 0), a numeric function
    /// index, or an exported symbol name.
    ///
    /// # Errors
    /// Returns a `VmInitError` if the image is not PBS v0, fails to load or
    /// verify, or the entrypoint cannot be resolved. On any error path the VM
    /// is left halted with an empty program.
    pub fn initialize(
        &mut self,
        program_bytes: Vec<u8>,
        entrypoint: &str,
    ) -> Result<(), VmInitError> {
        // Fail fast: reset state upfront. If we return early with an error,
        // the VM is left in a "halted and empty" state.
        self.program = ProgramImage::default();
        self.pc = 0;
        self.operand_stack.clear();
        self.call_stack.clear();
        self.globals.clear();
        self.heap = Heap::new();
        self.cycles = 0;
        self.halted = true; // execution is impossible until a successful load
        self.last_gc_live_count = 0;
        self.current_tick = 0;
        self.sleep_requested_until = None;
        self.scheduler = Scheduler::new();
        self.current_coro = None;
        // Preserve capabilities across loads; firmware may set them per cart.

        // Only recognized format is loadable: PBS v0 industrial format
        let program = if program_bytes.starts_with(b"PBS\0") {
            match prometeu_bytecode::BytecodeLoader::load(&program_bytes) {
                Ok(module) => {
                    // Run verifier on the module
                    let max_stacks = Verifier::verify(&module.code, &module.functions)
                        .map_err(|e| VmInitError::VerificationFailed(format!("{:?}", e)))?;

                    let mut program = ProgramImage::from(module);

                    // Back-fill each function's `max_stack_slots` with the
                    // verifier-computed bound (one entry per function, zipped
                    // in order).
                    let mut functions = program.functions.as_ref().to_vec();
                    for (func, max_stack) in functions.iter_mut().zip(max_stacks) {
                        func.max_stack_slots = max_stack;
                    }
                    program.functions = std::sync::Arc::from(functions);

                    program
                }
                Err(prometeu_bytecode::LoadError::InvalidVersion) => {
                    return Err(VmInitError::UnsupportedFormat);
                }
                Err(e) => {
                    return Err(VmInitError::ImageLoadFailed(e));
                }
            }
        } else {
            return Err(VmInitError::InvalidFormat);
        };

        // Resolve the entrypoint: empty (defaults to func 0), numeric func_idx, or symbol name.
        let pc = if entrypoint.is_empty() {
            program.functions.first().map(|f| f.code_offset as usize).unwrap_or(0)
        } else if let Ok(func_idx) = entrypoint.parse::<usize>() {
            program
                .functions
                .get(func_idx)
                .map(|f| f.code_offset as usize)
                .ok_or(VmInitError::EntrypointNotFound)?
        } else {
            // Try to resolve as a symbol name from the exports map
            if let Some(&func_idx) = program.exports.get(entrypoint) {
                program
                    .functions
                    .get(func_idx as usize)
                    .map(|f| f.code_offset as usize)
                    .ok_or(VmInitError::EntrypointNotFound)?
            } else {
                return Err(VmInitError::EntrypointNotFound);
            }
        };

        // Finalize initialization by applying the new program and PC.
        self.program = program;
        self.pc = pc;
        self.halted = false; // Successfully loaded, execution is now possible

        Ok(())
    }
|
|
|
|
    /// Sets the capability flags for the current program.
    /// Capabilities gate syscalls; they are preserved across `initialize` calls
    /// so firmware can configure them per cart.
    pub fn set_capabilities(&mut self, caps: prometeu_hal::syscalls::CapFlags) {
        self.capabilities = caps;
    }
|
|
|
|
/// Prepares the VM to execute a specific entrypoint by setting the PC and
|
|
/// pushing an initial call frame.
|
|
pub fn prepare_call(&mut self, entrypoint: &str) {
|
|
let func_idx = if let Ok(idx) = entrypoint.parse::<usize>() {
|
|
idx
|
|
} else {
|
|
// Try to resolve as a symbol name
|
|
self.program.exports.get(entrypoint).map(|&idx| idx as usize).ok_or(()).unwrap_or(0) // Default to 0 if not found
|
|
};
|
|
|
|
let callee = self.program.functions.get(func_idx).cloned().unwrap_or_default();
|
|
let addr = callee.code_offset as usize;
|
|
|
|
self.pc = addr;
|
|
self.halted = false;
|
|
|
|
// Pushing a sentinel frame so RET works at the top level.
|
|
// The return address is set to the end of ROM, which will naturally
|
|
// cause the VM to stop after returning from the entrypoint.
|
|
self.operand_stack.clear();
|
|
self.call_stack.clear();
|
|
|
|
// Entrypoint also needs locals allocated.
|
|
// For the sentinel frame, stack_base is always 0.
|
|
if let Some(func) = self.program.functions.get(func_idx) {
|
|
let total_slots = func.param_slots as u32 + func.local_slots as u32;
|
|
for _ in 0..total_slots {
|
|
self.operand_stack.push(Value::Null);
|
|
}
|
|
}
|
|
|
|
self.call_stack.push(CallFrame {
|
|
return_pc: self.program.rom.len() as u32,
|
|
stack_base: 0,
|
|
func_idx,
|
|
});
|
|
|
|
// Initialize the main coroutine object.
|
|
// IMPORTANT INVARIANT:
|
|
// - The RUNNING coroutine's authoritative execution state lives in the VM fields
|
|
// (pc, operand_stack, call_stack).
|
|
// - The heap-side coroutine object is authoritative ONLY when the coroutine is suspended
|
|
// (Ready/Sleeping/Finished). While running, its `stack`/`frames` should be empty.
|
|
//
|
|
// Therefore we do NOT clone the VM stacks into the heap here. We create the main
|
|
// coroutine object with empty stack/frames and mark it as Running, and the VM already
|
|
// holds the live execution context initialized above.
|
|
let main_href = self.heap.allocate_coroutine(
|
|
self.pc,
|
|
CoroutineState::Running,
|
|
0,
|
|
Vec::new(),
|
|
Vec::new(),
|
|
);
|
|
self.current_coro = Some(main_href);
|
|
self.scheduler.set_current(self.current_coro);
|
|
}
|
|
|
|
    /// Executes the VM for a limited number of cycles (budget).
    ///
    /// This is the heart of the deterministic execution model. Instead of running
    /// indefinitely, the VM runs until it consumes its allocated budget or reaches
    /// a synchronization point (`FRAME_SYNC`).
    ///
    /// # Arguments
    /// * `budget` - Maximum number of cycles allowed for this execution slice.
    /// * `native` - Interface for handling syscalls (Firmware/OS).
    /// * `ctx` - Host context passed through to each `step` (e.g. peripherals).
    ///
    /// # Errors
    /// Propagates the `Err(String)` of the run loop; per-instruction failures
    /// (traps, panics) are reported via `BudgetReport::reason` instead.
    pub fn run_budget(
        &mut self,
        budget: u64,
        native: &mut dyn NativeInterface,
        ctx: &mut HostContext,
    ) -> Result<BudgetReport, String> {
        let start_cycles = self.cycles;
        let mut steps_executed = 0;
        let mut ending_reason: Option<LogicalFrameEndingReason> = None;

        while (self.cycles - start_cycles) < budget && !self.halted && self.pc < self.program.rom.len() {
            // Debugger support: stop before executing an instruction if there's a breakpoint.
            // Note: we skip the check for the very first step of a slice to avoid
            // getting stuck on the same breakpoint repeatedly.
            if steps_executed > 0 && self.breakpoints.contains(&self.pc) {
                ending_reason = Some(LogicalFrameEndingReason::Breakpoint);
                break;
            }

            let pc_before = self.pc;
            let cycles_before = self.cycles;

            // Execute a single step (Fetch-Decode-Execute)
            if let Err(reason) = self.step(native, ctx) {
                ending_reason = Some(reason);
                break;
            }
            steps_executed += 1;

            // Integrity check: ensure real progress is being made to avoid infinite loops
            // caused by zero-cycle instructions or stuck PC.
            if self.pc == pc_before && self.cycles == cycles_before && !self.halted {
                ending_reason = Some(LogicalFrameEndingReason::Panic(format!(
                    "VM stuck at PC 0x{:08X}",
                    self.pc
                )));
                break;
            }
        }

        // Determine why we stopped if no explicit reason (FrameSync/Breakpoint) was set.
        if ending_reason.is_none() {
            if self.halted {
                ending_reason = Some(LogicalFrameEndingReason::Halted);
            } else if self.pc >= self.program.rom.len() {
                ending_reason = Some(LogicalFrameEndingReason::EndOfRom);
            } else {
                // Loop condition failed on the budget check: slice exhausted.
                ending_reason = Some(LogicalFrameEndingReason::BudgetExhausted);
            }
        }

        Ok(BudgetReport {
            cycles_used: self.cycles - start_cycles,
            steps_executed,
            // Safe: every exit path above assigns `ending_reason`.
            reason: ending_reason.unwrap(),
        })
    }
|
|
|
|
|
|
/// Harness: run exactly `frames` logical frames deterministically.
|
|
///
|
|
/// This repeatedly calls `run_budget` with the provided `budget_per_slice` until
|
|
/// a full logical frame is completed (i.e., a `FrameSync` is observed). If a
|
|
/// terminal condition is reached earlier (Halt/EndOfRom/Panic/Trap/Breakpoint),
|
|
/// the function returns early with all collected slice reports so far.
|
|
pub fn run_frames(
|
|
&mut self,
|
|
frames: u64,
|
|
budget_per_slice: u64,
|
|
native: &mut dyn NativeInterface,
|
|
ctx: &mut HostContext,
|
|
) -> Result<Vec<BudgetReport>, String> {
|
|
assert!(budget_per_slice > 0, "budget_per_slice must be > 0");
|
|
|
|
let mut out = Vec::new();
|
|
let mut frames_done = 0u64;
|
|
while frames_done < frames {
|
|
let rep = self.run_budget(budget_per_slice, native, ctx)?;
|
|
let terminal = matches!(
|
|
rep.reason,
|
|
LogicalFrameEndingReason::Halted
|
|
| LogicalFrameEndingReason::EndOfRom
|
|
| LogicalFrameEndingReason::Panic(_)
|
|
| LogicalFrameEndingReason::Trap(_)
|
|
| LogicalFrameEndingReason::Breakpoint
|
|
);
|
|
|
|
let is_frame_end = matches!(rep.reason, LogicalFrameEndingReason::FrameSync);
|
|
out.push(rep);
|
|
|
|
if terminal {
|
|
break;
|
|
}
|
|
if is_frame_end {
|
|
frames_done += 1;
|
|
}
|
|
}
|
|
Ok(out)
|
|
}
|
|
|
|
    /// Harness: alias for `run_frames(frames, ...)`.
    /// A "tick" and a logical frame are the same unit here; this simply
    /// delegates with `ticks` as the frame count.
    pub fn run_ticks(
        &mut self,
        ticks: u64,
        budget_per_slice: u64,
        native: &mut dyn NativeInterface,
        ctx: &mut HostContext,
    ) -> Result<Vec<BudgetReport>, String> {
        self.run_frames(ticks, budget_per_slice, native, ctx)
    }
|
|
|
|
/// Harness: run until HALT/EndOfRom/Panic/Trap/Breakpoint deterministically.
|
|
///
|
|
/// Repeatedly invokes `run_budget` with a fixed `budget_per_slice`, collecting
|
|
/// each slice's report until a terminal condition is reached.
|
|
pub fn run_until_halt(
|
|
&mut self,
|
|
budget_per_slice: u64,
|
|
native: &mut dyn NativeInterface,
|
|
ctx: &mut HostContext,
|
|
) -> Result<Vec<BudgetReport>, String> {
|
|
assert!(budget_per_slice > 0, "budget_per_slice must be > 0");
|
|
|
|
let mut out = Vec::new();
|
|
loop {
|
|
let rep = self.run_budget(budget_per_slice, native, ctx)?;
|
|
let terminal = matches!(
|
|
rep.reason,
|
|
LogicalFrameEndingReason::Halted
|
|
| LogicalFrameEndingReason::EndOfRom
|
|
| LogicalFrameEndingReason::Panic(_)
|
|
| LogicalFrameEndingReason::Trap(_)
|
|
| LogicalFrameEndingReason::Breakpoint
|
|
);
|
|
out.push(rep);
|
|
if terminal { break; }
|
|
}
|
|
Ok(out)
|
|
}
|
|
|
|
|
|
/// Executes a single instruction at the current Program Counter (PC).
|
|
///
|
|
/// This follows the classic CPU cycle:
|
|
/// 1. Fetch: Read the opcode from memory.
|
|
/// 2. Decode: Identify what operation to perform.
|
|
/// 3. Execute: Perform the operation, updating stacks, memory, or calling peripherals.
|
|
pub fn step(
|
|
&mut self,
|
|
native: &mut dyn NativeInterface,
|
|
ctx: &mut HostContext,
|
|
) -> Result<(), LogicalFrameEndingReason> {
|
|
// If there is no currently running coroutine (e.g., all are sleeping),
|
|
// we cannot execute any instruction this frame. End the frame immediately
|
|
// with a safepoint to advance tick and potentially wake sleepers.
|
|
if self.current_coro.is_none() {
|
|
self.cycles += OpCode::FrameSync.cycles();
|
|
self.handle_safepoint();
|
|
return Err(LogicalFrameEndingReason::FrameSync);
|
|
}
|
|
if self.halted || self.pc >= self.program.rom.len() {
|
|
return Ok(());
|
|
}
|
|
|
|
let start_pc = self.pc;
|
|
|
|
// Fetch & Decode
|
|
let instr = prometeu_bytecode::decode_next(self.pc, &self.program.rom)
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
|
|
let opcode = instr.opcode;
|
|
self.pc = instr.next_pc;
|
|
|
|
// Execute
|
|
match opcode {
|
|
OpCode::Nop => {}
|
|
OpCode::Halt => {
|
|
self.halted = true;
|
|
}
|
|
OpCode::Jmp => {
|
|
let target = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
let func_start = self
|
|
.call_stack
|
|
.last()
|
|
.map(|f| self.program.functions[f.func_idx].code_offset as usize)
|
|
.unwrap_or(0);
|
|
self.pc = func_start + target;
|
|
}
|
|
OpCode::JmpIfFalse => {
|
|
let target = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
|
|
match val {
|
|
Value::Boolean(false) => {
|
|
let func_start = self
|
|
.call_stack
|
|
.last()
|
|
.map(|f| self.program.functions[f.func_idx].code_offset as usize)
|
|
.unwrap_or(0);
|
|
self.pc = func_start + target;
|
|
}
|
|
Value::Boolean(true) => {}
|
|
_ => {
|
|
return Err(self.trap(
|
|
TRAP_TYPE,
|
|
opcode as u16,
|
|
format!("Expected boolean for JMP_IF_FALSE, got {:?}", val),
|
|
start_pc as u32,
|
|
));
|
|
}
|
|
}
|
|
}
|
|
OpCode::JmpIfTrue => {
|
|
let target = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
|
|
match val {
|
|
Value::Boolean(true) => {
|
|
let func_start = self
|
|
.call_stack
|
|
.last()
|
|
.map(|f| self.program.functions[f.func_idx].code_offset as usize)
|
|
.unwrap_or(0);
|
|
self.pc = func_start + target;
|
|
}
|
|
Value::Boolean(false) => {}
|
|
_ => {
|
|
return Err(self.trap(
|
|
TRAP_TYPE,
|
|
opcode as u16,
|
|
format!("Expected boolean for JMP_IF_TRUE, got {:?}", val),
|
|
start_pc as u32,
|
|
));
|
|
}
|
|
}
|
|
}
|
|
OpCode::Trap => {
|
|
// Manual breakpoint instruction: consume cycles and signal a breakpoint
|
|
self.cycles += OpCode::Trap.cycles();
|
|
return Err(LogicalFrameEndingReason::Breakpoint);
|
|
}
|
|
OpCode::Spawn => {
|
|
// Operands: (fn_id, arg_count)
|
|
let (fn_id_u32, arg_count_u32) = instr
|
|
.imm_u32x2()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
let fn_id = fn_id_u32 as usize;
|
|
let arg_count = arg_count_u32 as usize;
|
|
|
|
let callee = self.program.functions.get(fn_id).ok_or_else(|| {
|
|
self.trap(
|
|
TRAP_INVALID_FUNC,
|
|
opcode as u16,
|
|
format!("Invalid func_id {} in SPAWN", fn_id),
|
|
start_pc as u32,
|
|
)
|
|
})?;
|
|
|
|
let param_slots: u16 = callee.param_slots;
|
|
let local_slots: u16 = callee.local_slots;
|
|
let entry_pc = callee.code_offset as usize;
|
|
|
|
if arg_count as u16 != param_slots {
|
|
return Err(self.trap(
|
|
TRAP_TYPE,
|
|
opcode as u16,
|
|
format!(
|
|
"SPAWN arg_count mismatch for func {}: expected {}, got {}",
|
|
fn_id, param_slots, arg_count
|
|
),
|
|
start_pc as u32,
|
|
));
|
|
}
|
|
|
|
if self.operand_stack.len() < arg_count {
|
|
return Err(LogicalFrameEndingReason::Panic(format!(
|
|
"Stack underflow during SPAWN to func {}: expected at least {} arguments, got {}",
|
|
fn_id,
|
|
arg_count,
|
|
self.operand_stack.len()
|
|
)));
|
|
}
|
|
|
|
// Pop args top-first, then reverse to logical order arg1..argN
|
|
let mut args: Vec<Value> = Vec::with_capacity(arg_count);
|
|
for _ in 0..arg_count {
|
|
args.push(self.pop().map_err(LogicalFrameEndingReason::Panic)?);
|
|
}
|
|
args.reverse();
|
|
|
|
// Build operand stack for the new coroutine: params followed by zeroed locals
|
|
let mut new_stack: Vec<Value> = Vec::with_capacity((param_slots + local_slots) as usize);
|
|
// Place user args as parameters
|
|
for v in args { new_stack.push(v); }
|
|
// Zero-init locals
|
|
for _ in 0..local_slots { new_stack.push(Value::Null); }
|
|
|
|
// Initial frame for the coroutine (sentinel-like return to end-of-rom)
|
|
let frames = vec![CallFrame { return_pc: self.program.rom.len() as u32, stack_base: 0, func_idx: fn_id }];
|
|
|
|
let href = self.heap.allocate_coroutine(
|
|
entry_pc,
|
|
CoroutineState::Ready,
|
|
0,
|
|
new_stack,
|
|
frames,
|
|
);
|
|
self.scheduler.enqueue_ready(href);
|
|
}
|
|
OpCode::Yield => {
|
|
// Cooperative yield: record intent; actual switching only at FRAME_SYNC.
|
|
self.yield_requested = true;
|
|
// Do not end the slice here; we continue executing until a safepoint.
|
|
}
|
|
OpCode::Sleep => {
|
|
// Immediate is duration in ticks.
|
|
//
|
|
// Canonical semantics:
|
|
// SLEEP N => suspend for exactly N full scheduler ticks AFTER the current
|
|
// FRAME_SYNC completes. If executed at tick T, resume in the frame whose
|
|
// end-of-frame tick will be T + N + 1.
|
|
//
|
|
// Implementation rule:
|
|
// wake_tick = current_tick + duration + 1
|
|
let duration = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))? as u64;
|
|
let wake = self
|
|
.current_tick
|
|
.saturating_add(duration)
|
|
.saturating_add(1);
|
|
self.sleep_requested_until = Some(wake);
|
|
|
|
// End the logical frame right after the instruction completes
|
|
// to ensure no further instructions run until at least next tick.
|
|
self.cycles += OpCode::FrameSync.cycles();
|
|
self.handle_safepoint();
|
|
return Err(LogicalFrameEndingReason::FrameSync);
|
|
}
|
|
OpCode::MakeClosure => {
|
|
// Immediate carries (fn_id, capture_count)
|
|
let (fn_id, cap_count) = instr
|
|
.imm_u32x2()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
|
|
// Pop cap_count values from the operand stack, top-first.
|
|
let mut temp: Vec<Value> = Vec::with_capacity(cap_count as usize);
|
|
for _ in 0..cap_count {
|
|
let v = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
temp.push(v);
|
|
}
|
|
// Preserve order so that env[0] corresponds to captured_1 (the bottom-most
|
|
// among the popped values): reverse the temp vector.
|
|
temp.reverse();
|
|
|
|
// Allocate closure on heap and push its reference.
|
|
let href = self.heap.alloc_closure(fn_id, &temp);
|
|
self.push(Value::HeapRef(href));
|
|
}
|
|
OpCode::CallClosure => {
|
|
// Operand carries the number of user-supplied arguments (arg1..argN).
|
|
let user_arg_count = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
|
|
// Pop the closure reference from the stack (top of stack).
|
|
let clos_val = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
let href = match clos_val {
|
|
Value::HeapRef(h) => h,
|
|
other => {
|
|
return Err(self.trap(
|
|
TRAP_TYPE,
|
|
opcode as u16,
|
|
format!(
|
|
"CALL_CLOSURE expects a closure handle at TOS, got {:?}",
|
|
other
|
|
),
|
|
start_pc as u32,
|
|
))
|
|
}
|
|
};
|
|
|
|
// Validate that the heap object is indeed a Closure.
|
|
let header = self.heap.header(href).ok_or_else(|| {
|
|
self.trap(
|
|
TRAP_OOB,
|
|
opcode as u16,
|
|
format!("Invalid heap handle in CALL_CLOSURE: {:?}", href),
|
|
start_pc as u32,
|
|
)
|
|
})?;
|
|
if header.kind != ObjectKind::Closure {
|
|
return Err(self.trap(
|
|
TRAP_TYPE,
|
|
opcode as u16,
|
|
format!(
|
|
"CALL_CLOSURE on non-closure object kind {:?}",
|
|
header.kind
|
|
),
|
|
start_pc as u32,
|
|
));
|
|
}
|
|
|
|
// Pop user arguments from the operand stack (top-first), then fix order.
|
|
let mut user_args: Vec<Value> = Vec::with_capacity(user_arg_count);
|
|
for _ in 0..user_arg_count {
|
|
user_args.push(self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?);
|
|
}
|
|
user_args.reverse(); // Now in logical order: arg1..argN
|
|
|
|
// Resolve target function id from the closure payload.
|
|
let fn_id = self.heap.closure_fn_id(href).ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic(
|
|
"Internal error: malformed closure object (missing fn_id)".into(),
|
|
)
|
|
})? as usize;
|
|
|
|
let callee = self.program.functions.get(fn_id).ok_or_else(|| {
|
|
self.trap(
|
|
TRAP_INVALID_FUNC,
|
|
opcode as u16,
|
|
format!("Invalid func_id {} from closure", fn_id),
|
|
start_pc as u32,
|
|
)
|
|
})?;
|
|
// Copy required fields to drop the immutable borrow before mutating self
|
|
let callee_param_slots = callee.param_slots as usize;
|
|
let callee_local_slots = callee.local_slots as usize;
|
|
let callee_code_offset = callee.code_offset as usize;
|
|
|
|
// Validate arity: param_slots must equal hidden arg0 + user_arg_count.
|
|
let expected_params = 1usize + user_arg_count;
|
|
if callee_param_slots != expected_params {
|
|
return Err(self.trap(
|
|
TRAP_TYPE,
|
|
opcode as u16,
|
|
format!(
|
|
"CALL_CLOSURE arg_count mismatch: function expects {} total params (including hidden arg0), got hidden+{}",
|
|
callee_param_slots, expected_params
|
|
),
|
|
start_pc as u32,
|
|
));
|
|
}
|
|
|
|
// Prepare the operand stack to match the direct CALL convention:
|
|
// push hidden arg0 (closure_ref) followed by arg1..argN.
|
|
self.push(Value::HeapRef(href));
|
|
for v in user_args.into_iter() { self.push(v); }
|
|
|
|
let stack_base = self
|
|
.operand_stack
|
|
.len()
|
|
.checked_sub(callee_param_slots)
|
|
.ok_or_else(|| LogicalFrameEndingReason::Panic("Stack underflow".into()))?;
|
|
|
|
// Allocate and zero-init local slots
|
|
for _ in 0..callee_local_slots { self.operand_stack.push(Value::Null); }
|
|
|
|
self.call_stack.push(CallFrame { return_pc: self.pc as u32, stack_base, func_idx: fn_id });
|
|
self.pc = callee_code_offset;
|
|
}
|
|
OpCode::PushConst => {
|
|
let idx = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
let val = self.program.constant_pool.get(idx).cloned().ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic("Invalid constant index".into())
|
|
})?;
|
|
self.push(val);
|
|
}
|
|
OpCode::PushI64 => {
|
|
let val = instr
|
|
.imm_i64()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
self.push(Value::Int64(val));
|
|
}
|
|
OpCode::PushI32 => {
|
|
let val = instr
|
|
.imm_i32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
self.push(Value::Int32(val));
|
|
}
|
|
OpCode::PushBounded => {
|
|
let val = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
if val > 0xFFFF {
|
|
return Err(self.trap(
|
|
TRAP_OOB,
|
|
opcode as u16,
|
|
format!("Bounded value overflow: {} > 0xFFFF", val),
|
|
start_pc as u32,
|
|
));
|
|
}
|
|
self.push(Value::Bounded(val));
|
|
}
|
|
OpCode::PushF64 => {
|
|
let val = instr
|
|
.imm_f64()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
self.push(Value::Float(val));
|
|
}
|
|
OpCode::PushBool => {
|
|
let val = instr
|
|
.imm_u8()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
self.push(Value::Boolean(val != 0));
|
|
}
|
|
OpCode::Pop => {
|
|
self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
}
|
|
OpCode::PopN => {
|
|
let n = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
for _ in 0..n {
|
|
self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
}
|
|
}
|
|
OpCode::Dup => {
|
|
let val = self.peek().map_err(|e| LogicalFrameEndingReason::Panic(e))?.clone();
|
|
self.push(val);
|
|
}
|
|
OpCode::Swap => {
|
|
let a = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
let b = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
self.push(a);
|
|
self.push(b);
|
|
}
|
|
OpCode::Add => self.binary_op(opcode, start_pc as u32, |a, b| match (&a, &b) {
|
|
(Value::String(_), _) | (_, Value::String(_)) => {
|
|
Ok(Value::String(format!("{}{}", a.to_string(), b.to_string())))
|
|
}
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a.wrapping_add(*b))),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a.wrapping_add(*b))),
|
|
(Value::Int32(a), Value::Int64(b)) => {
|
|
Ok(Value::Int64((*a as i64).wrapping_add(*b)))
|
|
}
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a.wrapping_add(*b as i64))),
|
|
(Value::Float(a), Value::Float(b)) => Ok(Value::Float(a + b)),
|
|
(Value::Int32(a), Value::Float(b)) => Ok(Value::Float(*a as f64 + b)),
|
|
(Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a + *b as f64)),
|
|
(Value::Int64(a), Value::Float(b)) => Ok(Value::Float(*a as f64 + b)),
|
|
(Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a + *b as f64)),
|
|
(Value::Bounded(a), Value::Bounded(b)) => {
|
|
let res = a.saturating_add(*b);
|
|
if res > 0xFFFF {
|
|
Err(OpError::Trap(
|
|
TRAP_OOB,
|
|
format!("Bounded addition overflow: {} + {} = {}", a, b, res),
|
|
))
|
|
} else {
|
|
Ok(Value::Bounded(res))
|
|
}
|
|
}
|
|
_ => Err(OpError::Panic("Invalid types for ADD".into())),
|
|
})?,
|
|
OpCode::Sub => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a.wrapping_sub(b))),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a.wrapping_sub(b))),
|
|
(Value::Int32(a), Value::Int64(b)) => Ok(Value::Int64((a as i64).wrapping_sub(b))),
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a.wrapping_sub(b as i64))),
|
|
(Value::Float(a), Value::Float(b)) => Ok(Value::Float(a - b)),
|
|
(Value::Int32(a), Value::Float(b)) => Ok(Value::Float(a as f64 - b)),
|
|
(Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a - b as f64)),
|
|
(Value::Int64(a), Value::Float(b)) => Ok(Value::Float(a as f64 - b)),
|
|
(Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a - b as f64)),
|
|
(Value::Bounded(a), Value::Bounded(b)) => {
|
|
if a < b {
|
|
Err(OpError::Trap(
|
|
TRAP_OOB,
|
|
format!("Bounded subtraction underflow: {} - {} < 0", a, b),
|
|
))
|
|
} else {
|
|
Ok(Value::Bounded(a - b))
|
|
}
|
|
}
|
|
_ => Err(OpError::Panic("Invalid types for SUB".into())),
|
|
})?,
|
|
OpCode::Mul => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a.wrapping_mul(b))),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a.wrapping_mul(b))),
|
|
(Value::Int32(a), Value::Int64(b)) => Ok(Value::Int64((a as i64).wrapping_mul(b))),
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a.wrapping_mul(b as i64))),
|
|
(Value::Float(a), Value::Float(b)) => Ok(Value::Float(a * b)),
|
|
(Value::Int32(a), Value::Float(b)) => Ok(Value::Float(a as f64 * b)),
|
|
(Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a * b as f64)),
|
|
(Value::Int64(a), Value::Float(b)) => Ok(Value::Float(a as f64 * b)),
|
|
(Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a * b as f64)),
|
|
(Value::Bounded(a), Value::Bounded(b)) => {
|
|
let res = a as u64 * b as u64;
|
|
if res > 0xFFFF {
|
|
Err(OpError::Trap(
|
|
TRAP_OOB,
|
|
format!("Bounded multiplication overflow: {} * {} = {}", a, b, res),
|
|
))
|
|
} else {
|
|
Ok(Value::Bounded(res as u32))
|
|
}
|
|
}
|
|
_ => Err(OpError::Panic("Invalid types for MUL".into())),
|
|
})?,
|
|
OpCode::Div => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(
|
|
TRAP_DIV_ZERO,
|
|
"Integer division by zero".into(),
|
|
));
|
|
}
|
|
Ok(Value::Int32(a / b))
|
|
}
|
|
(Value::Int64(a), Value::Int64(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(
|
|
TRAP_DIV_ZERO,
|
|
"Integer division by zero".into(),
|
|
));
|
|
}
|
|
Ok(Value::Int64(a / b))
|
|
}
|
|
(Value::Int32(a), Value::Int64(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(
|
|
TRAP_DIV_ZERO,
|
|
"Integer division by zero".into(),
|
|
));
|
|
}
|
|
Ok(Value::Int64(a as i64 / b))
|
|
}
|
|
(Value::Int64(a), Value::Int32(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(
|
|
TRAP_DIV_ZERO,
|
|
"Integer division by zero".into(),
|
|
));
|
|
}
|
|
Ok(Value::Int64(a / b as i64))
|
|
}
|
|
(Value::Float(a), Value::Float(b)) => {
|
|
if b == 0.0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Float division by zero".into()));
|
|
}
|
|
Ok(Value::Float(a / b))
|
|
}
|
|
(Value::Int32(a), Value::Float(b)) => {
|
|
if b == 0.0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Float division by zero".into()));
|
|
}
|
|
Ok(Value::Float(a as f64 / b))
|
|
}
|
|
(Value::Float(a), Value::Int32(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Float division by zero".into()));
|
|
}
|
|
Ok(Value::Float(a / b as f64))
|
|
}
|
|
(Value::Int64(a), Value::Float(b)) => {
|
|
if b == 0.0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Float division by zero".into()));
|
|
}
|
|
Ok(Value::Float(a as f64 / b))
|
|
}
|
|
(Value::Float(a), Value::Int64(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Float division by zero".into()));
|
|
}
|
|
Ok(Value::Float(a / b as f64))
|
|
}
|
|
(Value::Bounded(a), Value::Bounded(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(
|
|
TRAP_DIV_ZERO,
|
|
"Bounded division by zero".into(),
|
|
));
|
|
}
|
|
Ok(Value::Bounded(a / b))
|
|
}
|
|
_ => Err(OpError::Panic("Invalid types for DIV".into())),
|
|
})?,
|
|
OpCode::Mod => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Integer modulo by zero".into()));
|
|
}
|
|
Ok(Value::Int32(a % b))
|
|
}
|
|
(Value::Int64(a), Value::Int64(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Integer modulo by zero".into()));
|
|
}
|
|
Ok(Value::Int64(a % b))
|
|
}
|
|
(Value::Bounded(a), Value::Bounded(b)) => {
|
|
if b == 0 {
|
|
return Err(OpError::Trap(TRAP_DIV_ZERO, "Bounded modulo by zero".into()));
|
|
}
|
|
Ok(Value::Bounded(a % b))
|
|
}
|
|
_ => Err(OpError::Panic("Invalid types for MOD".into())),
|
|
})?,
|
|
OpCode::BoundToInt => {
|
|
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
|
|
if let Value::Bounded(b) = val {
|
|
self.push(Value::Int64(b as i64));
|
|
} else {
|
|
return Err(LogicalFrameEndingReason::Panic(
|
|
"Expected bounded for BOUND_TO_INT".into(),
|
|
));
|
|
}
|
|
}
|
|
OpCode::IntToBoundChecked => {
|
|
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
|
|
let int_val = val.as_integer().ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic(
|
|
"Expected integer for INT_TO_BOUND_CHECKED".into(),
|
|
)
|
|
})?;
|
|
if !(0..=0xFFFF).contains(&int_val) {
|
|
return Err(self.trap(
|
|
TRAP_OOB,
|
|
OpCode::IntToBoundChecked as u16,
|
|
format!("Integer to bounded conversion out of range: {}", int_val),
|
|
start_pc as u32,
|
|
));
|
|
}
|
|
self.push(Value::Bounded(int_val as u32));
|
|
}
|
|
OpCode::Eq => {
|
|
self.binary_op(opcode, start_pc as u32, |a, b| Ok(Value::Boolean(a == b)))?
|
|
}
|
|
OpCode::Neq => {
|
|
self.binary_op(opcode, start_pc as u32, |a, b| Ok(Value::Boolean(a != b)))?
|
|
}
|
|
OpCode::Lt => self.binary_op(opcode, start_pc as u32, |a, b| {
|
|
a.partial_cmp(&b)
|
|
.map(|o| Value::Boolean(o == std::cmp::Ordering::Less))
|
|
.ok_or_else(|| OpError::Panic("Invalid types for LT".into()))
|
|
})?,
|
|
OpCode::Gt => self.binary_op(opcode, start_pc as u32, |a, b| {
|
|
a.partial_cmp(&b)
|
|
.map(|o| Value::Boolean(o == std::cmp::Ordering::Greater))
|
|
.ok_or_else(|| OpError::Panic("Invalid types for GT".into()))
|
|
})?,
|
|
OpCode::Lte => self.binary_op(opcode, start_pc as u32, |a, b| {
|
|
a.partial_cmp(&b)
|
|
.map(|o| Value::Boolean(o != std::cmp::Ordering::Greater))
|
|
.ok_or_else(|| OpError::Panic("Invalid types for LTE".into()))
|
|
})?,
|
|
OpCode::Gte => self.binary_op(opcode, start_pc as u32, |a, b| {
|
|
a.partial_cmp(&b)
|
|
.map(|o| Value::Boolean(o != std::cmp::Ordering::Less))
|
|
.ok_or_else(|| OpError::Panic("Invalid types for GTE".into()))
|
|
})?,
|
|
OpCode::And => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Boolean(a), Value::Boolean(b)) => Ok(Value::Boolean(a && b)),
|
|
_ => Err(OpError::Panic("Invalid types for AND".into())),
|
|
})?,
|
|
OpCode::Or => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Boolean(a), Value::Boolean(b)) => Ok(Value::Boolean(a || b)),
|
|
_ => Err(OpError::Panic("Invalid types for OR".into())),
|
|
})?,
|
|
OpCode::Not => {
|
|
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
|
|
if let Value::Boolean(b) = val {
|
|
self.push(Value::Boolean(!b));
|
|
} else {
|
|
return Err(LogicalFrameEndingReason::Panic("Invalid type for NOT".into()));
|
|
}
|
|
}
|
|
OpCode::BitAnd => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a & b)),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a & b)),
|
|
(Value::Int32(a), Value::Int64(b)) => Ok(Value::Int64((a as i64) & b)),
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a & (b as i64))),
|
|
_ => Err(OpError::Panic("Invalid types for BitAnd".into())),
|
|
})?,
|
|
OpCode::BitOr => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a | b)),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a | b)),
|
|
(Value::Int32(a), Value::Int64(b)) => Ok(Value::Int64((a as i64) | b)),
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a | (b as i64))),
|
|
_ => Err(OpError::Panic("Invalid types for BitOr".into())),
|
|
})?,
|
|
OpCode::BitXor => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a ^ b)),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a ^ b)),
|
|
(Value::Int32(a), Value::Int64(b)) => Ok(Value::Int64((a as i64) ^ b)),
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a ^ (b as i64))),
|
|
_ => Err(OpError::Panic("Invalid types for BitXor".into())),
|
|
})?,
|
|
OpCode::Shl => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a.wrapping_shl(b as u32))),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a.wrapping_shl(b as u32))),
|
|
(Value::Int32(a), Value::Int64(b)) => {
|
|
Ok(Value::Int64((a as i64).wrapping_shl(b as u32)))
|
|
}
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a.wrapping_shl(b as u32))),
|
|
_ => Err(OpError::Panic("Invalid types for Shl".into())),
|
|
})?,
|
|
OpCode::Shr => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
|
|
(Value::Int32(a), Value::Int32(b)) => Ok(Value::Int32(a.wrapping_shr(b as u32))),
|
|
(Value::Int64(a), Value::Int64(b)) => Ok(Value::Int64(a.wrapping_shr(b as u32))),
|
|
(Value::Int32(a), Value::Int64(b)) => {
|
|
Ok(Value::Int64((a as i64).wrapping_shr(b as u32)))
|
|
}
|
|
(Value::Int64(a), Value::Int32(b)) => Ok(Value::Int64(a.wrapping_shr(b as u32))),
|
|
_ => Err(OpError::Panic("Invalid types for Shr".into())),
|
|
})?,
|
|
OpCode::Neg => {
|
|
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
|
|
match val {
|
|
Value::Int32(a) => self.push(Value::Int32(a.wrapping_neg())),
|
|
Value::Int64(a) => self.push(Value::Int64(a.wrapping_neg())),
|
|
Value::Float(a) => self.push(Value::Float(-a)),
|
|
_ => {
|
|
return Err(LogicalFrameEndingReason::Panic("Invalid type for Neg".into()));
|
|
}
|
|
}
|
|
}
|
|
OpCode::GetGlobal => {
|
|
let idx = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
let val = self.globals.get(idx).cloned().ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic("Invalid global index".into())
|
|
})?;
|
|
self.push(val);
|
|
}
|
|
OpCode::SetGlobal => {
|
|
let idx = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
let val = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
if idx >= self.globals.len() {
|
|
self.globals.resize(idx + 1, Value::Null);
|
|
}
|
|
self.globals[idx] = val;
|
|
}
|
|
OpCode::GetLocal => {
|
|
let slot = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
let frame = self.call_stack.last().ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic("No active call frame".into())
|
|
})?;
|
|
let func = &self.program.functions[frame.func_idx];
|
|
|
|
crate::local_addressing::check_local_slot(
|
|
func,
|
|
slot,
|
|
opcode as u16,
|
|
start_pc as u32,
|
|
)
|
|
.map_err(|trap_info| {
|
|
self.trap(trap_info.code, trap_info.opcode, trap_info.message, trap_info.pc)
|
|
})?;
|
|
|
|
let stack_idx = crate::local_addressing::local_index(frame, slot);
|
|
let val = self.operand_stack.get(stack_idx).cloned().ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic(
|
|
"Internal error: validated local slot not found in stack".into(),
|
|
)
|
|
})?;
|
|
self.push(val);
|
|
}
|
|
OpCode::SetLocal => {
|
|
let slot = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
let val = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
let frame = self.call_stack.last().ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic("No active call frame".into())
|
|
})?;
|
|
let func = &self.program.functions[frame.func_idx];
|
|
|
|
crate::local_addressing::check_local_slot(
|
|
func,
|
|
slot,
|
|
opcode as u16,
|
|
start_pc as u32,
|
|
)
|
|
.map_err(|trap_info| {
|
|
self.trap(trap_info.code, trap_info.opcode, trap_info.message, trap_info.pc)
|
|
})?;
|
|
|
|
let stack_idx = crate::local_addressing::local_index(frame, slot);
|
|
self.operand_stack[stack_idx] = val;
|
|
}
|
|
OpCode::Call => {
|
|
let func_id = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
|
|
as usize;
|
|
let callee = self.program.functions.get(func_id).ok_or_else(|| {
|
|
self.trap(
|
|
TRAP_INVALID_FUNC,
|
|
opcode as u16,
|
|
format!("Invalid func_id {}", func_id),
|
|
start_pc as u32,
|
|
)
|
|
})?;
|
|
|
|
if self.operand_stack.len() < callee.param_slots as usize {
|
|
return Err(LogicalFrameEndingReason::Panic(format!(
|
|
"Stack underflow during CALL to func {}: expected at least {} arguments, got {}",
|
|
func_id,
|
|
callee.param_slots,
|
|
self.operand_stack.len()
|
|
)));
|
|
}
|
|
|
|
let stack_base = self.operand_stack.len() - callee.param_slots as usize;
|
|
|
|
// Allocate and zero-init local_slots
|
|
for _ in 0..callee.local_slots {
|
|
self.operand_stack.push(Value::Null);
|
|
}
|
|
|
|
self.call_stack.push(CallFrame {
|
|
return_pc: self.pc as u32,
|
|
stack_base,
|
|
func_idx: func_id,
|
|
});
|
|
self.pc = callee.code_offset as usize;
|
|
}
|
|
OpCode::Ret => {
|
|
let frame = self.call_stack.pop().ok_or_else(|| {
|
|
LogicalFrameEndingReason::Panic("Call stack underflow".into())
|
|
})?;
|
|
let func = &self.program.functions[frame.func_idx];
|
|
let return_slots = func.return_slots as usize;
|
|
|
|
let current_height = self.operand_stack.len();
|
|
let expected_height = frame.stack_base
|
|
+ func.param_slots as usize
|
|
+ func.local_slots as usize
|
|
+ return_slots;
|
|
|
|
if current_height != expected_height {
|
|
return Err(self.trap(TRAP_BAD_RET_SLOTS, opcode as u16, format!(
|
|
"Incorrect stack height at RET in func {}: expected {} slots (stack_base={} + params={} + locals={} + returns={}), got {}",
|
|
frame.func_idx, expected_height, frame.stack_base, func.param_slots, func.local_slots, return_slots, current_height
|
|
), start_pc as u32));
|
|
}
|
|
|
|
// Copy return values (preserving order: pop return_slots values, then reverse to push back)
|
|
let mut return_vals = Vec::with_capacity(return_slots);
|
|
for _ in 0..return_slots {
|
|
return_vals.push(self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?);
|
|
}
|
|
return_vals.reverse();
|
|
|
|
self.operand_stack.truncate(frame.stack_base);
|
|
for val in return_vals {
|
|
self.push(val);
|
|
}
|
|
self.pc = frame.return_pc as usize;
|
|
}
|
|
OpCode::Syscall => {
|
|
let pc_at_syscall = start_pc as u32;
|
|
let id = instr
|
|
.imm_u32()
|
|
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
|
|
let syscall = prometeu_hal::syscalls::Syscall::from_u32(id).ok_or_else(|| {
|
|
self.trap(
|
|
TRAP_INVALID_SYSCALL,
|
|
OpCode::Syscall as u16,
|
|
format!("Unknown syscall: 0x{:08X}", id),
|
|
pc_at_syscall,
|
|
)
|
|
})?;
|
|
|
|
// Capability check before any side effects or argument consumption.
|
|
let meta = prometeu_hal::syscalls::meta_for(syscall);
|
|
if (self.capabilities & meta.caps) != meta.caps {
|
|
return Err(self.trap(
|
|
TRAP_INVALID_SYSCALL,
|
|
OpCode::Syscall as u16,
|
|
format!(
|
|
"Missing capability for syscall {} (required=0x{:X})",
|
|
syscall.name(), meta.caps
|
|
),
|
|
pc_at_syscall,
|
|
));
|
|
}
|
|
|
|
let args_count = syscall.args_count();
|
|
|
|
let mut args = Vec::with_capacity(args_count);
|
|
for _ in 0..args_count {
|
|
let v = self.pop().map_err(|_e| {
|
|
self.trap(
|
|
TRAP_STACK_UNDERFLOW,
|
|
OpCode::Syscall as u16,
|
|
"Syscall argument stack underflow".to_string(),
|
|
pc_at_syscall,
|
|
)
|
|
})?;
|
|
args.push(v);
|
|
}
|
|
args.reverse();
|
|
|
|
let stack_height_before = self.operand_stack.len();
|
|
let mut ret = crate::HostReturn::new(&mut self.operand_stack);
|
|
native.syscall(id, &args, &mut ret, ctx).map_err(|fault| match fault {
|
|
VmFault::Trap(code, msg) => {
|
|
self.trap(code, OpCode::Syscall as u16, msg, pc_at_syscall)
|
|
}
|
|
VmFault::Panic(msg) => LogicalFrameEndingReason::Panic(msg),
|
|
VmFault::Unavailable => {
|
|
LogicalFrameEndingReason::Panic("Host feature unavailable".into())
|
|
}
|
|
})?;
|
|
|
|
let stack_height_after = self.operand_stack.len();
|
|
let results_pushed = stack_height_after - stack_height_before;
|
|
if results_pushed != syscall.results_count() {
|
|
return Err(LogicalFrameEndingReason::Panic(format!(
|
|
"Syscall {} (0x{:08X}) results mismatch: expected {}, got {}",
|
|
syscall.name(),
|
|
id,
|
|
syscall.results_count(),
|
|
results_pushed
|
|
)));
|
|
}
|
|
}
|
|
OpCode::FrameSync => {
|
|
// Marks the logical end of a frame: consume cycles and signal to the driver
|
|
self.cycles += OpCode::FrameSync.cycles();
|
|
self.handle_safepoint();
|
|
return Err(LogicalFrameEndingReason::FrameSync);
|
|
}
|
|
}
|
|
|
|
// Apply the instruction cost to the cycle counter
|
|
self.cycles += opcode.cycles();
|
|
Ok(())
|
|
}
|
|
|
|
/// Perform safepoint duties that occur at logical frame boundaries.
|
|
/// Runs GC if thresholds are reached, clears cooperative yield flag,
|
|
/// and advances the logical tick counter.
|
|
fn handle_safepoint(&mut self) {
    // 1) GC Safepoint: only at FRAME_SYNC-like boundaries.
    //    A threshold of 0 disables automatic collection entirely.
    if self.gc_alloc_threshold > 0 {
        let live_now = self.heap.len();
        // Objects allocated since the last collection; saturating because the
        // heap may legitimately be smaller than the recorded baseline.
        let since_last = live_now.saturating_sub(self.last_gc_live_count);
        if since_last >= self.gc_alloc_threshold {
            // Collect GC roots from VM state via a throwaway visitor that
            // simply accumulates every heap reference it is shown.
            struct CollectRoots(Vec<prometeu_bytecode::HeapRef>);
            impl crate::roots::RootVisitor for CollectRoots {
                fn visit_heap_ref(&mut self, r: prometeu_bytecode::HeapRef) { self.0.push(r); }
            }
            let mut collector = CollectRoots(Vec::new());
            self.visit_roots(&mut collector);
            // Add current coroutine and all suspended (ready/sleeping) coroutines as GC roots,
            // since their saved stacks/frames live inside heap objects.
            if let Some(cur) = self.current_coro { collector.0.push(cur); }
            let mut coro_roots = self.heap.suspended_coroutine_handles();
            collector.0.append(&mut coro_roots);

            // Run mark-sweep
            self.heap.mark_from_roots(collector.0);
            self.heap.sweep();
            // Update baseline for next cycles
            self.last_gc_live_count = self.heap.len();
        }
    }

    // 2) Advance logical tick and wake sleepers.
    //    wrapping_add keeps the tick counter well-defined on overflow.
    self.current_tick = self.current_tick.wrapping_add(1);
    self.scheduler.wake_ready(self.current_tick);

    // 3) Apply pending transitions for the current coroutine (yield/sleep/finished).
    //    Priority order: sleep request > yield request > finished. `take()` on the
    //    sleep request both reads and clears it so it cannot fire twice.
    let mut switched_out = false;
    if let Some(cur) = self.current_coro {
        // Handle sleep request
        if let Some(wake) = self.sleep_requested_until.take() {
            if let Some(co) = self.heap.coroutine_data_mut(cur) {
                // Save execution context into the coroutine object.
                // mem::take moves the stacks out without cloning, leaving the
                // VM with empty (idle) stacks.
                co.pc = self.pc;
                co.stack = std::mem::take(&mut self.operand_stack);
                co.frames = std::mem::take(&mut self.call_stack);
                co.state = CoroutineState::Sleeping;
                co.wake_tick = wake;
            }
            self.scheduler.sleep_until(cur, wake);
            self.current_coro = None;
            self.scheduler.clear_current();
            switched_out = true;
        } else if self.yield_requested {
            // Cooperative yield: park the context and requeue as Ready.
            if let Some(co) = self.heap.coroutine_data_mut(cur) {
                co.pc = self.pc;
                co.stack = std::mem::take(&mut self.operand_stack);
                co.frames = std::mem::take(&mut self.call_stack);
                co.state = CoroutineState::Ready;
            }
            self.scheduler.enqueue_ready(cur);
            self.current_coro = None;
            self.scheduler.clear_current();
            switched_out = true;
        } else if self.halted || self.pc >= self.program.rom.len() {
            // Current finished; save final context and mark Finished
            if let Some(co) = self.heap.coroutine_data_mut(cur) {
                co.pc = self.pc;
                co.stack = std::mem::take(&mut self.operand_stack);
                co.frames = std::mem::take(&mut self.call_stack);
                co.state = CoroutineState::Finished;
            }
            self.current_coro = None;
            self.scheduler.clear_current();
            switched_out = true;
        } else {
            // Stays running; nothing to do
        }
    }

    // 4) Select next coroutine if needed
    if self.current_coro.is_none() {
        if let Some(next) = self.scheduler.dequeue_next() {
            // Load next context into the VM (mirror of the save above).
            if let Some(co) = self.heap.coroutine_data_mut(next) {
                self.pc = co.pc;
                self.operand_stack = std::mem::take(&mut co.stack);
                self.call_stack = std::mem::take(&mut co.frames);
                co.state = CoroutineState::Running;
            }
            self.current_coro = Some(next);
            self.scheduler.set_current(self.current_coro);
        } else {
            // Nothing ready now. If there are sleeping coroutines, we keep VM idle until next frame tick.
            // If there are no sleeping coroutines either (i.e., all finished), we can halt deterministically.
            if switched_out && !self.scheduler.has_sleeping() {
                self.halted = true;
            }
        }
    } else {
        // Keep current as scheduler current for observability
        self.scheduler.set_current(self.current_coro);
    }

    // 5) Clear cooperative yield request at the safepoint boundary.
    self.yield_requested = false;
}
|
|
|
|
// /// Save the currently running VM execution context back into its coroutine object.
|
|
// /// Must be called only at safepoints.
|
|
// fn save_current_context_into_coroutine(&mut self) {
|
|
// if let Some(cur) = self.current_coro {
|
|
// if let Some(co) = self.heap.coroutine_data_mut(cur) {
|
|
// co.pc = self.pc;
|
|
// co.stack = std::mem::take(&mut self.operand_stack);
|
|
// co.frames = std::mem::take(&mut self.call_stack);
|
|
// }
|
|
// }
|
|
// }
|
|
|
|
// /// Load a coroutine context from heap into the VM runtime state.
|
|
// /// Must be called only at safepoints.
|
|
// fn load_coroutine_context_into_vm(&mut self, coro: HeapRef) {
|
|
// if let Some(co) = self.heap.coroutine_data_mut(coro) {
|
|
// self.pc = co.pc;
|
|
// self.operand_stack = std::mem::take(&mut co.stack);
|
|
// self.call_stack = std::mem::take(&mut co.frames);
|
|
// co.state = CoroutineState::Running;
|
|
// }
|
|
// self.current_coro = Some(coro);
|
|
// self.scheduler.set_current(self.current_coro);
|
|
// }
|
|
|
|
pub fn trap(
|
|
&self,
|
|
code: u32,
|
|
opcode: u16,
|
|
message: String,
|
|
pc: u32,
|
|
) -> LogicalFrameEndingReason {
|
|
LogicalFrameEndingReason::Trap(self.program.create_trap(code, opcode, message, pc))
|
|
}
|
|
|
|
/// Push `val` onto the operand stack. Infallible: the stack grows on demand.
pub fn push(&mut self, val: Value) {
    self.operand_stack.push(val);
}
|
|
|
|
/// Pop and return the top value of the operand stack.
///
/// # Errors
/// Returns `Err("Stack underflow")` when the stack is empty.
pub fn pop(&mut self) -> Result<Value, String> {
    // ok_or_else keeps the error-String allocation off the success path:
    // pop() runs for nearly every executed instruction, and the eager
    // `ok_or("...".into())` form allocated the message even on success.
    self.operand_stack.pop().ok_or_else(|| "Stack underflow".into())
}
|
|
|
|
pub fn pop_number(&mut self) -> Result<f64, String> {
|
|
let val = self.pop()?;
|
|
val.as_float().ok_or_else(|| "Expected number".into())
|
|
}
|
|
|
|
pub fn pop_integer(&mut self) -> Result<i64, String> {
|
|
let val = self.pop()?;
|
|
if let Value::Boolean(b) = val {
|
|
return Ok(if b { 1 } else { 0 });
|
|
}
|
|
val.as_integer().ok_or_else(|| "Expected integer".into())
|
|
}
|
|
|
|
/// Borrow the top value of the operand stack without removing it.
///
/// # Errors
/// Returns `Err("Stack underflow")` when the stack is empty.
pub fn peek(&self) -> Result<&Value, String> {
    // Lazy error construction (ok_or_else): the eager `ok_or("...".into())`
    // form allocated the message String even when the peek succeeded.
    self.operand_stack.last().ok_or_else(|| "Stack underflow".into())
}
|
|
|
|
fn binary_op<F>(
|
|
&mut self,
|
|
opcode: OpCode,
|
|
start_pc: u32,
|
|
f: F,
|
|
) -> Result<(), LogicalFrameEndingReason>
|
|
where
|
|
F: FnOnce(Value, Value) -> Result<Value, OpError>,
|
|
{
|
|
let b = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
let a = self.pop().map_err(|e| LogicalFrameEndingReason::Panic(e))?;
|
|
match f(a, b) {
|
|
Ok(res) => {
|
|
self.push(res);
|
|
Ok(())
|
|
}
|
|
Err(OpError::Trap(code, msg)) => Err(self.trap(code, opcode as u16, msg, start_pc)),
|
|
Err(OpError::Panic(msg)) => Err(LogicalFrameEndingReason::Panic(msg)),
|
|
}
|
|
}
|
|
|
|
/// Visit all GC roots reachable from the VM state.
|
|
/// This includes:
|
|
/// - Entire operand stack values
|
|
/// - Locals/args in each call frame (derived from `stack_base` and function layout)
|
|
/// - Global variables
|
|
pub fn visit_roots<V: RootVisitor + ?Sized>(&self, visitor: &mut V) {
|
|
// 1) Operand stack (all values are roots)
|
|
for v in &self.operand_stack {
|
|
visit_value_for_roots(v, visitor);
|
|
}
|
|
|
|
// 2) Call frames: iterate locals/args range for each frame
|
|
for frame in &self.call_stack {
|
|
if let Some(func_meta) = self.program.functions.get(frame.func_idx) {
|
|
let start = frame.stack_base;
|
|
let frame_slots = (func_meta.param_slots as usize) + (func_meta.local_slots as usize);
|
|
let mut end = start.saturating_add(frame_slots);
|
|
// Clamp to current stack height just in case
|
|
if end > self.operand_stack.len() { end = self.operand_stack.len(); }
|
|
for i in start..end {
|
|
if let Some(v) = self.operand_stack.get(i) {
|
|
visit_value_for_roots(v, visitor);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// 3) Globals
|
|
for g in &self.globals {
|
|
visit_value_for_roots(g, visitor);
|
|
}
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
|
|
mod tests {
|
|
use super::*;
|
|
|
|
/// Build a minimal VM for tests: the entire ROM becomes function 0 and the
/// main coroutine is spawned on it, so raw opcode sequences can run directly.
fn new_test_vm(rom: Vec<u8>, constant_pool: Vec<Value>) -> VirtualMachine {
    let rom_len = rom.len() as u32;
    let mut vm = VirtualMachine::new(rom, constant_pool);
    // Single function covering the whole ROM; remaining metadata defaulted
    // (no params/locals/returns).
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom_len,
        ..Default::default()
    }]);
    // Ensure tests start with a properly initialized main coroutine at func 0
    vm.prepare_call("0");
    vm
}
|
|
use crate::HostReturn;
|
|
use prometeu_bytecode::{FunctionMeta, TRAP_INVALID_LOCAL, TRAP_STACK_UNDERFLOW};
|
|
use prometeu_hal::expect_int;
|
|
|
|
/// Host-interface stub for tests: every syscall succeeds immediately and
/// pushes no results.
struct MockNative;
impl NativeInterface for MockNative {
    fn syscall(
        &mut self,
        _id: u32,
        _args: &[Value],
        _ret: &mut HostReturn,
        _ctx: &mut HostContext,
    ) -> Result<(), VmFault> {
        Ok(())
    }
}
|
|
|
|
#[test]
fn sleep_delays_execution_by_ticks() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Program:
    //   SLEEP 2
    //   PUSH_I32 123
    //   FRAME_SYNC
    //   HALT
    // Opcodes are u16 little-endian, followed by their immediates.
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::Sleep as u16).to_le_bytes());
    rom.extend_from_slice(&(2u32).to_le_bytes());
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&123i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom, vec![]);

    // Frame 1: executing SLEEP 2 will force a frame end and advance tick to 1
    let rep1 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok");
    assert!(matches!(rep1.reason, LogicalFrameEndingReason::FrameSync));
    assert!(vm.operand_stack.is_empty());
    assert_eq!(vm.current_tick, 1);

    // Frame 2: still sleeping (tick 1 < wake 3), immediate FrameSync, tick -> 2
    let rep2 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok");
    assert!(matches!(rep2.reason, LogicalFrameEndingReason::FrameSync));
    // In the per-coroutine model, the VM may keep current context intact across idle frames;
    // we must not observe any new values pushed before wake. Stack height must be unchanged.
    assert_eq!(vm.operand_stack.len(), 0);
    assert_eq!(vm.current_tick, 2);

    // Frame 3: still sleeping (tick 2 < wake 3), immediate FrameSync, tick -> 3
    let rep3 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok");
    assert!(matches!(rep3.reason, LogicalFrameEndingReason::FrameSync));
    assert_eq!(vm.operand_stack.len(), 0);
    assert_eq!(vm.current_tick, 3);

    // Frame 4: wake condition met (current_tick >= wake), execute PUSH_I32 then FRAME_SYNC
    let rep4 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok");
    assert!(matches!(rep4.reason, LogicalFrameEndingReason::FrameSync));
    // Value should now be on the stack
    assert_eq!(vm.peek().unwrap(), &Value::Int32(123));

    // Next frame should hit HALT without errors
    let res = vm.run_budget(100, &mut native, &mut ctx);
    assert!(res.is_ok());
}
|
|
|
|
#[test]
fn test_deterministic_pc_and_tick_trace_across_runs() {
    // Program:
    //   PUSH_I32 1; YIELD; FrameSync;
    //   PUSH_I32 2; YIELD; FrameSync;
    //   PUSH_I32 3; FrameSync; HALT
    // We collect (pc, tick, stack_height, reason) after each run_budget slice and
    // compare two independent VMs initialized from the same ROM: the traces must
    // be byte-identical, proving slice-level determinism.
    let mut rom = Vec::new();
    // PUSH 1
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&1i32.to_le_bytes());
    // YIELD + FrameSync
    rom.extend_from_slice(&(OpCode::Yield as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    // PUSH 2
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&2i32.to_le_bytes());
    // YIELD + FrameSync
    rom.extend_from_slice(&(OpCode::Yield as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    // PUSH 3 + FrameSync + HALT
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&3i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm1 = new_test_vm(rom.clone(), vec![]);
    let mut vm2 = new_test_vm(rom.clone(), vec![]);

    let mut native = MockNative;
    let mut ctx1 = HostContext::new(None);
    let mut ctx2 = HostContext::new(None);

    let mut trace1 = Vec::new();
    let mut trace2 = Vec::new();

    // Run both VMs in lockstep slices until both halt.
    // 10 iterations is a safety cap well above the slices this program needs.
    for _ in 0..10 {
        if !vm1.halted {
            let rep = vm1.run_budget(4, &mut native, &mut ctx1).expect("vm1 ok");
            trace1.push((vm1.pc, vm1.current_tick, vm1.operand_stack.len(), format!("{:?}", rep.reason)));
        }
        if !vm2.halted {
            let rep = vm2.run_budget(4, &mut native, &mut ctx2).expect("vm2 ok");
            trace2.push((vm2.pc, vm2.current_tick, vm2.operand_stack.len(), format!("{:?}", rep.reason)));
        }
        if vm1.halted && vm2.halted { break; }
    }

    assert!(vm1.halted && vm2.halted, "Both VMs should reach HALT deterministically");
    assert_eq!(trace1, trace2, "Per-slice traces must be identical across runs");
    // Also verify final stack content deterministic (LIFO: 3, 2, 1).
    assert_eq!(vm1.pop().unwrap(), Value::Int32(3));
    assert_eq!(vm1.pop().unwrap(), Value::Int32(2));
    assert_eq!(vm1.pop().unwrap(), Value::Int32(1));
    assert!(vm1.operand_stack.is_empty());
    assert_eq!(vm2.pop().unwrap(), Value::Int32(3));
    assert_eq!(vm2.pop().unwrap(), Value::Int32(2));
    assert_eq!(vm2.pop().unwrap(), Value::Int32(1));
    assert!(vm2.operand_stack.is_empty());
}
|
|
|
|
#[test]
fn test_sleep_wake_determinism_across_runs() {
    // Program:
    //   SLEEP 2; PUSH 7; FrameSync; HALT
    // Two independent VMs run the same ROM slice-by-slice; their
    // (pc, tick, reason) traces must match, and both must end with 7 on top.
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::Sleep as u16).to_le_bytes());
    rom.extend_from_slice(&(2u32).to_le_bytes());
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&7i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut native = MockNative;
    let mut vm_a = new_test_vm(rom.clone(), vec![]);
    let mut vm_b = new_test_vm(rom.clone(), vec![]);
    let mut ctx_a = HostContext::new(None);
    let mut ctx_b = HostContext::new(None);

    let mut ticks_a = Vec::new();
    let mut ticks_b = Vec::new();

    // Slices 1-3 idle through the sleep; slice 4 wakes and pushes 7.
    // (Previously four hand-unrolled copies of the same slice block; the loop
    // preserves the exact run order: A runs, then B, then both are sampled.)
    for _ in 0..4 {
        let ra = vm_a.run_budget(100, &mut native, &mut ctx_a).unwrap();
        let rb = vm_b.run_budget(100, &mut native, &mut ctx_b).unwrap();
        ticks_a.push((vm_a.pc, vm_a.current_tick, format!("{:?}", ra.reason)));
        ticks_b.push((vm_b.pc, vm_b.current_tick, format!("{:?}", rb.reason)));
    }

    assert_eq!(ticks_a, ticks_b, "Sleep/wake slices must match across runs");
    assert_eq!(vm_a.peek().unwrap(), &Value::Int32(7));
    assert_eq!(vm_b.peek().unwrap(), &Value::Int32(7));
}
|
|
|
|
#[test]
fn test_gc_many_coroutines_and_wake_order_determinism() {
    // Stress test: allocate many coroutine objects (alternating Ready/Sleeping
    // states with staggered wake ticks) plus small byte objects, then verify
    // that GC triggered at FrameSync safepoints is deterministic and idempotent
    // (heap size shrinks at most once and is stable afterwards).
    use crate::heap::{CoroutineState};
    use crate::object::ObjectKind;

    // ROM: FrameSync; FrameSync; Halt (two deterministic safepoints back-to-back)
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.gc_alloc_threshold = 1; // force GC at first safepoint

    // Allocate many coroutine objects: half Ready, half Sleeping with differing wake ticks.
    let coro_count = 128u32;
    for i in 0..coro_count {
        // Even indices are Ready; odd indices Sleep with wake tick i/2.
        let state = if i % 2 == 0 { CoroutineState::Ready } else { CoroutineState::Sleeping };
        let wake = if state == CoroutineState::Sleeping { (i / 2) as u64 } else { 0 };
        let _c = vm.heap.allocate_coroutine(0, state, wake, vec![], vec![]);
        // Also allocate a tiny byte object to increase GC pressure.
        let _b = vm.heap.allocate_object(ObjectKind::Bytes, &[i as u8]);
    }

    // Sanity: allocations present
    assert!(vm.heap.len() as u32 >= coro_count, "heap should contain coroutine objects and bytes");

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Reaching FrameSync should run GC; Ready/Sleeping coroutines are treated as roots, so
    // only unreferenced byte objects can be reclaimed. We just assert determinism: heap size
    // should be stable across two consecutive FrameSyncs with no new allocations in between.
    let before = vm.heap.len();
    // First safepoint: step() surfaces FrameSync as an Err variant by design.
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }
    let after_first = vm.heap.len();
    // Second safepoint: no allocations happened in between, so GC must be a no-op.
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }
    let after_second = vm.heap.len();

    // GC effect should be deterministic and idempotent at steady state
    assert!(after_first <= before);
    assert_eq!(after_second, after_first);
}
|
|
// fn test_arithmetic_chain() {
|
|
// let mut native = MockNative;
|
|
// let mut ctx = HostContext::new(None);
|
|
//
|
|
// // (10 + 20) * 2 / 5 % 4 = 30 * 2 / 5 % 4 = 60 / 5 % 4 = 12 % 4 = 0
|
|
// // wait: (10 + 20) = 30. 30 * 2 = 60. 60 / 5 = 12. 12 % 4 = 0.
|
|
// let mut rom = Vec::new();
|
|
// rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&10i32.to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&20i32.to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::Add as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&2i32.to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::Mul as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&5i32.to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::Div as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&4i32.to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::Mod as u16).to_le_bytes());
|
|
// rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
|
|
//
|
|
// let mut vm = new_test_vm(rom.clone(), vec![]);
|
|
// vm.run_budget(100, &mut native, &mut ctx).unwrap();
|
|
//
|
|
// assert_eq!(vm.pop().unwrap(), Value::Int32(0));
|
|
// }
|
|
|
|
#[test]
fn test_div_by_zero_trap() {
    // Dividing 10 by 0 must raise TRAP_DIV_ZERO, attributed to the Div opcode.
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    let mut rom = Vec::new();
    for operand in [10i32, 0i32] {
        rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
        rom.extend_from_slice(&operand.to_le_bytes());
    }
    rom.extend_from_slice(&(OpCode::Div as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom, vec![]);
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(info) => {
            assert_eq!(info.code, TRAP_DIV_ZERO);
            assert_eq!(info.opcode, OpCode::Div as u16);
        }
        other => panic!("Expected Trap, got {:?}", other),
    }
}
|
|
|
|
#[test]
fn test_int_to_bound_checked_trap() {
    // Converting an i32 above 65535 via IntToBoundChecked must raise TRAP_OOB.
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&70000i32.to_le_bytes()); // out of bounded range (> 65535)
    rom.extend_from_slice(&(OpCode::IntToBoundChecked as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom, vec![]);
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(info) => {
            assert_eq!(info.code, TRAP_OOB);
            assert_eq!(info.opcode, OpCode::IntToBoundChecked as u16);
        }
        other => panic!("Expected Trap, got {:?}", other),
    }
}
|
|
|
|
#[test]
fn test_bounded_add_overflow_trap() {
    // 60000 + 10000 exceeds the bounded range, so Add must trap with TRAP_OOB.
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    let mut rom = Vec::new();
    for operand in [60000u32, 10000u32] {
        rom.extend_from_slice(&(OpCode::PushBounded as u16).to_le_bytes());
        rom.extend_from_slice(&operand.to_le_bytes());
    }
    rom.extend_from_slice(&(OpCode::Add as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom, vec![]);
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(info) => {
            assert_eq!(info.code, TRAP_OOB);
            assert_eq!(info.opcode, OpCode::Add as u16);
        }
        other => panic!("Expected Trap, got {:?}", other),
    }
}
|
|
|
|
#[test]
fn test_comparisons_polymorphic() {
    // Mixed-type comparison: Int32(10) < Float(20.5) must evaluate to true.
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&10i32.to_le_bytes());
    code.extend_from_slice(&(OpCode::PushF64 as u16).to_le_bytes());
    code.extend_from_slice(&20.5f64.to_le_bytes());
    code.extend_from_slice(&(OpCode::Lt as u16).to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    vm.run_budget(100, &mut host, &mut ctx).unwrap();
    assert_eq!(vm.pop().unwrap(), Value::Boolean(true));
}
|
|
|
|
#[test]
fn test_push_i64_immediate() {
    // A single PushI64 step leaves Int64(42) on top of the operand stack.
    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI64 as u16).to_le_bytes());
    code.extend_from_slice(&42i64.to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    vm.step(&mut host, &mut ctx).unwrap();
    assert_eq!(vm.peek().unwrap(), &Value::Int64(42));
}
|
|
|
|
#[test]
fn test_push_f64_immediate() {
    // A single PushF64 step leaves Float(3.14) on top of the operand stack.
    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushF64 as u16).to_le_bytes());
    code.extend_from_slice(&3.14f64.to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    vm.step(&mut host, &mut ctx).unwrap();
    assert_eq!(vm.peek().unwrap(), &Value::Float(3.14));
}
|
|
|
|
#[test]
fn test_push_bool_immediate() {
    // PushBool carries a one-byte immediate: 1 -> true, 0 -> false.
    let mut code = Vec::new();
    // True
    code.extend_from_slice(&(OpCode::PushBool as u16).to_le_bytes());
    code.push(1);
    // False
    code.extend_from_slice(&(OpCode::PushBool as u16).to_le_bytes());
    code.push(0);
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    vm.step(&mut host, &mut ctx).unwrap(); // Push true
    assert_eq!(vm.peek().unwrap(), &Value::Boolean(true));
    vm.step(&mut host, &mut ctx).unwrap(); // Push false
    assert_eq!(vm.peek().unwrap(), &Value::Boolean(false));
}
|
|
|
|
#[test]
fn test_push_const_string() {
    // PushConst with index 0 loads constant-pool entry 0 ("hello") onto the stack.
    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushConst as u16).to_le_bytes());
    code.extend_from_slice(&0u32.to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let pool = vec![Value::String("hello".into())];
    let mut vm = new_test_vm(code, pool);
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    vm.step(&mut host, &mut ctx).unwrap();
    assert_eq!(vm.peek().unwrap(), &Value::String("hello".into()));
}
|
|
|
|
#[test]
fn test_call_ret_scope_separation() {
    // Verifies that Call passes the pushed argument into the callee's local
    // slot 0, that SetLocal/GetLocal operate within the callee frame, and that
    // Ret delivers exactly one value back with no operand-stack residue.
    let mut rom = Vec::new();

    // entrypoint:
    // PUSH_I64 10
    // CALL func_id 1
    // HALT
    rom.extend_from_slice(&(OpCode::PushI64 as u16).to_le_bytes());
    rom.extend_from_slice(&10i64.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes()); // func_id 1
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    // Callee code begins right after the entrypoint bytes.
    let func_addr = rom.len();

    // func:
    // PUSH_I64 20
    // GET_LOCAL 0  -- should be 10 (arg)
    // ADD          -- 10 + 20 = 30
    // SET_LOCAL 0  -- store result in local 0 (the arg slot)
    // GET_LOCAL 0  -- read 30 back
    // RET
    rom.extend_from_slice(&(OpCode::PushI64 as u16).to_le_bytes());
    rom.extend_from_slice(&20i64.to_le_bytes());
    rom.extend_from_slice(&(OpCode::GetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Add as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::SetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::GetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());

    // Function table: entry 0 is the entrypoint; entry 1 takes one parameter
    // slot and returns one slot.
    let functions = vec![
        FunctionMeta { code_offset: 0, code_len: func_addr as u32, ..Default::default() },
        FunctionMeta {
            code_offset: func_addr as u32,
            code_len: (rom.len() - func_addr) as u32,
            param_slots: 1,
            return_slots: 1,
            ..Default::default()
        },
    ];

    let mut vm = VirtualMachine {
        program: ProgramImage::new(
            rom,
            vec![],
            functions,
            None,
            std::collections::HashMap::new(),
        ),
        ..Default::default()
    };
    vm.prepare_call("0");
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Run until Halt (step cap so a broken VM cannot hang the test)
    let mut steps = 0;
    while !vm.halted && steps < 100 {
        vm.step(&mut native, &mut ctx).unwrap();
        steps += 1;
    }

    assert!(vm.halted);
    assert_eq!(vm.pop_integer().unwrap(), 30);
    assert_eq!(vm.operand_stack.len(), 0);
    assert_eq!(vm.call_stack.len(), 1);
    // Scope frames removed: no scope stack to assert on
}
|
|
|
|
#[test]
fn test_ret_mandatory_value() {
    // Part 1: a function declaring return_slots = 1 that executes Ret without
    // having pushed a value must trap with TRAP_BAD_RET_SLOTS.
    let mut rom = Vec::new();
    // entrypoint: CALL func_id 1; HALT
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes()); // func_id 1
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let func_addr = rom.len();
    // func: RET (no value pushed beforehand)
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());

    let functions = vec![
        FunctionMeta { code_offset: 0, code_len: func_addr as u32, ..Default::default() },
        FunctionMeta {
            code_offset: func_addr as u32,
            code_len: (rom.len() - func_addr) as u32,
            param_slots: 0,
            return_slots: 1,
            ..Default::default()
        },
    ];

    let mut vm = VirtualMachine {
        program: ProgramImage::new(
            rom,
            vec![],
            functions,
            None,
            std::collections::HashMap::new(),
        ),
        ..Default::default()
    };
    vm.prepare_call("0");
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    vm.step(&mut native, &mut ctx).unwrap(); // CALL
    let res = vm.step(&mut native, &mut ctx); // RET -> should fail
    assert!(res.is_err());
    match res.unwrap_err() {
        LogicalFrameEndingReason::Trap(trap) => {
            assert_eq!(trap.code, TRAP_BAD_RET_SLOTS);
        }
        _ => panic!("Expected Trap(TRAP_BAD_RET_SLOTS)"),
    }

    // Part 2: same shape but the function pushes a value before Ret; the value
    // must be delivered to the caller's operand stack.
    let mut rom2 = Vec::new();
    rom2.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom2.extend_from_slice(&1u32.to_le_bytes());
    rom2.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
    let func_addr2 = rom2.len();
    rom2.extend_from_slice(&(OpCode::PushI64 as u16).to_le_bytes());
    rom2.extend_from_slice(&123i64.to_le_bytes());
    rom2.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());

    let functions2 = vec![
        FunctionMeta { code_offset: 0, code_len: func_addr2 as u32, ..Default::default() },
        FunctionMeta {
            code_offset: func_addr2 as u32,
            code_len: (rom2.len() - func_addr2) as u32,
            param_slots: 0,
            return_slots: 1,
            ..Default::default()
        },
    ];

    let mut vm2 = VirtualMachine {
        program: ProgramImage::new(
            rom2,
            vec![],
            functions2,
            None,
            std::collections::HashMap::new(),
        ),
        ..Default::default()
    };
    vm2.prepare_call("0");
    vm2.step(&mut native, &mut ctx).unwrap(); // CALL
    vm2.step(&mut native, &mut ctx).unwrap(); // PUSH_I64
    vm2.step(&mut native, &mut ctx).unwrap(); // RET

    assert_eq!(vm2.operand_stack.len(), 1);
    assert_eq!(vm2.pop().unwrap(), Value::Int64(123));
}
|
|
|
|
// Scope tests removed under PR-2.1 (scope frames eliminated)
|
|
|
|
#[test]
fn test_push_i32() {
    // A single PushI32 step leaves Int32(42) on top of the operand stack.
    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&42i32.to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    vm.step(&mut host, &mut ctx).unwrap();
    assert_eq!(vm.peek().unwrap(), &Value::Int32(42));
}
|
|
|
|
#[test]
fn test_bitwise_promotion() {
    // Bitwise ops keep i32 when both operands are i32, and produce i64 when
    // an i64 operand is involved.
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    // i32 & i32 -> i32: 0xF0 & 0x0F == 0
    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&0xF0i32.to_le_bytes());
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&0x0Fi32.to_le_bytes());
    code.extend_from_slice(&(OpCode::BitAnd as u16).to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    for _ in 0..3 {
        vm.step(&mut host, &mut ctx).unwrap();
    }
    assert_eq!(vm.pop().unwrap(), Value::Int32(0));

    // i32 | i64 -> i64: 0xF0 | 0x0F == 0xFF
    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&0xF0i32.to_le_bytes());
    code.extend_from_slice(&(OpCode::PushI64 as u16).to_le_bytes());
    code.extend_from_slice(&0x0Fi64.to_le_bytes());
    code.extend_from_slice(&(OpCode::BitOr as u16).to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    for _ in 0..3 {
        vm.step(&mut host, &mut ctx).unwrap();
    }
    assert_eq!(vm.pop().unwrap(), Value::Int64(0xFF));
}
|
|
|
|
#[test]
fn test_comparisons_lte_gte() {
    // 10 <= 20 and 20 >= 20 must both evaluate to Boolean(true).
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    // Each case is (lhs, rhs, comparison opcode); all expect true.
    for (lhs, rhs, op) in [(10i32, 20i32, OpCode::Lte), (20, 20, OpCode::Gte)] {
        let mut code = Vec::new();
        code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
        code.extend_from_slice(&lhs.to_le_bytes());
        code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
        code.extend_from_slice(&rhs.to_le_bytes());
        code.extend_from_slice(&(op as u16).to_le_bytes());
        code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

        let mut vm = new_test_vm(code, vec![]);
        vm.step(&mut host, &mut ctx).unwrap();
        vm.step(&mut host, &mut ctx).unwrap();
        vm.step(&mut host, &mut ctx).unwrap();
        assert_eq!(vm.pop().unwrap(), Value::Boolean(true));
    }
}
|
|
|
|
#[test]
fn test_negation() {
    // Neg on Int32(42) yields Int32(-42).
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&42i32.to_le_bytes());
    code.extend_from_slice(&(OpCode::Neg as u16).to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    vm.step(&mut host, &mut ctx).unwrap(); // PushI32
    vm.step(&mut host, &mut ctx).unwrap(); // Neg
    assert_eq!(vm.pop().unwrap(), Value::Int32(-42));
}
|
|
|
|
#[test]
fn test_jmp_if_true() {
    // JmpIfTrue with a true condition must transfer control to the absolute
    // byte offset in its operand (11), skipping the Halt at offset 9.
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Byte layout (offsets verified against the pc assertion below):
    // 0-1: PushBool
    // 2: 1 (u8)
    // 3-4: JmpIfTrue
    // 5-8: addr (u32)
    // 9-10: Halt (Offset 9)
    // 11-12: PushI32 (Offset 11)
    // 13-16: 100 (i32)
    // 17-18: Halt

    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::PushBool as u16).to_le_bytes());
    rom.push(1);
    rom.extend_from_slice(&(OpCode::JmpIfTrue as u16).to_le_bytes());
    rom.extend_from_slice(&(11u32).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes()); // Offset 9
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes()); // Offset 11
    rom.extend_from_slice(&100i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.step(&mut native, &mut ctx).unwrap(); // PushBool
    vm.step(&mut native, &mut ctx).unwrap(); // JmpIfTrue
    // Jump taken: pc now points at the PushI32 past the first Halt.
    assert_eq!(vm.pc, 11);
    vm.step(&mut native, &mut ctx).unwrap(); // PushI32
    assert_eq!(vm.pop().unwrap(), Value::Int32(100));
}
|
|
|
|
#[test]
fn test_trap_opcode() {
    // Trap behaves as a breakpoint: the run ends with Breakpoint, the PC sits
    // just past the Trap opcode, and the operand stack is left intact.
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&42i32.to_le_bytes());
    code.extend_from_slice(&(OpCode::Trap as u16).to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    let report = vm.run_budget(100, &mut host, &mut ctx).unwrap();

    assert_eq!(report.reason, LogicalFrameEndingReason::Breakpoint);
    assert_eq!(vm.pc, 8); // PushI32 (6 bytes) + Trap (2 bytes)
    assert_eq!(vm.peek().unwrap(), &Value::Int32(42));
}
|
|
|
|
#[test]
fn test_pop_n_opcode() {
    // Push 1, 2, 3 then PopN(2): only the bottom value (1) should remain.
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    let mut code = Vec::new();
    for v in [1i32, 2, 3] {
        code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
        code.extend_from_slice(&v.to_le_bytes());
    }
    code.extend_from_slice(&(OpCode::PopN as u16).to_le_bytes());
    code.extend_from_slice(&2u32.to_le_bytes());
    code.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(code, vec![]);
    vm.run_budget(100, &mut host, &mut ctx).unwrap();

    assert_eq!(vm.pop().unwrap(), Value::Int32(1));
    assert!(vm.pop().is_err()); // Stack should be empty
}
|
|
|
|
|
|
#[test]
fn test_entry_point_ret_with_prepare_call() {
    // PushI32 0 (0x17), Pop (0x11), then Ret (0x51).
    // A Ret from the entry-point frame prepared via prepare_call must end the
    // run with EndOfRom rather than trapping.
    let rom = vec![
        0x17, 0x00, // PushI32
        0x00, 0x00, 0x00, 0x00, // value 0
        0x11, 0x00, // Pop
        0x51, 0x00, // Ret
    ];
    let mut vm = VirtualMachine::new(rom.clone(), vec![]);
    // Register a single function spanning the whole ROM so prepare_call("0") resolves.
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);
    let mut ctx = HostContext::new(None);
    // Minimal native interface: every syscall is a no-op.
    struct TestNative;
    impl NativeInterface for TestNative {
        fn syscall(
            &mut self,
            _id: u32,
            _args: &[Value],
            _ret: &mut HostReturn,
            _ctx: &mut HostContext,
        ) -> Result<(), VmFault> {
            Ok(())
        }
    }
    let mut native = TestNative;

    vm.prepare_call("0");
    let result = vm.run_budget(100, &mut native, &mut ctx).expect("VM run failed");
    assert_eq!(result.reason, LogicalFrameEndingReason::EndOfRom);
}
|
|
|
|
#[test]
fn test_syscall_abi_multi_slot_return() {
    // A native handler that pushes three result slots where the syscall's
    // metadata expects a different count must end the run with a
    // "results mismatch" panic.
    let rom = vec![
        0x70, 0x00, // Syscall + Reserved
        0x01, 0x00, 0x00, 0x00, // Syscall ID 1
    ];

    // Handler deliberately returns three slots (bool, int, bounded).
    struct MultiReturnNative;
    impl NativeInterface for MultiReturnNative {
        fn syscall(
            &mut self,
            _id: u32,
            _args: &[Value],
            ret: &mut HostReturn,
            _ctx: &mut HostContext,
        ) -> Result<(), VmFault> {
            ret.push_bool(true);
            ret.push_int(42);
            ret.push_bounded(255)?;
            Ok(())
        }
    }

    let mut vm = VirtualMachine::new(rom.clone(), vec![]);
    // Single function covering the whole ROM so prepare_call("0") resolves.
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);
    let mut native = MultiReturnNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    // Ensure we have SYSTEM capability so we pass capability gate
    vm.set_capabilities(prometeu_hal::syscalls::caps::SYSTEM);
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    // Under PR5, VM enforces return-slot count based on SyscallMeta during syscall
    // execution. A mismatch yields a Panic with a descriptive message.
    match report.reason {
        LogicalFrameEndingReason::Panic(msg) => {
            assert!(msg.contains("results mismatch"));
        }
        _ => panic!("Expected Panic with results mismatch, got {:?}", report.reason),
    }
}
|
|
|
|
#[test]
fn test_syscall_abi_void_return() {
    // A syscall whose handler pushes no results must leave the pre-existing
    // operand stack untouched: the sentinel Int32(100) pushed before the run
    // is still on top afterwards, and nothing else remains.
    let rom = vec![
        0x70, 0x00, // Syscall + Reserved
        0x01, 0x00, 0x00, 0x00, // Syscall ID 1
    ];

    // Handler pushes nothing: a well-behaved void return.
    struct VoidReturnNative;
    impl NativeInterface for VoidReturnNative {
        fn syscall(
            &mut self,
            _id: u32,
            _args: &[Value],
            _ret: &mut HostReturn,
            _ctx: &mut HostContext,
        ) -> Result<(), VmFault> {
            Ok(())
        }
    }

    let mut vm = VirtualMachine::new(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);
    let mut native = VoidReturnNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    vm.operand_stack.push(Value::Int32(100)); // sentinel: must survive the run
    vm.run_budget(100, &mut native, &mut ctx).unwrap();

    assert_eq!(vm.pop().unwrap(), Value::Int32(100));
    assert!(vm.operand_stack.is_empty());
}
|
|
|
|
#[test]
fn test_syscall_arg_type_mismatch_trap() {
    // GfxClear (0x1001) takes 1 argument; pushing a Bool where the handler
    // expects an int must surface as TRAP_TYPE attributed to the Syscall opcode.
    let rom = vec![
        0x16, 0x00, // PushBool + Reserved
        0x01, // value 1 (true)
        0x70, 0x00, // Syscall + Reserved
        0x01, 0x10, 0x00, 0x00, // Syscall ID 0x1001
    ];

    // Handler validates that arg 0 is an integer via expect_int.
    struct ArgCheckNative;
    impl NativeInterface for ArgCheckNative {
        fn syscall(
            &mut self,
            _id: u32,
            args: &[Value],
            _ret: &mut HostReturn,
            _ctx: &mut HostContext,
        ) -> Result<(), VmFault> {
            expect_int(args, 0)?;
            Ok(())
        }
    }

    let mut vm = VirtualMachine::new(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);
    let mut native = ArgCheckNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    // Ensure we have GFX capability so we reach type checking inside native handler
    vm.set_capabilities(prometeu_hal::syscalls::caps::GFX);
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(trap) => {
            assert_eq!(trap.code, TRAP_TYPE);
            assert_eq!(trap.opcode, OpCode::Syscall as u16);
        }
        _ => panic!("Expected Trap, got {:?}", report.reason),
    }
}
|
|
|
|
#[test]
fn test_invalid_syscall_trap() {
    // Syscall ID 0xDEADBEEF is not registered: the VM must trap with
    // TRAP_INVALID_SYSCALL, mention "Unknown syscall", and report pc 0.
    let rom = vec![
        0x70, 0x00, // Syscall + Reserved
        0xEF, 0xBE, 0xAD, 0xDE, // 0xDEADBEEF
    ];
    let mut vm = new_test_vm(rom.clone(), vec![]);
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    // Grant GFX capability so the capability gate (checked first) does not mask
    // the unknown-syscall check. NOTE(review): the previous comment mentioned
    // "arg underflow", which belongs to test_syscall_arg_underflow_trap below;
    // confirm the intended gating order for unknown IDs.
    vm.set_capabilities(prometeu_hal::syscalls::caps::GFX);
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(trap) => {
            assert_eq!(trap.code, TRAP_INVALID_SYSCALL);
            assert_eq!(trap.opcode, OpCode::Syscall as u16);
            assert!(trap.message.contains("Unknown syscall"));
            assert_eq!(trap.pc, 0);
        }
        _ => panic!("Expected Trap, got {:?}", report.reason),
    }
}
|
|
|
|
#[test]
fn test_syscall_arg_underflow_trap() {
    // GfxClear (0x1001) expects 1 arg; calling it with an empty operand stack
    // must trap with TRAP_STACK_UNDERFLOW at pc 0.
    let rom = vec![
        0x70, 0x00, // Syscall + Reserved
        0x01, 0x10, 0x00, 0x00, // Syscall ID 0x1001
    ];
    let mut vm = new_test_vm(rom.clone(), vec![]);
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    // Grant GFX capability so arg underflow is checked (capability gate is first)
    vm.set_capabilities(prometeu_hal::syscalls::caps::GFX);
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(trap) => {
            assert_eq!(trap.code, TRAP_STACK_UNDERFLOW);
            assert_eq!(trap.opcode, OpCode::Syscall as u16);
            assert!(trap.message.contains("underflow"));
            assert_eq!(trap.pc, 0);
        }
        _ => panic!("Expected Trap, got {:?}", report.reason),
    }
}
|
|
|
|
#[test]
fn test_syscall_missing_capability_trap() {
    // Program: directly call GfxClear (0x1001). The capability gate runs before
    // argument checking, so with every capability stripped the run ends in a
    // "Missing capability" trap rather than a stack underflow.
    let code = vec![
        0x70, 0x00, // Syscall + Reserved
        0x01, 0x10, 0x00, 0x00, // Syscall ID 0x1001 (LE)
    ];

    let mut vm = new_test_vm(code.clone(), vec![]);
    vm.set_capabilities(0); // strip every capability
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut host, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(info) => {
            assert_eq!(info.code, TRAP_INVALID_SYSCALL);
            assert_eq!(info.opcode, OpCode::Syscall as u16);
            assert!(info.message.contains("Missing capability"));
            assert_eq!(info.pc, 0);
        }
        other => panic!("Expected Trap, got {:?}", other),
    }
}
|
|
|
|
#[test]
fn test_syscall_with_capability_success() {
    // Push arg 0 then call GfxClear (0x1001) with only GFX granted; any
    // non-trap outcome counts as success here.
    let mut code = Vec::new();
    code.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    code.extend_from_slice(&0i32.to_le_bytes());
    code.extend_from_slice(&(OpCode::Syscall as u16).to_le_bytes());
    code.extend_from_slice(&0x1001u32.to_le_bytes()); // GfxClear

    let mut vm = new_test_vm(code, vec![]);
    vm.set_capabilities(prometeu_hal::syscalls::caps::GFX); // grant only GFX
    let mut host = MockNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut host, &mut ctx).unwrap();

    if let LogicalFrameEndingReason::Trap(trap) = report.reason {
        panic!("Unexpected trap: {:?}", trap);
    }
}
|
|
|
|
#[test]
fn test_syscall_results_count_mismatch_panic() {
    // GfxClear565 (0x1010) expects 0 results; a handler that pushes one anyway
    // must cause a "results mismatch" panic.
    let rom = vec![
        0x17, 0x00, // PushI32
        0x00, 0x00, 0x00, 0x00, // value 0
        0x70, 0x00, // Syscall + Reserved
        0x10, 0x10, 0x00, 0x00, // Syscall ID 0x1010
    ];

    // Misbehaving handler used to provoke the mismatch.
    struct BadNative;
    impl NativeInterface for BadNative {
        fn syscall(
            &mut self,
            _id: u32,
            _args: &[Value],
            ret: &mut HostReturn,
            _ctx: &mut HostContext,
        ) -> Result<(), VmFault> {
            // Wrong: GfxClear565 is void but we push something
            ret.push_int(42);
            Ok(())
        }
    }

    let mut vm = new_test_vm(rom.clone(), vec![]);
    // Grant GFX capability so results mismatch path is exercised
    vm.set_capabilities(prometeu_hal::syscalls::caps::GFX);
    let mut native = BadNative;
    let mut ctx = HostContext::new(None);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    match report.reason {
        LogicalFrameEndingReason::Panic(msg) => assert!(msg.contains("results mismatch")),
        _ => panic!("Expected Panic, got {:?}", report.reason),
    }
}
|
|
|
|
/// Pushing a bounded value beyond the 16-bit range through `HostReturn`
/// must fail with an out-of-bounds trap.
#[test]
fn test_host_return_bounded_overflow_trap() {
    let mut backing = Vec::new();
    let mut ret = HostReturn::new(&mut backing);

    // 65536 is one past u16::MAX, so the bounded push must be rejected.
    let res = ret.push_bounded(65536);
    assert!(res.is_err());

    match res.err().unwrap() {
        VmFault::Trap(code, _) => assert_eq!(code, TRAP_OOB),
        _ => panic!("Expected Trap"),
    }
}
|
|
|
|
/// Loading an image whose magic bytes are wrong must fail with
/// `InvalidFormat` and leave the VM completely untouched.
#[test]
fn test_loader_hardening_invalid_magic() {
    let mut vm = VirtualMachine::default();

    // Four zero bytes cannot carry the "PBS\0" magic.
    let result = vm.initialize(vec![0, 0, 0, 0], "");
    assert_eq!(result, Err(VmInitError::InvalidFormat));

    // VM should remain empty: a failed load installs no ROM.
    assert_eq!(vm.program.rom.len(), 0);
}
|
|
|
|
/// A well-formed magic with a format version the loader does not know
/// must be rejected with `UnsupportedFormat`.
#[test]
fn test_loader_hardening_unsupported_version() {
    // Craft a 32-byte header: valid magic, then an unsupported version field.
    let mut header = vec![0u8; 32];
    header[0..4].copy_from_slice(b"PBS\0");
    header[4..6].copy_from_slice(&1u16.to_le_bytes()); // version 1 (unsupported)

    let mut vm = VirtualMachine::default();
    let res = vm.initialize(header, "");
    assert_eq!(res, Err(VmInitError::UnsupportedFormat));
}
|
|
|
|
/// A PBS v0 header that claims one section but supplies no section data must
/// fail with `ImageLoadFailed(UnexpectedEof)`.
#[test]
fn test_loader_hardening_malformed_pbs_v0() {
    let mut vm = VirtualMachine::default();
    let mut header = vec![0u8; 32];
    header[0..4].copy_from_slice(b"PBS\0");
    header[8..12].copy_from_slice(&1u32.to_le_bytes()); // 1 section claimed but none provided

    let res = vm.initialize(header, "");
    match res {
        Err(VmInitError::ImageLoadFailed(prometeu_bytecode::LoadError::UnexpectedEof)) => {}
        // Fix: the failure message previously referenced a non-existent
        // `PbsV0LoadFailed` variant; it now names the variant actually matched.
        _ => panic!("Expected ImageLoadFailed(UnexpectedEof), got {:?}", res),
    }
}
|
|
|
|
/// An entrypoint that does not exist in the loaded module must be rejected,
/// and the failed initialization must not leave partial state in the VM.
#[test]
fn test_loader_hardening_entrypoint_not_found() {
    // Valid empty PBS v0 module.
    let mut header = vec![0u8; 32];
    header[0..4].copy_from_slice(b"PBS\0");

    let mut vm = VirtualMachine::default();
    // Numeric entrypoint "10" is out of bounds for an empty ROM.
    let res = vm.initialize(header, "10");
    assert_eq!(res, Err(VmInitError::EntrypointNotFound));

    // VM state should not be updated on failure.
    assert_eq!(vm.pc, 0);
    assert_eq!(vm.program.rom.len(), 0);
}
|
|
|
|
/// A successful initialization must reset execution state (pc, cycles)
/// even if the VM held stale values beforehand.
#[test]
fn test_loader_hardening_successful_init() {
    let mut vm = VirtualMachine::default();
    vm.pc = 123; // deliberate pollution that initialize() must clear

    let mut header = vec![0u8; 32];
    header[0..4].copy_from_slice(b"PBS\0");

    assert!(vm.initialize(header, "").is_ok());
    assert_eq!(vm.pc, 0);
    assert_eq!(vm.program.rom.len(), 0);
    assert_eq!(vm.cycles, 0);
}
|
|
|
|
/// End-to-end check of the calling convention: two arguments pushed by the
/// caller become locals 0 and 1 of the callee, and its single return slot
/// lands back on the caller's operand stack.
#[test]
fn test_calling_convention_add() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // F0 (entry):
    //   PUSH_I32 10
    //   PUSH_I32 20
    //   CALL 1 (add)
    //   HALT
    // F1 (add):
    //   GET_LOCAL 0 (a)
    //   GET_LOCAL 1 (b)
    //   ADD
    //   RET (1 slot)

    let mut rom = Vec::new();
    // F0
    let f0_start = 0;
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&10i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&20i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
    let f0_len = rom.len() - f0_start;

    // F1
    let f1_start = rom.len() as u32;
    rom.extend_from_slice(&(OpCode::GetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::GetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Add as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());
    let f1_len = rom.len() as u32 - f1_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta {
            code_offset: f0_start as u32,
            code_len: f0_len as u32,
            ..Default::default()
        },
        FunctionMeta {
            code_offset: f1_start,
            code_len: f1_len,
            // The two caller-pushed values become the callee's params/locals.
            param_slots: 2,
            return_slots: 1,
            ..Default::default()
        },
    ]);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);
    // 10 + 20 returned by F1 must be on top of the caller's stack.
    assert_eq!(vm.operand_stack.last().unwrap(), &Value::Int32(30));
}
|
|
|
|
/// A callee declaring two return slots must leave both values, in push
/// order, on the caller's operand stack after RET.
#[test]
fn test_calling_convention_multi_slot_return() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // F0:
    //   CALL 1
    //   HALT
    // F1:
    //   PUSH_I32 100
    //   PUSH_I32 200
    //   RET (2 slots)

    let mut rom = Vec::new();
    // F0
    let f0_start = 0;
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
    let f0_len = rom.len() - f0_start;

    // F1
    let f1_start = rom.len() as u32;
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&100i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&200i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());
    let f1_len = rom.len() as u32 - f1_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta {
            code_offset: f0_start as u32,
            code_len: f0_len as u32,
            ..Default::default()
        },
        FunctionMeta {
            code_offset: f1_start,
            code_len: f1_len,
            param_slots: 0,
            return_slots: 2,
            ..Default::default()
        },
    ]);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);
    // Stack should be [100, 200]
    assert_eq!(vm.operand_stack.len(), 2);
    assert_eq!(vm.operand_stack[0], Value::Int32(100));
    assert_eq!(vm.operand_stack[1], Value::Int32(200));
}
|
|
|
|
/// A void call: the callee consumes its single parameter and returns no
/// slots, so the caller's operand stack ends empty.
#[test]
fn test_calling_convention_void_call() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // F0:
    //   PUSH_I32 42
    //   CALL 1
    //   HALT
    // F1:
    //   POP
    //   RET (0 slots)
    // NOTE(review): F1's body is just RET here — the "POP" in the sketch is
    // effected by param_slots: 1 consuming the argument; confirm intent.

    let mut rom = Vec::new();
    let f0_start = 0;
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&42i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
    let f0_len = rom.len() - f0_start;

    let f1_start = rom.len() as u32;
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());
    let f1_len = rom.len() as u32 - f1_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta {
            code_offset: f0_start as u32,
            code_len: f0_len as u32,
            ..Default::default()
        },
        FunctionMeta {
            code_offset: f1_start,
            code_len: f1_len,
            param_slots: 1,
            return_slots: 0,
            ..Default::default()
        },
    ]);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);
    // The argument was consumed and nothing was returned.
    assert_eq!(vm.operand_stack.len(), 0);
}
|
|
|
|
/// CALL with a function index outside the function table must trap with
/// TRAP_INVALID_FUNC, attributed to the Call opcode.
#[test]
fn test_trap_invalid_func() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // CALL 99 (invalid)
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&99u32.to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    // NOTE(review): unlike sibling tests there is no prepare_call("0") here;
    // presumably the VM executes from pc 0 by default — confirm intent.
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(trap) => {
            assert_eq!(trap.code, TRAP_INVALID_FUNC);
            assert_eq!(trap.opcode, OpCode::Call as u16);
        }
        _ => panic!("Expected Trap(TRAP_INVALID_FUNC), got {:?}", report.reason),
    }
}
|
|
|
|
/// RET must verify the stack height matches the function's declared
/// return_slots; a mismatch traps with TRAP_BAD_RET_SLOTS.
#[test]
fn test_trap_bad_ret_slots() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // F0: CALL 1; HALT
    // F1: PUSH_I32 42; RET (expected 0 slots)

    let mut rom = Vec::new();
    let f0_start = 0;
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
    let f0_len = rom.len() - f0_start;

    let f1_start = rom.len() as u32;
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&42i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());
    let f1_len = rom.len() as u32 - f1_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta {
            code_offset: f0_start as u32,
            code_len: f0_len as u32,
            ..Default::default()
        },
        FunctionMeta {
            code_offset: f1_start,
            code_len: f1_len,
            param_slots: 0,
            return_slots: 0, // ERROR: function pushes 42 but returns 0
            ..Default::default()
        },
    ]);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(trap) => {
            assert_eq!(trap.code, TRAP_BAD_RET_SLOTS);
            assert_eq!(trap.opcode, OpCode::Ret as u16);
            assert!(trap.message.contains("Incorrect stack height"));
        }
        _ => panic!("Expected Trap(TRAP_BAD_RET_SLOTS), got {:?}", report.reason),
    }
}
|
|
/// A value stored with SET_LOCAL must be retrievable with GET_LOCAL after
/// intervening stack traffic.
#[test]
fn test_locals_round_trip() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // PUSH_I32 42
    // SET_LOCAL 0
    // PUSH_I32 0 (garbage)
    // POP
    // GET_LOCAL 0
    // RET (1 slot)
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&42i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::SetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&0i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Pop as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::GetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: 0,
        // NOTE(review): the assembled body above is 28 bytes but code_len is
        // 20; the test expects EndOfRom, which suggests code_len is not
        // enforced for straight-line execution — confirm.
        code_len: 20,
        local_slots: 1,
        return_slots: 1,
        ..Default::default()
    }]);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(report.reason, LogicalFrameEndingReason::EndOfRom);
    // RET pops return values and pushes them back on the caller stack (which is the sentinel frame's stack here).
    assert_eq!(vm.operand_stack, vec![Value::Int32(42)]);
}
|
|
|
|
/// Locals must be zero-initialized (Null) on every call: a value written to
/// a local in one invocation must not leak into the next invocation.
#[test]
fn test_locals_per_call_isolation() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Function 0 (entry):
    //   CALL 1
    //   POP
    //   CALL 1
    //   HALT
    // Function 1:
    //   GET_LOCAL 0 (should be Null initially)
    //   PUSH_I32 42
    //   SET_LOCAL 0
    //   RET (1 slot: the initial Null)

    let mut rom = Vec::new();
    // F0
    let f0_start = 0;
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Pop as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Call as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
    let f0_len = rom.len() - f0_start;

    // F1
    let f1_start = rom.len() as u32;
    rom.extend_from_slice(&(OpCode::GetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&42i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::SetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Ret as u16).to_le_bytes());
    let f1_len = rom.len() as u32 - f1_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta {
            code_offset: f0_start as u32,
            code_len: f0_len as u32,
            ..Default::default()
        },
        FunctionMeta {
            code_offset: f1_start,
            code_len: f1_len,
            local_slots: 1,
            return_slots: 1,
            ..Default::default()
        },
    ]);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);

    // The last value on stack is the return of the second CALL 1,
    // which should be Value::Null because locals are zero-initialized on each call.
    assert_eq!(vm.operand_stack.last().unwrap(), &Value::Null);
}
|
|
|
|
/// GET_LOCAL with an index beyond the function's declared local slots must
/// trap with TRAP_INVALID_LOCAL instead of reading out of bounds.
#[test]
fn test_invalid_local_index_traps() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Function with 0 params, 1 local.
    // GET_LOCAL 1 (OOB)
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::GetLocal as u16).to_le_bytes());
    rom.extend_from_slice(&1u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: 0,
        code_len: 8,
        local_slots: 1,
        ..Default::default()
    }]);

    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    match report.reason {
        LogicalFrameEndingReason::Trap(trap) => {
            // NOTE(review): TRAP_INVALID_LOCAL is not in the file-head use
            // list; presumably imported by the test module — verify.
            assert_eq!(trap.code, TRAP_INVALID_LOCAL);
            assert_eq!(trap.opcode, OpCode::GetLocal as u16);
            assert!(trap.message.contains("out of bounds"));
        }
        _ => panic!("Expected Trap, got {:?}", report.reason),
    }
}
|
|
|
|
/// Nested conditionals: outer condition true, inner condition false, so the
/// inner else-branch (PUSH 2) must execute. Byte offsets in the comments are
/// absolute ROM positions (PushBool = 3 bytes, jumps/PushI32 = 6 bytes).
#[test]
fn test_nested_if() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // if (true) {
    //     if (false) {
    //         PUSH 1
    //     } else {
    //         PUSH 2
    //     }
    // } else {
    //     PUSH 3
    // }
    // HALT
    let mut rom = Vec::new();
    // 0: PUSH_BOOL true
    rom.extend_from_slice(&(OpCode::PushBool as u16).to_le_bytes());
    rom.push(1);
    // 3: JMP_IF_FALSE -> ELSE1 (offset 42)
    rom.extend_from_slice(&(OpCode::JmpIfFalse as u16).to_le_bytes());
    rom.extend_from_slice(&42u32.to_le_bytes());

    // INNER IF:
    // 9: PUSH_BOOL false
    rom.extend_from_slice(&(OpCode::PushBool as u16).to_le_bytes());
    rom.push(0);
    // 12: JMP_IF_FALSE -> ELSE2 (offset 30)
    rom.extend_from_slice(&(OpCode::JmpIfFalse as u16).to_le_bytes());
    rom.extend_from_slice(&30u32.to_le_bytes());
    // 18: PUSH_I32 1
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&1i32.to_le_bytes());
    // 24: JMP -> END (offset 48)
    rom.extend_from_slice(&(OpCode::Jmp as u16).to_le_bytes());
    rom.extend_from_slice(&48u32.to_le_bytes());

    // ELSE2:
    // 30: PUSH_I32 2
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&2i32.to_le_bytes());
    // 36: JMP -> END (offset 48)
    rom.extend_from_slice(&(OpCode::Jmp as u16).to_le_bytes());
    rom.extend_from_slice(&48u32.to_le_bytes());

    // ELSE1:
    // 42: PUSH_I32 3
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&3i32.to_le_bytes());

    // END:
    // 48: HALT
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    // We need to set up the function meta for absolute jumps to work correctly
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: 0,
        code_len: 50,
        ..Default::default()
    }]);
    vm.prepare_call("0");

    vm.run_budget(100, &mut native, &mut ctx).unwrap();
    // Outer-true / inner-false path leaves 2 on top of the stack.
    assert_eq!(vm.pop().unwrap(), Value::Int32(2));
}
|
|
|
|
/// An if/else whose branches are both empty must execute through to HALT
/// with nothing left on the operand stack (the condition is consumed).
#[test]
fn test_if_with_empty_branches() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // PUSH_BOOL true
    // JMP_IF_FALSE -> ELSE (offset 15)
    //   // Empty then
    //   JMP -> END (offset 15)
    // ELSE:
    //   // Empty else
    // END:
    // HALT
    let mut rom = Vec::new();
    // 0-2: PUSH_BOOL true
    rom.extend_from_slice(&(OpCode::PushBool as u16).to_le_bytes());
    rom.push(1);
    // 3-8: JMP_IF_FALSE -> 15
    rom.extend_from_slice(&(OpCode::JmpIfFalse as u16).to_le_bytes());
    rom.extend_from_slice(&15u32.to_le_bytes());
    // 9-14: JMP -> 15
    rom.extend_from_slice(&(OpCode::Jmp as u16).to_le_bytes());
    rom.extend_from_slice(&15u32.to_le_bytes());
    // 15-16: HALT
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: 0,
        code_len: 17,
        ..Default::default()
    }]);
    vm.prepare_call("0");

    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);
    assert_eq!(vm.operand_stack.len(), 0);
}
|
|
|
|
/// Conditional jumps are strictly typed: feeding JMP_IF_TRUE an Int32 (not a
/// Bool) must trap with TRAP_TYPE.
#[test]
fn test_jmp_if_non_boolean_trap() {
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // PUSH_I32 1
    // JMP_IF_TRUE 9
    // HALT
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&1i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::JmpIfTrue as u16).to_le_bytes());
    rom.extend_from_slice(&9u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: 0,
        code_len: 14,
        ..Default::default()
    }]);
    vm.prepare_call("0");

    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    match report.reason {
        LogicalFrameEndingReason::Trap(trap) => {
            assert_eq!(trap.code, TRAP_TYPE);
            assert_eq!(trap.opcode, OpCode::JmpIfTrue as u16);
            assert!(trap.message.contains("Expected boolean"));
        }
        _ => panic!("Expected Trap, got {:?}", report.reason),
    }
}
|
|
|
|
/// The collector must only run at FRAME_SYNC safepoints: an unreachable
/// object survives ordinary opcodes (NOP) and is reclaimed exactly when
/// FRAME_SYNC executes.
#[test]
fn test_gc_triggers_only_at_frame_sync() {
    use crate::object::ObjectKind;

    // ROM: NOP; FRAME_SYNC; HALT
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::Nop as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    // Set a very low threshold to trigger GC as soon as we hit FRAME_SYNC
    vm.gc_alloc_threshold = 1;

    // Allocate an unreachable object (no roots referencing it)
    let _orphan = vm.heap.allocate_object(ObjectKind::Bytes, &[1, 2, 3]);
    // +1 for the main coroutine allocated by new_test_vm
    assert_eq!(vm.heap.len(), 2);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Step 1: NOP — should not run GC
    vm.step(&mut native, &mut ctx).unwrap();
    assert_eq!(vm.heap.len(), 2, "GC must not run except at safepoints");

    // Step 2: FRAME_SYNC — GC should run and reclaim the unreachable object
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }
    // Main coroutine remains
    assert_eq!(vm.heap.len(), 1, "Unreachable object must be reclaimed at FRAME_SYNC");
}
|
|
|
|
/// At a FRAME_SYNC collection, objects reachable from the operand stack
/// (roots) must survive while unreferenced objects are reclaimed.
#[test]
fn test_gc_keeps_roots_and_collects_unreachable_at_frame_sync() {
    use crate::object::ObjectKind;

    // ROM: FRAME_SYNC; FRAME_SYNC; HALT
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    vm.gc_alloc_threshold = 1;

    // Allocate two objects; make one a root by placing it on the operand stack
    let rooted = vm.heap.allocate_object(ObjectKind::Bytes, &[9, 9]);
    let unreachable = vm.heap.allocate_object(ObjectKind::Bytes, &[8, 8, 8]);
    // +1 for main coroutine
    assert_eq!(vm.heap.len(), 3);
    vm.operand_stack.push(Value::HeapRef(rooted));

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Execute FRAME_SYNC: should trigger GC
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }

    // Rooted must survive; unreachable must be collected; main coroutine remains
    assert_eq!(vm.heap.len(), 2);
    assert!(vm.heap.is_valid(rooted));
    assert!(!vm.heap.is_valid(unreachable));
}
|
|
|
|
/// Two full allocate-then-collect cycles, each bounded by one FRAME_SYNC,
/// must behave deterministically: each sweep reclaims the fresh garbage
/// and leaves only the main coroutine.
#[test]
fn test_gc_simple_allocation_collection_cycle() {
    use crate::object::ObjectKind;

    // ROM: FRAME_SYNC; FRAME_SYNC; HALT
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    // Make GC trigger on any allocation delta
    vm.gc_alloc_threshold = 1;

    // Cycle 1: allocate one unreachable object
    let _h1 = vm.heap.allocate_object(ObjectKind::Bytes, &[1]);
    // +1 for main coroutine
    assert_eq!(vm.heap.len(), 2);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // FRAME_SYNC should collect it (first FRAME_SYNC)
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }
    // Main coroutine remains
    assert_eq!(vm.heap.len(), 1);

    // Cycle 2: allocate again and collect again deterministically
    let _h2 = vm.heap.allocate_object(ObjectKind::Bytes, &[2]);
    assert_eq!(vm.heap.len(), 2);
    // Second FRAME_SYNC should also be reached deterministically
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }
    assert_eq!(vm.heap.len(), 1);
}
|
|
|
|
/// Stress: 2048 unreferenced allocations must all be reclaimed by a single
/// FRAME_SYNC sweep, leaving only the main coroutine.
#[test]
fn test_gc_many_short_lived_objects_stress() {
    use crate::object::ObjectKind;

    // ROM: FRAME_SYNC; HALT
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    // Deterministic: trigger collection when any growth since last sweep occurs
    vm.gc_alloc_threshold = 1;

    // Allocate many small, unreferenced objects
    let count = 2048usize; // stress but still quick
    for i in 0..count {
        let byte = (i & 0xFF) as u8;
        let _ = vm.heap.allocate_object(ObjectKind::Bytes, &[byte]);
    }
    // +1 for main coroutine
    assert_eq!(vm.heap.len(), count + 1);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Single FRAME_SYNC should reclaim all since there are no roots
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }

    assert_eq!(vm.heap.len(), 1, "All short-lived objects except main coroutine must be reclaimed deterministically");
}
|
|
|
|
/// Suspended (Ready) coroutines are GC roots: an object held only on a
/// suspended coroutine's stack must survive collection, along with the
/// coroutine itself.
#[test]
fn test_gc_keeps_objects_captured_by_suspended_coroutines() {
    use crate::object::ObjectKind;
    use crate::heap::CoroutineState;

    // ROM: FRAME_SYNC; HALT (trigger GC at safepoint)
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    // Trigger GC at first FRAME_SYNC
    vm.gc_alloc_threshold = 1;

    // Allocate a heap object and a suspended coroutine that captures it on its stack
    let captured = vm.heap.allocate_object(ObjectKind::Bytes, &[0xAA, 0xBB]);
    let _coro = vm.heap.allocate_coroutine(
        0,
        CoroutineState::Ready,
        0,
        vec![Value::HeapRef(captured)],
        vec![],
    );

    assert_eq!(vm.heap.len(), 3, "object + suspended coroutine + main coroutine must be allocated");

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // FRAME_SYNC: GC runs and should keep both alive via suspended coroutine root
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }

    assert!(vm.heap.is_valid(captured), "captured object must remain alive");
    // Captured object + suspended coroutine + main coroutine
    assert_eq!(vm.heap.len(), 3, "both coroutine and captured object must survive (plus main)");
}
|
|
|
|
/// A coroutine in the Finished state is no longer a GC root: with no
/// external references it must be reclaimed at the next FRAME_SYNC.
#[test]
fn test_gc_collects_finished_coroutine() {
    use crate::heap::CoroutineState;

    // ROM: FRAME_SYNC; HALT (trigger GC at safepoint)
    let mut rom = Vec::new();
    rom.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![prometeu_bytecode::FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    vm.gc_alloc_threshold = 1;

    // Allocate a finished coroutine with no external references
    let finished = vm.heap.allocate_coroutine(0, CoroutineState::Finished, 0, vec![], vec![]);
    assert!(vm.heap.is_valid(finished));

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // FRAME_SYNC: GC should collect the finished coroutine since it's not a root
    match vm.step(&mut native, &mut ctx) {
        Err(LogicalFrameEndingReason::FrameSync) => {}
        other => panic!("Expected FrameSync, got {:?}", other),
    }

    assert!(!vm.heap.is_valid(finished), "finished coroutine must be collected");
    // Main coroutine remains allocated
    assert_eq!(vm.heap.len(), 1, "only main coroutine should remain");
}
|
|
|
|
/// Two spawned coroutines that Yield each frame must be scheduled in strict
/// alternation. Progress is observed indirectly: each coroutine pushes one
/// value per frame, so a growing stack identifies which one ran.
#[test]
fn test_coroutines_strict_alternation_with_yield() {
    use prometeu_bytecode::FunctionMeta;

    // Build function A: PUSH 1; YIELD; FRAME_SYNC; JMP 0 (loop)
    let mut fn_a = Vec::new();
    fn_a.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    fn_a.extend_from_slice(&1i32.to_le_bytes());
    fn_a.extend_from_slice(&(OpCode::Yield as u16).to_le_bytes());
    fn_a.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    fn_a.extend_from_slice(&(OpCode::Jmp as u16).to_le_bytes());
    fn_a.extend_from_slice(&0u32.to_le_bytes());

    // Build function B: PUSH 2; YIELD; FRAME_SYNC; JMP 0 (loop)
    let mut fn_b = Vec::new();
    fn_b.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    fn_b.extend_from_slice(&2i32.to_le_bytes());
    fn_b.extend_from_slice(&(OpCode::Yield as u16).to_le_bytes());
    fn_b.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    fn_b.extend_from_slice(&(OpCode::Jmp as u16).to_le_bytes());
    fn_b.extend_from_slice(&0u32.to_le_bytes());

    // Main: SPAWN A; SPAWN B; SLEEP 100; HALT
    let mut main = Vec::new();
    main.extend_from_slice(&(OpCode::Spawn as u16).to_le_bytes());
    main.extend_from_slice(&1u32.to_le_bytes()); // fn A idx
    main.extend_from_slice(&0u32.to_le_bytes()); // arg count
    main.extend_from_slice(&(OpCode::Spawn as u16).to_le_bytes());
    main.extend_from_slice(&2u32.to_le_bytes()); // fn B idx
    main.extend_from_slice(&0u32.to_le_bytes()); // arg count
    main.extend_from_slice(&(OpCode::Sleep as u16).to_le_bytes());
    main.extend_from_slice(&100u32.to_le_bytes());
    main.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    // Assemble ROM: [main][A][B]
    let off_main = 0usize;
    let off_a = main.len();
    let off_b = off_a + fn_a.len();
    let mut rom = Vec::with_capacity(main.len() + fn_a.len() + fn_b.len());
    rom.extend_from_slice(&main);
    rom.extend_from_slice(&fn_a);
    rom.extend_from_slice(&fn_b);

    // VM with three functions (0=main, 1=A, 2=B)
    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Frame 1: main sleeps; from now on A and B should strictly alternate.
    let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    // Locate coroutine handles for A (fn_idx=1) and B (fn_idx=2)
    let mut a_href = None;
    let mut b_href = None;
    // Consider currently running coroutine
    if let Some(cur) = vm.current_coro {
        if let Some(f) = vm.call_stack.last() {
            if f.func_idx == 1 { a_href = Some(cur); }
            if f.func_idx == 2 { b_href = Some(cur); }
        }
    }
    // And also consider suspended (Ready/Sleeping) coroutines
    for h in vm.heap.suspended_coroutine_handles() {
        if let Some(co) = vm.heap.coroutine_data(h) {
            if let Some(f) = co.frames.last() {
                if f.func_idx == 1 { a_href = Some(h); }
                if f.func_idx == 2 { b_href = Some(h); }
            }
        }
    }
    let a_href = a_href.expect("coroutine A not found");
    let b_href = b_href.expect("coroutine B not found");

    // Snapshot stack depths; a later increase means that coroutine ran a frame.
    let mut prev_a = vm.heap.coroutine_data(a_href).unwrap().stack.len();
    let mut prev_b = vm.heap.coroutine_data(b_href).unwrap().stack.len();

    let mut trace = Vec::new();
    for _ in 0..6 {
        let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
        let a_now = vm.heap.coroutine_data(a_href).unwrap().stack.len();
        let b_now = vm.heap.coroutine_data(b_href).unwrap().stack.len();
        if a_now > prev_a { trace.push(1); }
        else if b_now > prev_b { trace.push(2); }
        else { panic!("no coroutine progress detected this frame"); }
        prev_a = a_now; prev_b = b_now;
    }

    assert_eq!(trace, vec![1, 2, 1, 2, 1, 2], "Coroutines must strictly alternate under Yield");
}
|
|
|
|
#[test]
fn test_sleep_does_not_stall_others_and_wakes_at_exact_tick() {
    use prometeu_bytecode::FunctionMeta;

    // Function A (idx 1): SLEEP N; PUSH 100; YIELD; FRAME_SYNC; HALT
    // A suspends itself for `sleep_n` ticks, then pushes once and terminates.
    let sleep_n: u32 = 3;
    let mut fn_a = Vec::new();
    fn_a.extend_from_slice(&(OpCode::Sleep as u16).to_le_bytes());
    fn_a.extend_from_slice(&sleep_n.to_le_bytes());
    fn_a.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    fn_a.extend_from_slice(&100i32.to_le_bytes());
    fn_a.extend_from_slice(&(OpCode::Yield as u16).to_le_bytes());
    fn_a.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    fn_a.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    // Function B (idx 2): PUSH 1; YIELD; FRAME_SYNC; JMP 0
    // B loops forever, growing its stack by one value every frame.
    let mut fn_b = Vec::new();
    fn_b.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    fn_b.extend_from_slice(&1i32.to_le_bytes());
    fn_b.extend_from_slice(&(OpCode::Yield as u16).to_le_bytes());
    fn_b.extend_from_slice(&(OpCode::FrameSync as u16).to_le_bytes());
    fn_b.extend_from_slice(&(OpCode::Jmp as u16).to_le_bytes());
    fn_b.extend_from_slice(&0u32.to_le_bytes());

    // Main (idx 0): SPAWN A; SPAWN B; SLEEP big; HALT — parks itself so only A/B run.
    let mut main = Vec::new();
    main.extend_from_slice(&(OpCode::Spawn as u16).to_le_bytes());
    main.extend_from_slice(&1u32.to_le_bytes());
    main.extend_from_slice(&0u32.to_le_bytes());
    main.extend_from_slice(&(OpCode::Spawn as u16).to_le_bytes());
    main.extend_from_slice(&2u32.to_le_bytes());
    main.extend_from_slice(&0u32.to_le_bytes());
    main.extend_from_slice(&(OpCode::Sleep as u16).to_le_bytes());
    main.extend_from_slice(&100u32.to_le_bytes());
    main.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    // ROM layout: [main | fn_a | fn_b]; offsets feed the FunctionMeta table below.
    let off_main = 0usize;
    let off_a = main.len();
    let off_b = off_a + fn_a.len();
    let mut rom = Vec::new();
    rom.extend_from_slice(&main);
    rom.extend_from_slice(&fn_a);
    rom.extend_from_slice(&fn_b);

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Frame 1: main spawns A and B, then sleeps; the tick advances to 1.
    let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(vm.current_tick, 1);

    // Identify A and B coroutine handles. The running coroutine's frames live on
    // the VM call stack; suspended ones keep their frames on the heap, so both
    // places must be checked.
    let mut a_href = None;
    let mut b_href = None;
    if let Some(cur) = vm.current_coro {
        if let Some(f) = vm.call_stack.last() {
            if f.func_idx == 1 { a_href = Some(cur); }
            if f.func_idx == 2 { b_href = Some(cur); }
        }
    }
    for h in vm.heap.suspended_coroutine_handles() {
        if let Some(co) = vm.heap.coroutine_data(h) {
            if let Some(f) = co.frames.last() {
                if f.func_idx == 1 { a_href = Some(h); }
                if f.func_idx == 2 { b_href = Some(h); }
            }
        }
    }
    let a_href = a_href.expect("A not found");
    let b_href = b_href.expect("B not found");

    // Count how many frames B runs while A sleeps, using the scheduler's next-to-run
    // handle. Stop when A is scheduled again after its SLEEP, execute that frame, and
    // record its end-of-frame tick.
    let mut ones_before = 0usize;
    let mut woke_at_tick = 0u64;
    // A must be scheduled once just to *execute* its SLEEP instruction; only its
    // second scheduling counts as the actual wake-up.
    let mut seen_a_once = false;
    for _ in 0..1000 {
        if let Some(next) = vm.scheduler.current() {
            if next == a_href {
                if seen_a_once {
                    // A has slept before and is about to run again (wake). Run and record.
                    let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
                    woke_at_tick = vm.current_tick;
                    break;
                } else {
                    // First time A runs (to execute SLEEP N); do not count as wake yet.
                    // Fall through to the run_budget below so the SLEEP executes.
                    seen_a_once = true;
                }
            } else if next == b_href {
                ones_before += 1;
            }
        }
        let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    }

    // Canonical semantics: wake_tick = current_tick_at_sleep + N + 1.
    // The scheduler wakes sleepers at the end of that tick, so the coroutine runs
    // in the following frame, and we observe its heap stack update at end tick = wake_tick + 1.
    // A executes SLEEP at its first run (tick 1), so wake_tick = 1 + N + 1, observed tick = +1.
    let expected_observed_end_tick = 1u64 + sleep_n as u64 + 2u64;
    assert_eq!(woke_at_tick, expected_observed_end_tick, "A must wake at the exact tick (+1 frame to observe)");
    // And B must have produced at least N items (one per frame) before A's wake.
    assert!(ones_before as u64 >= sleep_n as u64, "B must keep running while A sleeps");
}
|
|
|
|
#[test]
fn test_multi_coroutine_determinism_across_runs() {
    use prometeu_bytecode::FunctionMeta;

    /// Append an opcode as a little-endian u16.
    fn op(buf: &mut Vec<u8>, o: OpCode) {
        buf.extend_from_slice(&(o as u16).to_le_bytes());
    }
    /// Append a little-endian u32 immediate.
    fn imm32(buf: &mut Vec<u8>, v: u32) {
        buf.extend_from_slice(&v.to_le_bytes());
    }
    /// Worker template: PUSH_I32 k; YIELD; FRAME_SYNC; JMP 0 — pushes one value per frame, forever.
    fn worker(k: i32) -> Vec<u8> {
        let mut code = Vec::new();
        op(&mut code, OpCode::PushI32);
        code.extend_from_slice(&k.to_le_bytes());
        op(&mut code, OpCode::Yield);
        op(&mut code, OpCode::FrameSync);
        op(&mut code, OpCode::Jmp);
        imm32(&mut code, 0);
        code
    }

    // Two workers distinguished only by the constant they push each frame.
    let fn_a = worker(1);
    let fn_b = worker(2);

    // Main: spawn A (fn 1) and B (fn 2) with no args, then park so only the workers run.
    let mut main = Vec::new();
    op(&mut main, OpCode::Spawn);
    imm32(&mut main, 1); // fn idx of A
    imm32(&mut main, 0); // arg count
    op(&mut main, OpCode::Spawn);
    imm32(&mut main, 2); // fn idx of B
    imm32(&mut main, 0); // arg count
    op(&mut main, OpCode::Sleep);
    imm32(&mut main, 100);
    op(&mut main, OpCode::Halt);

    // ROM layout: [main | fn_a | fn_b].
    let off_main = 0usize;
    let off_a = main.len();
    let off_b = off_a + fn_a.len();
    let rom: Vec<u8> = [main.as_slice(), fn_a.as_slice(), fn_b.as_slice()].concat();

    // Two independent VMs loaded with the exact same program and metadata.
    let mut vm1 = new_test_vm(rom.clone(), vec![]);
    let mut vm2 = new_test_vm(rom.clone(), vec![]);
    let fm: std::sync::Arc<[prometeu_bytecode::FunctionMeta]> = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);
    vm1.program.functions = fm.clone();
    vm2.program.functions = fm;

    let mut native = MockNative;
    let mut c1 = HostContext::new(None);
    let mut c2 = HostContext::new(None);

    // Burn the first frame in both VMs (main spawns the workers and sleeps).
    let _ = vm1.run_budget(100, &mut native, &mut c1).unwrap();
    let _ = vm2.run_budget(100, &mut native, &mut c2).unwrap();

    // Locate the A/B coroutine handles in a VM, whether currently running or suspended.
    let find_ab = |vm: &VirtualMachine| {
        let mut a = None;
        let mut b = None;
        // The running coroutine keeps its frames on the VM call stack.
        if let (Some(cur), Some(f)) = (vm.current_coro, vm.call_stack.last()) {
            if f.func_idx == 1 { a = Some(cur); }
            if f.func_idx == 2 { b = Some(cur); }
        }
        // Suspended coroutines carry their saved frames on the heap.
        for h in vm.heap.suspended_coroutine_handles() {
            if let Some(f) = vm.heap.coroutine_data(h).and_then(|co| co.frames.last()) {
                if f.func_idx == 1 { a = Some(h); }
                if f.func_idx == 2 { b = Some(h); }
            }
        }
        (a.expect("A missing"), b.expect("B missing"))
    };
    let (a1, b1) = find_ab(&vm1);
    let (a2, b2) = find_ab(&vm2);

    // Progress is detected via stack growth of each worker between frames.
    let mut a1_prev = vm1.heap.coroutine_data(a1).unwrap().stack.len();
    let mut b1_prev = vm1.heap.coroutine_data(b1).unwrap().stack.len();
    let mut a2_prev = vm2.heap.coroutine_data(a2).unwrap().stack.len();
    let mut b2_prev = vm2.heap.coroutine_data(b2).unwrap().stack.len();

    let mut trace1 = Vec::new();
    let mut trace2 = Vec::new();
    for _ in 0..8 {
        // Step VM 1 one frame and record which worker advanced.
        let _ = vm1.run_budget(100, &mut native, &mut c1).unwrap();
        let a_now = vm1.heap.coroutine_data(a1).unwrap().stack.len();
        let b_now = vm1.heap.coroutine_data(b1).unwrap().stack.len();
        trace1.push(if a_now > a1_prev {
            1
        } else if b_now > b1_prev {
            2
        } else {
            panic!("no progress 1")
        });
        a1_prev = a_now;
        b1_prev = b_now;

        // Step VM 2 identically.
        let _ = vm2.run_budget(100, &mut native, &mut c2).unwrap();
        let a2_now = vm2.heap.coroutine_data(a2).unwrap().stack.len();
        let b2_now = vm2.heap.coroutine_data(b2).unwrap().stack.len();
        trace2.push(if a2_now > a2_prev {
            1
        } else if b2_now > b2_prev {
            2
        } else {
            panic!("no progress 2")
        });
        a2_prev = a2_now;
        b2_prev = b2_now;
    }

    assert_eq!(trace1, trace2, "Execution trace (coroutine IDs) must match exactly across runs");
}
|
|
|
|
#[test]
fn test_gc_with_suspended_coroutine_runtime() {
    use crate::object::ObjectKind;
    use prometeu_bytecode::FunctionMeta;

    /// Append an opcode as a little-endian u16.
    fn op(buf: &mut Vec<u8>, o: OpCode) {
        buf.extend_from_slice(&(o as u16).to_le_bytes());
    }

    // Coroutine F (idx 1): SLEEP 10; FRAME_SYNC; HALT.
    // It parks while still holding its single argument on its stack.
    let mut fn_f = Vec::new();
    op(&mut fn_f, OpCode::Sleep);
    fn_f.extend_from_slice(&10u32.to_le_bytes());
    op(&mut fn_f, OpCode::FrameSync);
    op(&mut fn_f, OpCode::Halt);

    // Main (idx 0): SPAWN F passing one argument (the preloaded HeapRef); FRAME_SYNC; HALT.
    let mut main = Vec::new();
    op(&mut main, OpCode::Spawn);
    main.extend_from_slice(&1u32.to_le_bytes()); // func idx
    main.extend_from_slice(&1u32.to_le_bytes()); // arg count = 1
    op(&mut main, OpCode::FrameSync);
    op(&mut main, OpCode::Halt);

    // ROM layout: [main | fn_f].
    let off_main = 0usize;
    let off_f = main.len();
    let rom: Vec<u8> = [main.as_slice(), fn_f.as_slice()].concat();

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        // F takes one parameter (the HeapRef), which stays on its stack while it sleeps.
        FunctionMeta { code_offset: off_f as u32, code_len: fn_f.len() as u32, param_slots: 1, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);

    // Force a collection at the first safepoint to stress retention.
    vm.gc_alloc_threshold = 1;

    // Allocate an object and preload it onto main's operand stack so SPAWN consumes it as the arg.
    let captured = vm.heap.allocate_object(ObjectKind::Bytes, &[0xAB, 0xCD, 0xEF]);
    vm.operand_stack.push(Value::HeapRef(captured));

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // SPAWN moves the HeapRef into the sleeping coroutine; FRAME_SYNC then triggers GC.
    let rep = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert!(matches!(rep.reason, LogicalFrameEndingReason::FrameSync));

    // The object must survive: the sleeping coroutine's stack still references it.
    assert!(vm.heap.is_valid(captured), "captured object must remain alive while coroutine sleeps");
}
|
|
|
|
#[test]
fn test_make_closure_zero_captures() {
    use prometeu_bytecode::{FunctionMeta, Value};

    // ROM: MAKE_CLOSURE fn_id=7 capture_count=0; HALT
    let rom: Vec<u8> = [
        (OpCode::MakeClosure as u16).to_le_bytes().to_vec(),
        7u32.to_le_bytes().to_vec(), // fn_id
        0u32.to_le_bytes().to_vec(), // capture_count
        (OpCode::Halt as u16).to_le_bytes().to_vec(),
    ]
    .concat();

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);
    // One step for MAKE_CLOSURE, one for HALT.
    vm.step(&mut native, &mut ctx).unwrap();
    vm.step(&mut native, &mut ctx).unwrap();

    assert!(vm.halted);
    assert_eq!(vm.operand_stack.len(), 1);

    // The closure lands on the stack as a valid HeapRef carrying the requested
    // fn id and an empty capture environment.
    let href = match vm.peek().unwrap().clone() {
        Value::HeapRef(h) => h,
        _ => panic!("Expected HeapRef on stack"),
    };
    assert!(vm.heap.is_valid(href));
    assert_eq!(vm.heap.closure_fn_id(href), Some(7));
    let env = vm.heap.closure_env_slice(href).expect("env slice");
    assert!(env.is_empty());
}
|
|
|
|
#[test]
fn test_make_closure_multiple_captures_and_order() {
    use prometeu_bytecode::{FunctionMeta, Value};

    // PUSH_I32 1; PUSH_I32 2; PUSH_I32 3;   stack: [1, 2, 3]
    // MAKE_CLOSURE fn_id=9 cap=3;           pops 3, 2, 1 -> env preserves push order [1, 2, 3]
    // HALT
    let mut rom: Vec<u8> = Vec::new();
    for k in 1..=3i32 {
        rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
        rom.extend_from_slice(&k.to_le_bytes());
    }
    rom.extend_from_slice(&(OpCode::MakeClosure as u16).to_le_bytes());
    rom.extend_from_slice(&9u32.to_le_bytes()); // fn_id
    rom.extend_from_slice(&3u32.to_le_bytes()); // capture_count
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: 0,
        code_len: rom.len() as u32,
        ..Default::default()
    }]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);
    // Run the program to completion.
    while !vm.halted {
        vm.step(&mut native, &mut ctx).unwrap();
    }

    // After HALT only the closure ref remains; its env must preserve capture order.
    assert_eq!(vm.operand_stack.len(), 1);
    let href = match vm.pop().unwrap() {
        Value::HeapRef(h) => h,
        _ => panic!("Expected HeapRef"),
    };
    assert_eq!(vm.heap.closure_fn_id(href), Some(9));
    let env = vm.heap.closure_env_slice(href).expect("env slice");
    assert_eq!(env.len(), 3);
    for (i, expected) in [1, 2, 3].into_iter().enumerate() {
        assert_eq!(env[i], Value::Int32(expected));
    }
}
|
|
|
|
#[test]
fn test_call_closure_returns_constant() {
    use prometeu_bytecode::{FunctionMeta, Value};

    /// Append an opcode as a little-endian u16.
    fn op(buf: &mut Vec<u8>, o: OpCode) {
        buf.extend_from_slice(&(o as u16).to_le_bytes());
    }

    // F0 (entry): MAKE_CLOSURE fn=1 cap=0; CALL_CLOSURE argc=0; HALT
    // F1 (callee): PUSH_I32 7; RET
    let mut rom = Vec::new();
    let f0_start = 0usize;
    op(&mut rom, OpCode::MakeClosure);
    rom.extend_from_slice(&1u32.to_le_bytes()); // fn_id
    rom.extend_from_slice(&0u32.to_le_bytes()); // capture_count
    op(&mut rom, OpCode::CallClosure);
    rom.extend_from_slice(&0u32.to_le_bytes()); // argc = 0 user args
    op(&mut rom, OpCode::Halt);
    let f0_len = rom.len() - f0_start;

    let f1_start = rom.len() as u32;
    op(&mut rom, OpCode::PushI32);
    rom.extend_from_slice(&7i32.to_le_bytes());
    op(&mut rom, OpCode::Ret);
    let f1_len = rom.len() as u32 - f1_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: f0_start as u32, code_len: f0_len as u32, ..Default::default() },
        // NOTE(review): param_slots = 1 with argc = 0 presumably accounts for an
        // implicit closure-self slot — confirm against CALL_CLOSURE semantics.
        FunctionMeta { code_offset: f1_start, code_len: f1_len, param_slots: 1, return_slots: 1, ..Default::default() },
    ]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);
    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    // The callee's constant must be the only value left behind.
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);
    assert_eq!(vm.operand_stack.len(), 1);
    assert_eq!(vm.operand_stack[0], Value::Int32(7));
}
|
|
|
|
#[test]
fn test_call_closure_with_captures_ignored() {
    use prometeu_bytecode::{FunctionMeta, Value};

    /// Append an opcode as a little-endian u16.
    fn op(buf: &mut Vec<u8>, o: OpCode) {
        buf.extend_from_slice(&(o as u16).to_le_bytes());
    }

    // F0: PUSH_I32 123; MAKE_CLOSURE fn=1 cap=1; CALL_CLOSURE 0; HALT
    // F1: PUSH_I32 42; RET — never reads its captured env.
    let mut rom = Vec::new();
    let f0_start = 0usize;
    op(&mut rom, OpCode::PushI32);
    rom.extend_from_slice(&123i32.to_le_bytes());
    op(&mut rom, OpCode::MakeClosure);
    rom.extend_from_slice(&1u32.to_le_bytes()); // fn_id
    rom.extend_from_slice(&1u32.to_le_bytes()); // capture_count
    op(&mut rom, OpCode::CallClosure);
    rom.extend_from_slice(&0u32.to_le_bytes()); // argc = 0
    op(&mut rom, OpCode::Halt);
    let f0_len = rom.len() - f0_start;

    let f1_start = rom.len() as u32;
    op(&mut rom, OpCode::PushI32);
    rom.extend_from_slice(&42i32.to_le_bytes());
    op(&mut rom, OpCode::Ret);
    let f1_len = rom.len() as u32 - f1_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: f0_start as u32, code_len: f0_len as u32, ..Default::default() },
        FunctionMeta { code_offset: f1_start, code_len: f1_len, param_slots: 1, return_slots: 1, ..Default::default() },
    ]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);
    vm.prepare_call("0");
    let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    // The capture (123) was consumed into the closure; only F1's return value remains.
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);
    assert_eq!(vm.operand_stack, vec![Value::Int32(42)]);
}
|
|
|
|
#[test]
fn test_call_closure_on_non_closure_traps() {
    use prometeu_bytecode::FunctionMeta;

    // F0: PUSH_I32 1; CALL_CLOSURE 0; HALT — the call target is a plain Int32,
    // so CALL_CLOSURE must raise TRAP_TYPE before the trailing HALT is reached.
    let mut rom = Vec::new();
    let f0_start = 0usize;
    rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
    rom.extend_from_slice(&1i32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::CallClosure as u16).to_le_bytes());
    rom.extend_from_slice(&0u32.to_le_bytes());
    rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
    let f0_len = rom.len() - f0_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
        code_offset: f0_start as u32,
        code_len: f0_len as u32,
        ..Default::default()
    }]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);
    vm.prepare_call("0");
    let report = vm.run_budget(10, &mut native, &mut ctx).unwrap();

    // Extract the trap info; any other ending reason is a failure.
    let info = match report.reason {
        LogicalFrameEndingReason::Trap(info) => info,
        other => panic!("Expected Trap(TYPE) from CALL_CLOSURE on non-closure, got {:?}", other),
    };
    assert_eq!(info.code, TRAP_TYPE);
    assert_eq!(info.opcode, OpCode::CallClosure as u16);
}
|
|
|
|
#[test]
fn test_nested_call_closure() {
    use prometeu_bytecode::{FunctionMeta, Value};

    /// Append an opcode as a little-endian u16.
    fn op(buf: &mut Vec<u8>, o: OpCode) {
        buf.extend_from_slice(&(o as u16).to_le_bytes());
    }
    /// Append a little-endian u32 immediate.
    fn imm32(buf: &mut Vec<u8>, v: u32) {
        buf.extend_from_slice(&v.to_le_bytes());
    }

    // F0: MAKE_CLOSURE fn=1 cap=0; CALL_CLOSURE 0; CALL_CLOSURE 0; HALT
    // F1: MAKE_CLOSURE fn=2 cap=0; RET   — a closure factory
    // F2: PUSH_I32 55; RET               — the closure the factory hands back
    let mut rom = Vec::new();

    let f0_start = 0usize;
    op(&mut rom, OpCode::MakeClosure);
    imm32(&mut rom, 1); // fn_id = 1
    imm32(&mut rom, 0); // cap = 0
    op(&mut rom, OpCode::CallClosure);
    imm32(&mut rom, 0); // argc = 0 -> leaves F1's returned closure on the stack
    op(&mut rom, OpCode::CallClosure);
    imm32(&mut rom, 0); // argc = 0 -> calls the closure F1 returned (F2)
    op(&mut rom, OpCode::Halt);
    let f0_len = rom.len() - f0_start;

    let f1_start = rom.len() as u32;
    op(&mut rom, OpCode::MakeClosure);
    imm32(&mut rom, 2); // fn_id = 2
    imm32(&mut rom, 0); // cap = 0
    op(&mut rom, OpCode::Ret); // return the freshly made HeapRef
    let f1_len = rom.len() as u32 - f1_start;

    let f2_start = rom.len() as u32;
    op(&mut rom, OpCode::PushI32);
    rom.extend_from_slice(&55i32.to_le_bytes());
    op(&mut rom, OpCode::Ret);
    let f2_len = rom.len() as u32 - f2_start;

    let mut vm = new_test_vm(rom.clone(), vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: f0_start as u32, code_len: f0_len as u32, ..Default::default() },
        FunctionMeta { code_offset: f1_start, code_len: f1_len, param_slots: 1, return_slots: 1, ..Default::default() },
        FunctionMeta { code_offset: f2_start, code_len: f2_len, param_slots: 1, return_slots: 1, ..Default::default() },
    ]);

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);
    vm.prepare_call("0");
    let report = vm.run_budget(200, &mut native, &mut ctx).unwrap();

    // Final state: only F2's constant survives both nested calls.
    assert_eq!(report.reason, LogicalFrameEndingReason::Halted);
    assert_eq!(vm.operand_stack, vec![Value::Int32(55)]);
}
|
|
}
|