This commit is contained in:
bQUARKz 2026-02-20 14:21:11 +00:00
parent f6d33cd8e5
commit c07a1cc230
Signed by: bquarkz
SSH Key Fingerprint: SHA256:Z7dgqoglWwoK6j6u4QC87OveEq74WOhFN+gitsxtkf8
2 changed files with 416 additions and 11 deletions

View File

@ -52,7 +52,8 @@ impl Scheduler {
// ---------- Sleeping operations ---------- // ---------- Sleeping operations ----------
/// Put a coroutine to sleep until `wake_tick` (inclusive). /// Put a coroutine to sleep until `wake_tick`.
/// A coroutine is woken when `current_tick >= wake_tick`.
/// The sleeping list is kept stably ordered to guarantee determinism. /// The sleeping list is kept stably ordered to guarantee determinism.
pub fn sleep_until(&mut self, coro: HeapRef, wake_tick: u64) { pub fn sleep_until(&mut self, coro: HeapRef, wake_tick: u64) {
let entry = SleepEntry { wake_tick, seq: self.next_seq, coro }; let entry = SleepEntry { wake_tick, seq: self.next_seq, coro };

View File

@ -89,7 +89,16 @@ pub struct VirtualMachine {
/// Cooperative scheduler: set to true when `YIELD` opcode is executed. /// Cooperative scheduler: set to true when `YIELD` opcode is executed.
/// The runtime/scheduler should only act on this at safepoints (FRAME_SYNC). /// The runtime/scheduler should only act on this at safepoints (FRAME_SYNC).
pub yield_requested: bool, pub yield_requested: bool,
/// If set, the current coroutine requested to sleep until this tick (inclusive). /// Absolute wake tick requested by the currently running coroutine (when it executes `SLEEP`).
///
/// Canonical rule (authoritative):
/// - `SLEEP N` suspends the coroutine for exactly N full scheduler ticks AFTER the current
/// `FRAME_SYNC` completes. If `SLEEP` is executed during tick `T`, the coroutine must resume
/// in the frame whose end-of-frame tick will be `T + N + 1`.
/// - Implementation detail: we compute `wake_tick = current_tick + duration + 1` at the time
/// `SLEEP` executes. The scheduler wakes sleeping coroutines when `current_tick >= wake_tick`.
///
/// This definition is deterministic and eliminates off-by-one ambiguity.
pub sleep_requested_until: Option<u64>, pub sleep_requested_until: Option<u64>,
/// Logical tick counter advanced at each FRAME_SYNC boundary. /// Logical tick counter advanced at each FRAME_SYNC boundary.
pub current_tick: u64, pub current_tick: u64,
@ -261,14 +270,22 @@ impl VirtualMachine {
func_idx, func_idx,
}); });
// Initialize the main coroutine to own the current execution context. // Initialize the main coroutine object.
// State = Running; not enqueued into ready queue. // IMPORTANT INVARIANT:
// - The RUNNING coroutine's authoritative execution state lives in the VM fields
// (pc, operand_stack, call_stack).
// - The heap-side coroutine object is authoritative ONLY when the coroutine is suspended
// (Ready/Sleeping/Finished). While running, its `stack`/`frames` should be empty.
//
// Therefore we do NOT clone the VM stacks into the heap here. We create the main
// coroutine object with empty stack/frames and mark it as Running, and the VM already
// holds the live execution context initialized above.
let main_href = self.heap.allocate_coroutine( let main_href = self.heap.allocate_coroutine(
self.pc, self.pc,
CoroutineState::Running, CoroutineState::Running,
0, 0,
self.operand_stack.clone(), Vec::new(),
self.call_stack.clone(), Vec::new(),
); );
self.current_coro = Some(main_href); self.current_coro = Some(main_href);
self.scheduler.set_current(self.current_coro); self.scheduler.set_current(self.current_coro);
@ -524,11 +541,22 @@ impl VirtualMachine {
// Do not end the slice here; we continue executing until a safepoint. // Do not end the slice here; we continue executing until a safepoint.
} }
OpCode::Sleep => { OpCode::Sleep => {
// Immediate is duration in ticks // Immediate is duration in ticks.
//
// Canonical semantics:
// SLEEP N => suspend for exactly N full scheduler ticks AFTER the current
// FRAME_SYNC completes. If executed at tick T, resume in the frame whose
// end-of-frame tick will be T + N + 1.
//
// Implementation rule:
// wake_tick = current_tick + duration + 1
let duration = instr let duration = instr
.imm_u32() .imm_u32()
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))? as u64; .map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))? as u64;
let wake = self.current_tick.saturating_add(duration); let wake = self
.current_tick
.saturating_add(duration)
.saturating_add(1);
self.sleep_requested_until = Some(wake); self.sleep_requested_until = Some(wake);
// End the logical frame right after the instruction completes // End the logical frame right after the instruction completes
@ -1521,7 +1549,7 @@ mod tests {
assert!(vm.operand_stack.is_empty()); assert!(vm.operand_stack.is_empty());
assert_eq!(vm.current_tick, 1); assert_eq!(vm.current_tick, 1);
// Frame 2: still sleeping (tick 1 < wake 2), immediate FrameSync, tick -> 2 // Frame 2: still sleeping (tick 1 < wake 3), immediate FrameSync, tick -> 2
let rep2 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok"); let rep2 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok");
assert!(matches!(rep2.reason, LogicalFrameEndingReason::FrameSync)); assert!(matches!(rep2.reason, LogicalFrameEndingReason::FrameSync));
// In the per-coroutine model, the VM may keep current context intact across idle frames; // In the per-coroutine model, the VM may keep current context intact across idle frames;
@ -1529,9 +1557,15 @@ mod tests {
assert_eq!(vm.operand_stack.len(), 0); assert_eq!(vm.operand_stack.len(), 0);
assert_eq!(vm.current_tick, 2); assert_eq!(vm.current_tick, 2);
// Frame 3: wake condition met (current_tick >= wake), execute PUSH_I32 then FRAME_SYNC // Frame 3: still sleeping (tick 2 < wake 3), immediate FrameSync, tick -> 3
let rep3 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok"); let rep3 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok");
assert!(matches!(rep3.reason, LogicalFrameEndingReason::FrameSync)); assert!(matches!(rep3.reason, LogicalFrameEndingReason::FrameSync));
assert_eq!(vm.operand_stack.len(), 0);
assert_eq!(vm.current_tick, 3);
// Frame 4: wake condition met (current_tick >= wake), execute PUSH_I32 then FRAME_SYNC
let rep4 = vm.run_budget(100, &mut native, &mut ctx).expect("run ok");
assert!(matches!(rep4.reason, LogicalFrameEndingReason::FrameSync));
// Value should now be on the stack // Value should now be on the stack
assert_eq!(vm.peek().unwrap(), &Value::Int32(123)); assert_eq!(vm.peek().unwrap(), &Value::Int32(123));
@ -1636,12 +1670,18 @@ mod tests {
ticks_a.push((vm_a.pc, vm_a.current_tick, format!("{:?}", ra2.reason))); ticks_a.push((vm_a.pc, vm_a.current_tick, format!("{:?}", ra2.reason)));
ticks_b.push((vm_b.pc, vm_b.current_tick, format!("{:?}", rb2.reason))); ticks_b.push((vm_b.pc, vm_b.current_tick, format!("{:?}", rb2.reason)));
// Slice 3 (wakes and pushes) // Slice 3
let ra3 = vm_a.run_budget(100, &mut native, &mut ctx_a).unwrap(); let ra3 = vm_a.run_budget(100, &mut native, &mut ctx_a).unwrap();
let rb3 = vm_b.run_budget(100, &mut native, &mut ctx_b).unwrap(); let rb3 = vm_b.run_budget(100, &mut native, &mut ctx_b).unwrap();
ticks_a.push((vm_a.pc, vm_a.current_tick, format!("{:?}", ra3.reason))); ticks_a.push((vm_a.pc, vm_a.current_tick, format!("{:?}", ra3.reason)));
ticks_b.push((vm_b.pc, vm_b.current_tick, format!("{:?}", rb3.reason))); ticks_b.push((vm_b.pc, vm_b.current_tick, format!("{:?}", rb3.reason)));
// Slice 4 (wakes and pushes)
let ra4 = vm_a.run_budget(100, &mut native, &mut ctx_a).unwrap();
let rb4 = vm_b.run_budget(100, &mut native, &mut ctx_b).unwrap();
ticks_a.push((vm_a.pc, vm_a.current_tick, format!("{:?}", ra4.reason)));
ticks_b.push((vm_b.pc, vm_b.current_tick, format!("{:?}", rb4.reason)));
assert_eq!(ticks_a, ticks_b, "Sleep/wake slices must match across runs"); assert_eq!(ticks_a, ticks_b, "Sleep/wake slices must match across runs");
assert_eq!(vm_a.peek().unwrap(), &Value::Int32(7)); assert_eq!(vm_a.peek().unwrap(), &Value::Int32(7));
assert_eq!(vm_b.peek().unwrap(), &Value::Int32(7)); assert_eq!(vm_b.peek().unwrap(), &Value::Int32(7));
@ -3374,6 +3414,370 @@ mod tests {
assert_eq!(vm.heap.len(), 1, "only main coroutine should remain"); assert_eq!(vm.heap.len(), 1, "only main coroutine should remain");
} }
#[test]
fn test_coroutines_strict_alternation_with_yield() {
    use prometeu_bytecode::FunctionMeta;

    // Worker body shared by A and B: PUSH k; YIELD; FRAME_SYNC; JMP 0 (infinite loop).
    // Each frame the coroutine pushes one value and yields, so its heap-side stack
    // length acts as a progress counter the test can observe.
    let worker = |k: i32| -> Vec<u8> {
        [
            &(OpCode::PushI32 as u16).to_le_bytes()[..],
            &k.to_le_bytes()[..],
            &(OpCode::Yield as u16).to_le_bytes()[..],
            &(OpCode::FrameSync as u16).to_le_bytes()[..],
            &(OpCode::Jmp as u16).to_le_bytes()[..],
            &0u32.to_le_bytes()[..],
        ]
        .concat()
    };
    let fn_a = worker(1);
    let fn_b = worker(2);

    // Main: SPAWN A; SPAWN B; SLEEP 100; HALT — main parks itself so only A/B run.
    let main_code: Vec<u8> = [
        &(OpCode::Spawn as u16).to_le_bytes()[..],
        &1u32.to_le_bytes()[..], // fn A idx
        &0u32.to_le_bytes()[..], // arg count
        &(OpCode::Spawn as u16).to_le_bytes()[..],
        &2u32.to_le_bytes()[..], // fn B idx
        &0u32.to_le_bytes()[..], // arg count
        &(OpCode::Sleep as u16).to_le_bytes()[..],
        &100u32.to_le_bytes()[..],
        &(OpCode::Halt as u16).to_le_bytes()[..],
    ]
    .concat();

    // Assemble ROM: [main][A][B]
    let off_a = main_code.len();
    let off_b = off_a + fn_a.len();
    let rom = [main_code.as_slice(), fn_a.as_slice(), fn_b.as_slice()].concat();

    // VM with three functions (0=main, 1=A, 2=B).
    // NOTE: `rom` is moved here — the previous `rom.clone()` was a redundant copy.
    let mut vm = new_test_vm(rom, vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: 0, code_len: main_code.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Frame 1: main executes SPAWN/SPAWN/SLEEP; from now on A and B should strictly alternate.
    let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();

    // Locate coroutine handles for A (fn_idx=1) and B (fn_idx=2).
    // The RUNNING coroutine's frames live in the VM's call_stack; suspended
    // (Ready/Sleeping) coroutines keep their frames on the heap.
    let mut a_href = None;
    let mut b_href = None;
    if let Some(cur) = vm.current_coro {
        if let Some(f) = vm.call_stack.last() {
            if f.func_idx == 1 { a_href = Some(cur); }
            if f.func_idx == 2 { b_href = Some(cur); }
        }
    }
    for h in vm.heap.suspended_coroutine_handles() {
        if let Some(co) = vm.heap.coroutine_data(h) {
            if let Some(f) = co.frames.last() {
                if f.func_idx == 1 { a_href = Some(h); }
                if f.func_idx == 2 { b_href = Some(h); }
            }
        }
    }
    let a_href = a_href.expect("coroutine A not found");
    let b_href = b_href.expect("coroutine B not found");

    // Progress metric: length of a coroutine's heap-side operand stack.
    let stack_len = |vm: &VirtualMachine, h| vm.heap.coroutine_data(h).unwrap().stack.len();
    let mut prev_a = stack_len(&vm, a_href);
    let mut prev_b = stack_len(&vm, b_href);

    // Run 6 frames; exactly one coroutine must make progress per frame, alternating.
    let mut trace = Vec::new();
    for _ in 0..6 {
        let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
        let a_now = stack_len(&vm, a_href);
        let b_now = stack_len(&vm, b_href);
        if a_now > prev_a {
            trace.push(1);
        } else if b_now > prev_b {
            trace.push(2);
        } else {
            panic!("no coroutine progress detected this frame");
        }
        prev_a = a_now;
        prev_b = b_now;
    }
    assert_eq!(trace, vec![1, 2, 1, 2, 1, 2], "Coroutines must strictly alternate under Yield");
}
#[test]
fn test_sleep_does_not_stall_others_and_wakes_at_exact_tick() {
    use prometeu_bytecode::FunctionMeta;

    // Function A (idx 1): SLEEP N; PUSH 100; YIELD; FRAME_SYNC; HALT
    let sleep_n: u32 = 3;
    let fn_a: Vec<u8> = [
        &(OpCode::Sleep as u16).to_le_bytes()[..],
        &sleep_n.to_le_bytes()[..],
        &(OpCode::PushI32 as u16).to_le_bytes()[..],
        &100i32.to_le_bytes()[..],
        &(OpCode::Yield as u16).to_le_bytes()[..],
        &(OpCode::FrameSync as u16).to_le_bytes()[..],
        &(OpCode::Halt as u16).to_le_bytes()[..],
    ]
    .concat();

    // Function B (idx 2): PUSH 1; YIELD; FRAME_SYNC; JMP 0 — pushes one value every frame.
    let fn_b: Vec<u8> = [
        &(OpCode::PushI32 as u16).to_le_bytes()[..],
        &1i32.to_le_bytes()[..],
        &(OpCode::Yield as u16).to_le_bytes()[..],
        &(OpCode::FrameSync as u16).to_le_bytes()[..],
        &(OpCode::Jmp as u16).to_le_bytes()[..],
        &0u32.to_le_bytes()[..],
    ]
    .concat();

    // Main (idx 0): SPAWN A; SPAWN B; SLEEP big (parks main); HALT
    let main_code: Vec<u8> = [
        &(OpCode::Spawn as u16).to_le_bytes()[..],
        &1u32.to_le_bytes()[..],
        &0u32.to_le_bytes()[..],
        &(OpCode::Spawn as u16).to_le_bytes()[..],
        &2u32.to_le_bytes()[..],
        &0u32.to_le_bytes()[..],
        &(OpCode::Sleep as u16).to_le_bytes()[..],
        &100u32.to_le_bytes()[..],
        &(OpCode::Halt as u16).to_le_bytes()[..],
    ]
    .concat();

    let off_a = main_code.len();
    let off_b = off_a + fn_a.len();
    let rom = [main_code.as_slice(), fn_a.as_slice(), fn_b.as_slice()].concat();

    // `rom` is moved — the previous `rom.clone()` was a redundant copy.
    let mut vm = new_test_vm(rom, vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: 0, code_len: main_code.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);
    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Frame 1: main sleeps, tick -> 1
    let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert_eq!(vm.current_tick, 1);

    // Identify A and B coroutine handles: the running coroutine's frames are in
    // the VM's call_stack, suspended ones keep theirs on the heap.
    let mut a_href = None;
    let mut b_href = None;
    if let Some(cur) = vm.current_coro {
        if let Some(f) = vm.call_stack.last() {
            if f.func_idx == 1 { a_href = Some(cur); }
            if f.func_idx == 2 { b_href = Some(cur); }
        }
    }
    for h in vm.heap.suspended_coroutine_handles() {
        if let Some(co) = vm.heap.coroutine_data(h) {
            if let Some(f) = co.frames.last() {
                if f.func_idx == 1 { a_href = Some(h); }
                if f.func_idx == 2 { b_href = Some(h); }
            }
        }
    }
    let a_href = a_href.expect("A not found");
    let b_href = b_href.expect("B not found");

    // Count how many frames B runs while A sleeps, using the scheduler's
    // next-to-run handle. A's FIRST scheduling only executes SLEEP N; its
    // SECOND scheduling is the actual wake, whose end-of-frame tick we record.
    // Using Option instead of a 0 sentinel so a never-woke run fails loudly.
    let mut ones_before = 0usize;
    let mut woke_at_tick: Option<u64> = None;
    let mut seen_a_once = false;
    for _ in 0..1000 {
        match vm.scheduler.current() {
            Some(next) if next == a_href => {
                if seen_a_once {
                    // A has slept before and is about to run again (wake).
                    // Run that frame and record its end-of-frame tick.
                    let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
                    woke_at_tick = Some(vm.current_tick);
                    break;
                }
                // First time A runs (to execute SLEEP N); not a wake yet.
                seen_a_once = true;
            }
            Some(next) if next == b_href => ones_before += 1,
            _ => {}
        }
        let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    }
    let woke_at_tick = woke_at_tick.expect("A never woke within the frame budget");

    // Canonical semantics: wake_tick = current_tick_at_sleep + N + 1. The
    // scheduler wakes sleepers at the end of that tick, so the coroutine runs
    // in the following frame and we observe its update at end tick = wake_tick + 1.
    // A executes SLEEP at its first run (tick 1), so observed tick = 1 + N + 2.
    let expected_observed_end_tick = 1u64 + sleep_n as u64 + 2u64;
    assert_eq!(woke_at_tick, expected_observed_end_tick, "A must wake at the exact tick (+1 frame to observe)");
    // And B must have produced at least N items (one per frame) before A's wake.
    assert!(ones_before as u64 >= sleep_n as u64, "B must keep running while A sleeps");
}
#[test]
fn test_multi_coroutine_determinism_across_runs() {
    use prometeu_bytecode::FunctionMeta;

    // Same alternation program as test_coroutines_strict_alternation_with_yield:
    // worker k pushes k every frame and yields; main spawns both and sleeps.
    let worker = |k: i32| -> Vec<u8> {
        [
            &(OpCode::PushI32 as u16).to_le_bytes()[..],
            &k.to_le_bytes()[..],
            &(OpCode::Yield as u16).to_le_bytes()[..],
            &(OpCode::FrameSync as u16).to_le_bytes()[..],
            &(OpCode::Jmp as u16).to_le_bytes()[..],
            &0u32.to_le_bytes()[..],
        ]
        .concat()
    };
    let fn_a = worker(1);
    let fn_b = worker(2);

    let main_code: Vec<u8> = [
        &(OpCode::Spawn as u16).to_le_bytes()[..],
        &1u32.to_le_bytes()[..],
        &0u32.to_le_bytes()[..],
        &(OpCode::Spawn as u16).to_le_bytes()[..],
        &2u32.to_le_bytes()[..],
        &0u32.to_le_bytes()[..],
        &(OpCode::Sleep as u16).to_le_bytes()[..],
        &100u32.to_le_bytes()[..],
        &(OpCode::Halt as u16).to_le_bytes()[..],
    ]
    .concat();

    let off_a = main_code.len();
    let off_b = off_a + fn_a.len();
    let rom = [main_code.as_slice(), fn_a.as_slice(), fn_b.as_slice()].concat();

    // Two identical VMs from the same ROM; only the first needs a clone,
    // the second takes ownership (previously both cloned redundantly).
    let mut vm1 = new_test_vm(rom.clone(), vec![]);
    let mut vm2 = new_test_vm(rom, vec![]);
    let fm: std::sync::Arc<[FunctionMeta]> = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: 0, code_len: main_code.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);
    vm1.program.functions = fm.clone();
    vm2.program.functions = fm;

    let mut native = MockNative;
    let mut c1 = HostContext::new(None);
    let mut c2 = HostContext::new(None);

    // Burn first frame in both VMs (main executes SPAWN/SPAWN/SLEEP).
    let _ = vm1.run_budget(100, &mut native, &mut c1).unwrap();
    let _ = vm2.run_budget(100, &mut native, &mut c2).unwrap();

    // Discover A/B handles in a VM: the running coroutine's frames live in the
    // VM call_stack, suspended coroutines keep theirs on the heap.
    let find_ab = |vm: &VirtualMachine| {
        let mut a = None;
        let mut b = None;
        if let Some(cur) = vm.current_coro {
            if let Some(f) = vm.call_stack.last() {
                if f.func_idx == 1 { a = Some(cur); }
                if f.func_idx == 2 { b = Some(cur); }
            }
        }
        for h in vm.heap.suspended_coroutine_handles() {
            if let Some(co) = vm.heap.coroutine_data(h) {
                if let Some(f) = co.frames.last() {
                    if f.func_idx == 1 { a = Some(h); }
                    if f.func_idx == 2 { b = Some(h); }
                }
            }
        }
        (a.expect("A missing"), b.expect("B missing"))
    };
    let (a1, b1) = find_ab(&vm1);
    let (a2, b2) = find_ab(&vm2);

    // Progress metric: heap-side operand stack length of a coroutine.
    let stack_len = |vm: &VirtualMachine, h| vm.heap.coroutine_data(h).unwrap().stack.len();
    let mut a1_prev = stack_len(&vm1, a1);
    let mut b1_prev = stack_len(&vm1, b1);
    let mut a2_prev = stack_len(&vm2, a2);
    let mut b2_prev = stack_len(&vm2, b2);

    // Record which coroutine progressed each frame in both VMs; the two traces
    // must be identical for the scheduler to be deterministic.
    let mut trace1 = Vec::new();
    let mut trace2 = Vec::new();
    for _ in 0..8 {
        let _ = vm1.run_budget(100, &mut native, &mut c1).unwrap();
        let a_now = stack_len(&vm1, a1);
        let b_now = stack_len(&vm1, b1);
        if a_now > a1_prev { trace1.push(1); } else if b_now > b1_prev { trace1.push(2); } else { panic!("no progress 1"); }
        a1_prev = a_now;
        b1_prev = b_now;

        let _ = vm2.run_budget(100, &mut native, &mut c2).unwrap();
        let a_now = stack_len(&vm2, a2);
        let b_now = stack_len(&vm2, b2);
        if a_now > a2_prev { trace2.push(1); } else if b_now > b2_prev { trace2.push(2); } else { panic!("no progress 2"); }
        a2_prev = a_now;
        b2_prev = b_now;
    }
    assert_eq!(trace1, trace2, "Execution trace (coroutine IDs) must match exactly across runs");
}
#[test]
fn test_gc_with_suspended_coroutine_runtime() {
    use crate::object::ObjectKind;
    use prometeu_bytecode::FunctionMeta;

    // Function F (idx 1): SLEEP 10; FRAME_SYNC; HALT — suspends while holding its argument.
    let fn_f: Vec<u8> = [
        &(OpCode::Sleep as u16).to_le_bytes()[..],
        &10u32.to_le_bytes()[..],
        &(OpCode::FrameSync as u16).to_le_bytes()[..],
        &(OpCode::Halt as u16).to_le_bytes()[..],
    ]
    .concat();

    // Main (idx 0): SPAWN F with 1 argument (the HeapRef we preload); FRAME_SYNC; HALT
    let main_code: Vec<u8> = [
        &(OpCode::Spawn as u16).to_le_bytes()[..],
        &1u32.to_le_bytes()[..], // func idx
        &1u32.to_le_bytes()[..], // arg count = 1
        &(OpCode::FrameSync as u16).to_le_bytes()[..],
        &(OpCode::Halt as u16).to_le_bytes()[..],
    ]
    .concat();

    let off_f = main_code.len();
    let rom = [main_code.as_slice(), fn_f.as_slice()].concat();

    // `rom` is moved — the previous `rom.clone()` was a redundant copy.
    let mut vm = new_test_vm(rom, vec![]);
    vm.program.functions = std::sync::Arc::from(vec![
        FunctionMeta { code_offset: 0, code_len: main_code.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
        // Function F takes 1 parameter (the HeapRef) which stays on its stack while sleeping
        FunctionMeta { code_offset: off_f as u32, code_len: fn_f.len() as u32, param_slots: 1, local_slots: 0, return_slots: 0, max_stack_slots: 8 },
    ]);

    // Force GC at the first safepoint to stress retention of coroutine-held refs.
    vm.gc_alloc_threshold = 1;

    // Allocate a heap object and preload it onto main's operand stack so SPAWN consumes it as arg.
    let captured = vm.heap.allocate_object(ObjectKind::Bytes, &[0xAB, 0xCD, 0xEF]);
    vm.operand_stack.push(Value::HeapRef(captured));

    let mut native = MockNative;
    let mut ctx = HostContext::new(None);

    // Run main: SPAWN moves the HeapRef into the sleeping coroutine; FRAME_SYNC triggers GC.
    let rep = vm.run_budget(100, &mut native, &mut ctx).unwrap();
    assert!(matches!(rep.reason, LogicalFrameEndingReason::FrameSync));

    // The captured object must survive the GC because the sleeping coroutine's stack roots it.
    assert!(vm.heap.is_valid(captured), "captured object must remain alive while coroutine sleeps");
}
#[test] #[test]
fn test_make_closure_zero_captures() { fn test_make_closure_zero_captures() {
use prometeu_bytecode::{FunctionMeta, Value}; use prometeu_bytecode::{FunctionMeta, Value};