removed bounded from VM, bytecode and specs

This commit is contained in:
bQUARKz 2026-03-01 22:35:30 +00:00
parent a04f462939
commit cfdca93160
Signed by: bquarkz
SSH Key Fingerprint: SHA256:Z7dgqoglWwoK6j6u4QC87OveEq74WOhFN+gitsxtkf8
18 changed files with 894 additions and 507 deletions

View File

@ -128,7 +128,7 @@ The verifier statically checks bytecode for structural safety and stack-shape
- Metadata-driven - Metadata-driven
- `SyscallMeta` defines expected arity and return slot counts. The verifier checks IDs/arity/return-slot counts against this metadata. - `SyscallMeta` defines expected arity and return slot counts. The verifier checks IDs/arity/return-slot counts against this metadata.
- Arguments and returns - Arguments and returns
- Arguments are taken from the operand stack in the order defined by the ABI. Returns use bounded multi-slot results via a host-side return buffer (`HostReturn`) which the VM copies back onto the stack, or zero slots for “void”. A mismatch in result counts is a fault/panic per current hardening logic. - Arguments are taken from the operand stack in the order defined by the ABI. Returns use multi-slot results via a host-side return buffer (`HostReturn`) which the VM copies back onto the stack, or zero slots for “void”. A mismatch in result counts is a fault/panic per current hardening logic.
- Capabilities - Capabilities
- Each VM instance has capability flags. Invoking a syscall without the required capability traps. - Each VM instance has capability flags. Invoking a syscall without the required capability traps.

View File

@ -14,15 +14,29 @@ pub enum AsmError {
InvalidOperand(String), InvalidOperand(String),
} }
fn emit_u16(v: u16, out: &mut Vec<u8>) { out.extend_from_slice(&v.to_le_bytes()); } fn emit_u16(v: u16, out: &mut Vec<u8>) {
fn emit_u32(v: u32, out: &mut Vec<u8>) { out.extend_from_slice(&v.to_le_bytes()); } out.extend_from_slice(&v.to_le_bytes());
fn emit_i32(v: i32, out: &mut Vec<u8>) { out.extend_from_slice(&v.to_le_bytes()); } }
fn emit_i64(v: i64, out: &mut Vec<u8>) { out.extend_from_slice(&v.to_le_bytes()); } fn emit_u32(v: u32, out: &mut Vec<u8>) {
fn emit_f64_bits(bits: u64, out: &mut Vec<u8>) { out.extend_from_slice(&bits.to_le_bytes()); } out.extend_from_slice(&v.to_le_bytes());
}
fn emit_i32(v: i32, out: &mut Vec<u8>) {
out.extend_from_slice(&v.to_le_bytes());
}
fn emit_i64(v: i64, out: &mut Vec<u8>) {
out.extend_from_slice(&v.to_le_bytes());
}
fn emit_f64_bits(bits: u64, out: &mut Vec<u8>) {
out.extend_from_slice(&bits.to_le_bytes());
}
fn parse_u32_any(s: &str) -> Result<u32, AsmError> { fn parse_u32_any(s: &str) -> Result<u32, AsmError> {
let s = s.trim(); let s = s.trim();
if let Some(rest) = s.strip_prefix("0x") { u32::from_str_radix(rest, 16).map_err(|_| AsmError::InvalidOperand(s.into())) } else { s.parse::<u32>().map_err(|_| AsmError::InvalidOperand(s.into())) } if let Some(rest) = s.strip_prefix("0x") {
u32::from_str_radix(rest, 16).map_err(|_| AsmError::InvalidOperand(s.into()))
} else {
s.parse::<u32>().map_err(|_| AsmError::InvalidOperand(s.into()))
}
} }
fn parse_i32_any(s: &str) -> Result<i32, AsmError> { fn parse_i32_any(s: &str) -> Result<i32, AsmError> {
@ -37,7 +51,9 @@ fn parse_f64_bits(s: &str) -> Result<u64, AsmError> {
let s = s.trim(); let s = s.trim();
let s = s.strip_prefix("f64:").ok_or_else(|| AsmError::InvalidOperand(s.into()))?; let s = s.strip_prefix("f64:").ok_or_else(|| AsmError::InvalidOperand(s.into()))?;
let hex = s.strip_prefix("0x").ok_or_else(|| AsmError::InvalidOperand(s.into()))?; let hex = s.strip_prefix("0x").ok_or_else(|| AsmError::InvalidOperand(s.into()))?;
if hex.len() != 16 { return Err(AsmError::InvalidOperand(s.into())); } if hex.len() != 16 {
return Err(AsmError::InvalidOperand(s.into()));
}
u64::from_str_radix(hex, 16).map_err(|_| AsmError::InvalidOperand(s.into())) u64::from_str_radix(hex, 16).map_err(|_| AsmError::InvalidOperand(s.into()))
} }
@ -46,14 +62,18 @@ fn parse_keyvals(s: &str) -> Result<(&str, &str), AsmError> {
let mut parts = s.split(','); let mut parts = s.split(',');
let a = parts.next().ok_or_else(|| AsmError::MissingOperand(s.into()))?.trim(); let a = parts.next().ok_or_else(|| AsmError::MissingOperand(s.into()))?.trim();
let b = parts.next().ok_or_else(|| AsmError::MissingOperand(s.into()))?.trim(); let b = parts.next().ok_or_else(|| AsmError::MissingOperand(s.into()))?.trim();
if parts.next().is_some() { return Err(AsmError::InvalidOperand(s.into())); } if parts.next().is_some() {
return Err(AsmError::InvalidOperand(s.into()));
}
Ok((a, b)) Ok((a, b))
} }
fn parse_pair<'a>(a: &'a str, ka: &str, b: &'a str, kb: &str) -> Result<(u32,u32), AsmError> { fn parse_pair<'a>(a: &'a str, ka: &str, b: &'a str, kb: &str) -> Result<(u32, u32), AsmError> {
let (ka_l, va_s) = a.split_once('=').ok_or_else(|| AsmError::InvalidOperand(a.into()))?; let (ka_l, va_s) = a.split_once('=').ok_or_else(|| AsmError::InvalidOperand(a.into()))?;
let (kb_l, vb_s) = b.split_once('=').ok_or_else(|| AsmError::InvalidOperand(b.into()))?; let (kb_l, vb_s) = b.split_once('=').ok_or_else(|| AsmError::InvalidOperand(b.into()))?;
if ka_l.trim() != ka || kb_l.trim() != kb { return Err(AsmError::InvalidOperand(format!("expected keys {} and {}", ka, kb))); } if ka_l.trim() != ka || kb_l.trim() != kb {
return Err(AsmError::InvalidOperand(format!("expected keys {} and {}", ka, kb)));
}
let va = parse_u32_any(va_s)?; let va = parse_u32_any(va_s)?;
let vb = parse_u32_any(vb_s)?; let vb = parse_u32_any(vb_s)?;
Ok((va, vb)) Ok((va, vb))
@ -73,114 +93,257 @@ pub fn assemble(src: &str) -> Result<Vec<u8>, AsmError> {
let mut out = Vec::new(); let mut out = Vec::new();
for raw_line in src.lines() { for raw_line in src.lines() {
let line = raw_line.trim(); let line = raw_line.trim();
if line.is_empty() { continue; } if line.is_empty() {
continue;
}
let (mn, ops) = parse_mnemonic(line); let (mn, ops) = parse_mnemonic(line);
match mn { match mn {
// Zero-operand // Zero-operand
"NOP" => { emit_u16(CoreOpCode::Nop as u16, &mut out); } "NOP" => {
"HALT" => { emit_u16(CoreOpCode::Halt as u16, &mut out); } emit_u16(CoreOpCode::Nop as u16, &mut out);
"TRAP" => { emit_u16(CoreOpCode::Trap as u16, &mut out); } }
"DUP" => { emit_u16(CoreOpCode::Dup as u16, &mut out); } "HALT" => {
"SWAP" => { emit_u16(CoreOpCode::Swap as u16, &mut out); } emit_u16(CoreOpCode::Halt as u16, &mut out);
"ADD" => { emit_u16(CoreOpCode::Add as u16, &mut out); } }
"SUB" => { emit_u16(CoreOpCode::Sub as u16, &mut out); } "TRAP" => {
"MUL" => { emit_u16(CoreOpCode::Mul as u16, &mut out); } emit_u16(CoreOpCode::Trap as u16, &mut out);
"DIV" => { emit_u16(CoreOpCode::Div as u16, &mut out); } }
"MOD" => { emit_u16(CoreOpCode::Mod as u16, &mut out); } "DUP" => {
"NEG" => { emit_u16(CoreOpCode::Neg as u16, &mut out); } emit_u16(CoreOpCode::Dup as u16, &mut out);
"EQ" => { emit_u16(CoreOpCode::Eq as u16, &mut out); } }
"NEQ" => { emit_u16(CoreOpCode::Neq as u16, &mut out); } "SWAP" => {
"LT" => { emit_u16(CoreOpCode::Lt as u16, &mut out); } emit_u16(CoreOpCode::Swap as u16, &mut out);
"LTE" => { emit_u16(CoreOpCode::Lte as u16, &mut out); } }
"GT" => { emit_u16(CoreOpCode::Gt as u16, &mut out); } "ADD" => {
"GTE" => { emit_u16(CoreOpCode::Gte as u16, &mut out); } emit_u16(CoreOpCode::Add as u16, &mut out);
"AND" => { emit_u16(CoreOpCode::And as u16, &mut out); } }
"OR" => { emit_u16(CoreOpCode::Or as u16, &mut out); } "SUB" => {
"NOT" => { emit_u16(CoreOpCode::Not as u16, &mut out); } emit_u16(CoreOpCode::Sub as u16, &mut out);
"BIT_AND" => { emit_u16(CoreOpCode::BitAnd as u16, &mut out); } }
"BIT_OR" => { emit_u16(CoreOpCode::BitOr as u16, &mut out); } "MUL" => {
"BIT_XOR" => { emit_u16(CoreOpCode::BitXor as u16, &mut out); } emit_u16(CoreOpCode::Mul as u16, &mut out);
"SHL" => { emit_u16(CoreOpCode::Shl as u16, &mut out); } }
"SHR" => { emit_u16(CoreOpCode::Shr as u16, &mut out); } "DIV" => {
"RET" => { emit_u16(CoreOpCode::Ret as u16, &mut out); } emit_u16(CoreOpCode::Div as u16, &mut out);
"YIELD" => { emit_u16(CoreOpCode::Yield as u16, &mut out); } }
"FRAME_SYNC" => { emit_u16(CoreOpCode::FrameSync as u16, &mut out); } "MOD" => {
emit_u16(CoreOpCode::Mod as u16, &mut out);
}
"NEG" => {
emit_u16(CoreOpCode::Neg as u16, &mut out);
}
"EQ" => {
emit_u16(CoreOpCode::Eq as u16, &mut out);
}
"NEQ" => {
emit_u16(CoreOpCode::Neq as u16, &mut out);
}
"LT" => {
emit_u16(CoreOpCode::Lt as u16, &mut out);
}
"LTE" => {
emit_u16(CoreOpCode::Lte as u16, &mut out);
}
"GT" => {
emit_u16(CoreOpCode::Gt as u16, &mut out);
}
"GTE" => {
emit_u16(CoreOpCode::Gte as u16, &mut out);
}
"AND" => {
emit_u16(CoreOpCode::And as u16, &mut out);
}
"OR" => {
emit_u16(CoreOpCode::Or as u16, &mut out);
}
"NOT" => {
emit_u16(CoreOpCode::Not as u16, &mut out);
}
"BIT_AND" => {
emit_u16(CoreOpCode::BitAnd as u16, &mut out);
}
"BIT_OR" => {
emit_u16(CoreOpCode::BitOr as u16, &mut out);
}
"BIT_XOR" => {
emit_u16(CoreOpCode::BitXor as u16, &mut out);
}
"SHL" => {
emit_u16(CoreOpCode::Shl as u16, &mut out);
}
"SHR" => {
emit_u16(CoreOpCode::Shr as u16, &mut out);
}
"RET" => {
emit_u16(CoreOpCode::Ret as u16, &mut out);
}
"YIELD" => {
emit_u16(CoreOpCode::Yield as u16, &mut out);
}
"FRAME_SYNC" => {
emit_u16(CoreOpCode::FrameSync as u16, &mut out);
}
// One u32 immediate (decimal or hex accepted for SYSCALL only; others decimal ok) // One u32 immediate (decimal or hex accepted for SYSCALL only; others decimal ok)
"JMP" => { "JMP" => {
if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } if ops.is_empty() {
emit_u16(CoreOpCode::Jmp as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::Jmp as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"JMP_IF_FALSE" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "JMP_IF_FALSE" => {
emit_u16(CoreOpCode::JmpIfFalse as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::JmpIfFalse as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"JMP_IF_TRUE" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "JMP_IF_TRUE" => {
emit_u16(CoreOpCode::JmpIfTrue as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::JmpIfTrue as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"PUSH_CONST" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "PUSH_CONST" => {
emit_u16(CoreOpCode::PushConst as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::PushConst as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"PUSH_I64" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "PUSH_I64" => {
emit_u16(CoreOpCode::PushI64 as u16, &mut out); emit_i64(parse_i64_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::PushI64 as u16, &mut out);
emit_i64(parse_i64_any(ops)?, &mut out);
} }
"PUSH_F64" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "PUSH_F64" => {
emit_u16(CoreOpCode::PushF64 as u16, &mut out); emit_f64_bits(parse_f64_bits(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::PushF64 as u16, &mut out);
emit_f64_bits(parse_f64_bits(ops)?, &mut out);
} }
"PUSH_BOOL" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "PUSH_BOOL" => {
let v = parse_u32_any(ops)? as u8; emit_u16(CoreOpCode::PushBool as u16, &mut out); out.push(v); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
let v = parse_u32_any(ops)? as u8;
emit_u16(CoreOpCode::PushBool as u16, &mut out);
out.push(v);
} }
"PUSH_I32" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "PUSH_I32" => {
emit_u16(CoreOpCode::PushI32 as u16, &mut out); emit_i32(parse_i32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::PushI32 as u16, &mut out);
emit_i32(parse_i32_any(ops)?, &mut out);
} }
"POP_N" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "POP_N" => {
emit_u16(CoreOpCode::PopN as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::PopN as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"PUSH_BOUNDED" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "GET_GLOBAL" => {
emit_u16(CoreOpCode::PushBounded as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::GetGlobal as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"GET_GLOBAL" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "SET_GLOBAL" => {
emit_u16(CoreOpCode::GetGlobal as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::SetGlobal as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"SET_GLOBAL" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "GET_LOCAL" => {
emit_u16(CoreOpCode::SetGlobal as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::GetLocal as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"GET_LOCAL" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "SET_LOCAL" => {
emit_u16(CoreOpCode::GetLocal as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::SetLocal as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"SET_LOCAL" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "CALL" => {
emit_u16(CoreOpCode::SetLocal as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::Call as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"CALL" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "CALL_CLOSURE" => {
emit_u16(CoreOpCode::Call as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
let (k, v) =
ops.split_once('=').ok_or_else(|| AsmError::InvalidOperand(ops.into()))?;
if k.trim() != "argc" {
return Err(AsmError::InvalidOperand(ops.into()));
}
emit_u16(CoreOpCode::CallClosure as u16, &mut out);
emit_u32(parse_u32_any(v)?, &mut out);
} }
"CALL_CLOSURE" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "MAKE_CLOSURE" => {
let (k, v) = ops.split_once('=').ok_or_else(|| AsmError::InvalidOperand(ops.into()))?; if k.trim() != "argc" { return Err(AsmError::InvalidOperand(ops.into())); } if ops.is_empty() {
emit_u16(CoreOpCode::CallClosure as u16, &mut out); emit_u32(parse_u32_any(v)?, &mut out); return Err(AsmError::MissingOperand(line.into()));
} }
"MAKE_CLOSURE" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } let (a, b) = parse_keyvals(ops)?;
let (a,b) = parse_keyvals(ops)?;
// Accept either order but require exact key names // Accept either order but require exact key names
let (fn_id, captures) = if a.starts_with("fn=") && b.starts_with("captures=") { let (fn_id, captures) = if a.starts_with("fn=") && b.starts_with("captures=") {
parse_pair(a, "fn", b, "captures")? parse_pair(a, "fn", b, "captures")?
} else if a.starts_with("captures=") && b.starts_with("fn=") { } else if a.starts_with("captures=") && b.starts_with("fn=") {
let (cap, fid) = parse_pair(a, "captures", b, "fn")?; (fid, cap) let (cap, fid) = parse_pair(a, "captures", b, "fn")?;
} else { return Err(AsmError::InvalidOperand(ops.into())); }; (fid, cap)
emit_u16(CoreOpCode::MakeClosure as u16, &mut out); emit_u32(fn_id, &mut out); emit_u32(captures, &mut out); } else {
return Err(AsmError::InvalidOperand(ops.into()));
};
emit_u16(CoreOpCode::MakeClosure as u16, &mut out);
emit_u32(fn_id, &mut out);
emit_u32(captures, &mut out);
} }
"SPAWN" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "SPAWN" => {
let (a,b) = parse_keyvals(ops)?; if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
let (a, b) = parse_keyvals(ops)?;
let (fn_id, argc) = if a.starts_with("fn=") && b.starts_with("argc=") { let (fn_id, argc) = if a.starts_with("fn=") && b.starts_with("argc=") {
parse_pair(a, "fn", b, "argc")? parse_pair(a, "fn", b, "argc")?
} else if a.starts_with("argc=") && b.starts_with("fn=") { } else if a.starts_with("argc=") && b.starts_with("fn=") {
let (ac, fid) = parse_pair(a, "argc", b, "fn")?; (fid, ac) let (ac, fid) = parse_pair(a, "argc", b, "fn")?;
} else { return Err(AsmError::InvalidOperand(ops.into())); }; (fid, ac)
emit_u16(CoreOpCode::Spawn as u16, &mut out); emit_u32(fn_id, &mut out); emit_u32(argc, &mut out); } else {
return Err(AsmError::InvalidOperand(ops.into()));
};
emit_u16(CoreOpCode::Spawn as u16, &mut out);
emit_u32(fn_id, &mut out);
emit_u32(argc, &mut out);
} }
"SLEEP" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "SLEEP" => {
emit_u16(CoreOpCode::Sleep as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::Sleep as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
"SYSCALL" => { if ops.is_empty() { return Err(AsmError::MissingOperand(line.into())); } "SYSCALL" => {
emit_u16(CoreOpCode::Syscall as u16, &mut out); emit_u32(parse_u32_any(ops)?, &mut out); if ops.is_empty() {
return Err(AsmError::MissingOperand(line.into()));
}
emit_u16(CoreOpCode::Syscall as u16, &mut out);
emit_u32(parse_u32_any(ops)?, &mut out);
} }
other => return Err(AsmError::UnknownMnemonic(other.into())), other => return Err(AsmError::UnknownMnemonic(other.into())),

View File

@ -53,7 +53,6 @@ fn format_operand(op: CoreOpCode, imm: &[u8]) -> String {
} }
CoreOpCode::PopN CoreOpCode::PopN
| CoreOpCode::PushConst | CoreOpCode::PushConst
| CoreOpCode::PushBounded
| CoreOpCode::GetGlobal | CoreOpCode::GetGlobal
| CoreOpCode::SetGlobal | CoreOpCode::SetGlobal
| CoreOpCode::GetLocal | CoreOpCode::GetLocal

View File

@ -551,7 +551,6 @@ fn validate_module(module: &BytecodeModule) -> Result<(), LoadError> {
pos += 4; pos += 4;
} }
OpCode::PushI32 OpCode::PushI32
| OpCode::PushBounded
| OpCode::Jmp | OpCode::Jmp
| OpCode::JmpIfFalse | OpCode::JmpIfFalse
| OpCode::JmpIfTrue | OpCode::JmpIfTrue

View File

@ -54,11 +54,6 @@ pub enum OpCode {
/// Removes `n` values from the stack. /// Removes `n` values from the stack.
/// Operand: n (u32) /// Operand: n (u32)
PopN = 0x18, PopN = 0x18,
/// Pushes a 16-bit bounded integer literal onto the stack.
/// Operand: value (u32, must be <= 0xFFFF)
/// Stack: [] -> [bounded]
PushBounded = 0x19,
// --- 6.3 Arithmetic --- // --- 6.3 Arithmetic ---
/// Adds the two top values (a + b). /// Adds the two top values (a + b).
/// Stack: [a, b] -> [result] /// Stack: [a, b] -> [result]
@ -75,13 +70,6 @@ pub enum OpCode {
/// Remainder of the division of the second top value by the top one (a % b). /// Remainder of the division of the second top value by the top one (a % b).
/// Stack: [a, b] -> [result] /// Stack: [a, b] -> [result]
Mod = 0x24, Mod = 0x24,
/// Converts a bounded value to a 64-bit integer.
/// Stack: [bounded] -> [int64]
BoundToInt = 0x25,
/// Converts an integer to a bounded value, trapping if out of range (0..65535).
/// Stack: [int] -> [bounded]
IntToBoundChecked = 0x26,
// --- 6.4 Comparison and Logic --- // --- 6.4 Comparison and Logic ---
/// Checks if a equals b. /// Checks if a equals b.
/// Stack: [a, b] -> [bool] /// Stack: [a, b] -> [bool]
@ -226,14 +214,11 @@ impl TryFrom<u16> for OpCode {
0x16 => Ok(OpCode::PushBool), 0x16 => Ok(OpCode::PushBool),
0x17 => Ok(OpCode::PushI32), 0x17 => Ok(OpCode::PushI32),
0x18 => Ok(OpCode::PopN), 0x18 => Ok(OpCode::PopN),
0x19 => Ok(OpCode::PushBounded),
0x20 => Ok(OpCode::Add), 0x20 => Ok(OpCode::Add),
0x21 => Ok(OpCode::Sub), 0x21 => Ok(OpCode::Sub),
0x22 => Ok(OpCode::Mul), 0x22 => Ok(OpCode::Mul),
0x23 => Ok(OpCode::Div), 0x23 => Ok(OpCode::Div),
0x24 => Ok(OpCode::Mod), 0x24 => Ok(OpCode::Mod),
0x25 => Ok(OpCode::BoundToInt),
0x26 => Ok(OpCode::IntToBoundChecked),
0x30 => Ok(OpCode::Eq), 0x30 => Ok(OpCode::Eq),
0x31 => Ok(OpCode::Neq), 0x31 => Ok(OpCode::Neq),
0x32 => Ok(OpCode::Lt), 0x32 => Ok(OpCode::Lt),
@ -287,14 +272,11 @@ impl OpCode {
OpCode::PushF64 => 2, OpCode::PushF64 => 2,
OpCode::PushBool => 2, OpCode::PushBool => 2,
OpCode::PushI32 => 2, OpCode::PushI32 => 2,
OpCode::PushBounded => 2,
OpCode::Add => 2, OpCode::Add => 2,
OpCode::Sub => 2, OpCode::Sub => 2,
OpCode::Mul => 4, OpCode::Mul => 4,
OpCode::Div => 6, OpCode::Div => 6,
OpCode::Mod => 6, OpCode::Mod => 6,
OpCode::BoundToInt => 1,
OpCode::IntToBoundChecked => 1,
OpCode::Eq => 2, OpCode::Eq => 2,
OpCode::Neq => 2, OpCode::Neq => 2,
OpCode::Lt => 2, OpCode::Lt => 2,

View File

@ -172,16 +172,6 @@ impl OpCodeSpecExt for OpCode {
may_trap: false, may_trap: false,
is_safepoint: false, is_safepoint: false,
}, },
OpCode::PushBounded => OpcodeSpec {
name: "PUSH_BOUNDED",
imm_bytes: 4,
pops: 0,
pushes: 1,
is_branch: false,
is_terminator: false,
may_trap: true,
is_safepoint: false,
},
OpCode::Add => OpcodeSpec { OpCode::Add => OpcodeSpec {
name: "ADD", name: "ADD",
imm_bytes: 0, imm_bytes: 0,
@ -232,26 +222,6 @@ impl OpCodeSpecExt for OpCode {
may_trap: true, may_trap: true,
is_safepoint: false, is_safepoint: false,
}, },
OpCode::BoundToInt => OpcodeSpec {
name: "BOUND_TO_INT",
imm_bytes: 0,
pops: 1,
pushes: 1,
is_branch: false,
is_terminator: false,
may_trap: false,
is_safepoint: false,
},
OpCode::IntToBoundChecked => OpcodeSpec {
name: "INT_TO_BOUND_CHECKED",
imm_bytes: 0,
pops: 1,
pushes: 1,
is_branch: false,
is_terminator: false,
may_trap: true,
is_safepoint: false,
},
OpCode::Eq => OpcodeSpec { OpCode::Eq => OpcodeSpec {
name: "EQ", name: "EQ",
imm_bytes: 0, imm_bytes: 0,

View File

@ -101,7 +101,6 @@ impl From<ProgramImage> for BytecodeModule {
Value::Boolean(v) => ConstantPoolEntry::Boolean(*v), Value::Boolean(v) => ConstantPoolEntry::Boolean(*v),
Value::String(v) => ConstantPoolEntry::String(v.clone()), Value::String(v) => ConstantPoolEntry::String(v.clone()),
Value::Int32(v) => ConstantPoolEntry::Int32(*v), Value::Int32(v) => ConstantPoolEntry::Int32(*v),
Value::Bounded(v) => ConstantPoolEntry::Int32(*v as i32),
Value::HeapRef(_) => ConstantPoolEntry::Null, Value::HeapRef(_) => ConstantPoolEntry::Null,
}) })
.collect(); .collect();

View File

@ -27,8 +27,6 @@ pub enum Value {
Boolean(bool), Boolean(bool),
/// UTF-8 string. Strings are immutable and usually come from the Constant Pool. /// UTF-8 string. Strings are immutable and usually come from the Constant Pool.
String(String), String(String),
/// Bounded 16-bit-ish integer.
Bounded(u32),
/// A handle to an object on the heap (opaque reference). /// A handle to an object on the heap (opaque reference).
HeapRef(HeapRef), HeapRef(HeapRef),
/// Represents the absence of a value (equivalent to `null` or `undefined`). /// Represents the absence of a value (equivalent to `null` or `undefined`).
@ -49,7 +47,6 @@ impl PartialEq for Value {
(Value::Float(a), Value::Int64(b)) => *a == *b as f64, (Value::Float(a), Value::Int64(b)) => *a == *b as f64,
(Value::Boolean(a), Value::Boolean(b)) => a == b, (Value::Boolean(a), Value::Boolean(b)) => a == b,
(Value::String(a), Value::String(b)) => a == b, (Value::String(a), Value::String(b)) => a == b,
(Value::Bounded(a), Value::Bounded(b)) => a == b,
(Value::HeapRef(a), Value::HeapRef(b)) => a == b, (Value::HeapRef(a), Value::HeapRef(b)) => a == b,
(Value::Null, Value::Null) => true, (Value::Null, Value::Null) => true,
_ => false, _ => false,
@ -65,7 +62,6 @@ impl PartialOrd for Value {
(Value::Int32(a), Value::Int64(b)) => (*a as i64).partial_cmp(b), (Value::Int32(a), Value::Int64(b)) => (*a as i64).partial_cmp(b),
(Value::Int64(a), Value::Int32(b)) => a.partial_cmp(&(*b as i64)), (Value::Int64(a), Value::Int32(b)) => a.partial_cmp(&(*b as i64)),
(Value::Float(a), Value::Float(b)) => a.partial_cmp(b), (Value::Float(a), Value::Float(b)) => a.partial_cmp(b),
(Value::Bounded(a), Value::Bounded(b)) => a.partial_cmp(b),
(Value::Int32(a), Value::Float(b)) => (*a as f64).partial_cmp(b), (Value::Int32(a), Value::Float(b)) => (*a as f64).partial_cmp(b),
(Value::Float(a), Value::Int32(b)) => a.partial_cmp(&(*b as f64)), (Value::Float(a), Value::Int32(b)) => a.partial_cmp(&(*b as f64)),
(Value::Int64(a), Value::Float(b)) => (*a as f64).partial_cmp(b), (Value::Int64(a), Value::Float(b)) => (*a as f64).partial_cmp(b),
@ -83,7 +79,6 @@ impl Value {
Value::Int32(i) => Some(*i as f64), Value::Int32(i) => Some(*i as f64),
Value::Int64(i) => Some(*i as f64), Value::Int64(i) => Some(*i as f64),
Value::Float(f) => Some(*f), Value::Float(f) => Some(*f),
Value::Bounded(b) => Some(*b as f64),
_ => None, _ => None,
} }
} }
@ -93,7 +88,6 @@ impl Value {
Value::Int32(i) => Some(*i as i64), Value::Int32(i) => Some(*i as i64),
Value::Int64(i) => Some(*i), Value::Int64(i) => Some(*i),
Value::Float(f) => Some(*f as i64), Value::Float(f) => Some(*f as i64),
Value::Bounded(b) => Some(*b as i64),
_ => None, _ => None,
} }
} }
@ -104,7 +98,6 @@ impl Value {
Value::Int32(i) => i.to_string(), Value::Int32(i) => i.to_string(),
Value::Int64(i) => i.to_string(), Value::Int64(i) => i.to_string(),
Value::Float(f) => f.to_string(), Value::Float(f) => f.to_string(),
Value::Bounded(b) => format!("{}b", b),
Value::Boolean(b) => b.to_string(), Value::Boolean(b) => b.to_string(),
Value::String(s) => s.clone(), Value::String(s) => s.clone(),
Value::HeapRef(r) => format!("[HeapRef {}]", r.0), Value::HeapRef(r) => format!("[HeapRef {}]", r.0),

View File

@ -10,7 +10,9 @@ fn encode_instr(op: CoreOpCode, imm: Option<&[u8]>) -> Vec<u8> {
match (need, imm) { match (need, imm) {
(0, None) => {} (0, None) => {}
(n, Some(bytes)) if bytes.len() == n => out.extend_from_slice(bytes), (n, Some(bytes)) if bytes.len() == n => out.extend_from_slice(bytes),
(n, Some(bytes)) => panic!("immediate size mismatch for {:?}: expected {}, got {}", op, n, bytes.len()), (n, Some(bytes)) => {
panic!("immediate size mismatch for {:?}: expected {}, got {}", op, n, bytes.len())
}
(n, None) => panic!("missing immediate for {:?}: need {} bytes", op, n), (n, None) => panic!("missing immediate for {:?}: need {} bytes", op, n),
} }
out out
@ -37,7 +39,7 @@ fn disasm(bytes: &[u8]) -> String {
CoreOpCode::PushF64 => format!("{}", instr.imm_f64().unwrap()), CoreOpCode::PushF64 => format!("{}", instr.imm_f64().unwrap()),
CoreOpCode::PushBool => format!("{}", instr.imm_u8().unwrap()), CoreOpCode::PushBool => format!("{}", instr.imm_u8().unwrap()),
CoreOpCode::PushI32 => format!("{}", instr.imm_i32().unwrap()), CoreOpCode::PushI32 => format!("{}", instr.imm_i32().unwrap()),
CoreOpCode::PopN | CoreOpCode::PushConst | CoreOpCode::PushBounded => { CoreOpCode::PopN | CoreOpCode::PushConst => {
format!("{}", instr.imm_u32().unwrap()) format!("{}", instr.imm_u32().unwrap())
} }
_ => format!("0x{}", hex::encode(instr.imm)), _ => format!("0x{}", hex::encode(instr.imm)),

View File

@ -1,5 +1,4 @@
use crate::vm_fault::VmFault; use prometeu_bytecode::{HeapRef, Value};
use prometeu_bytecode::{HeapRef, TRAP_OOB, Value};
pub struct HostReturn<'a> { pub struct HostReturn<'a> {
stack: &'a mut Vec<Value>, stack: &'a mut Vec<Value>,
@ -15,13 +14,6 @@ impl<'a> HostReturn<'a> {
pub fn push_int(&mut self, v: i64) { pub fn push_int(&mut self, v: i64) {
self.stack.push(Value::Int64(v)); self.stack.push(Value::Int64(v));
} }
pub fn push_bounded(&mut self, v: u32) -> Result<(), VmFault> {
if v > 0xFFFF {
return Err(VmFault::Trap(TRAP_OOB, "Bounded value overflow".into()));
}
self.stack.push(Value::Bounded(v));
Ok(())
}
pub fn push_null(&mut self) { pub fn push_null(&mut self) {
self.stack.push(Value::Null); self.stack.push(Value::Null);
} }

View File

@ -34,7 +34,7 @@ pub use hardware_bridge::HardwareBridge;
pub use host_context::{HostContext, HostContextProvider}; pub use host_context::{HostContext, HostContextProvider};
pub use host_return::HostReturn; pub use host_return::HostReturn;
pub use input_signals::InputSignals; pub use input_signals::InputSignals;
pub use native_helpers::{expect_bool, expect_bounded, expect_int}; pub use native_helpers::{expect_bool, expect_int};
pub use native_interface::{NativeInterface, SyscallId}; pub use native_interface::{NativeInterface, SyscallId};
pub use pad_bridge::PadBridge; pub use pad_bridge::PadBridge;
pub use touch_bridge::TouchBridge; pub use touch_bridge::TouchBridge;

View File

@ -1,15 +1,6 @@
use crate::vm_fault::VmFault; use crate::vm_fault::VmFault;
use prometeu_bytecode::{TRAP_TYPE, Value}; use prometeu_bytecode::{TRAP_TYPE, Value};
pub fn expect_bounded(args: &[Value], idx: usize) -> Result<u32, VmFault> {
args.get(idx)
.and_then(|v| match v {
Value::Bounded(b) => Some(*b),
_ => None,
})
.ok_or_else(|| VmFault::Trap(TRAP_TYPE, format!("Expected bounded at index {}", idx)))
}
pub fn expect_int(args: &[Value], idx: usize) -> Result<i64, VmFault> { pub fn expect_int(args: &[Value], idx: usize) -> Result<i64, VmFault> {
args.get(idx) args.get(idx)
.and_then(|v| v.as_integer()) .and_then(|v| v.as_integer())

View File

@ -635,10 +635,7 @@ impl NativeInterface for VirtualMachineRuntime {
Syscall::GfxClear565 => { Syscall::GfxClear565 => {
let color_val = expect_int(args, 0)? as u32; let color_val = expect_int(args, 0)? as u32;
if color_val > 0xFFFF { if color_val > 0xFFFF {
return Err(VmFault::Trap( return Err(VmFault::Trap(TRAP_OOB, "Color value out of bounds".into()));
TRAP_OOB,
"Color value out of bounds (bounded)".into(),
));
} }
let color = Color::from_raw(color_val as u16); let color = Color::from_raw(color_val as u16);
hw.gfx_mut().clear(color); hw.gfx_mut().clear(color);
@ -854,7 +851,6 @@ impl NativeInterface for VirtualMachineRuntime {
Value::Float(f) => *f, Value::Float(f) => *f,
Value::Int32(i) => *i as f64, Value::Int32(i) => *i as f64,
Value::Int64(i) => *i as f64, Value::Int64(i) => *i as f64,
Value::Bounded(b) => *b as f64,
_ => return Err(VmFault::Trap(TRAP_TYPE, "Expected number for pitch".into())), _ => return Err(VmFault::Trap(TRAP_TYPE, "Expected number for pitch".into())),
}; };
@ -892,7 +888,6 @@ impl NativeInterface for VirtualMachineRuntime {
Value::Float(f) => *f, Value::Float(f) => *f,
Value::Int32(i) => *i as f64, Value::Int32(i) => *i as f64,
Value::Int64(i) => *i as f64, Value::Int64(i) => *i as f64,
Value::Bounded(b) => *b as f64,
_ => return Err(VmFault::Trap(TRAP_TYPE, "Expected number for pitch".into())), _ => return Err(VmFault::Trap(TRAP_TYPE, "Expected number for pitch".into())),
}; };
let loop_mode = match expect_int(args, 6)? { let loop_mode = match expect_int(args, 6)? {

View File

@ -8,34 +8,98 @@ use std::collections::{HashMap, HashSet, VecDeque};
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub enum VerifierError { pub enum VerifierError {
UnknownOpcode { pc: usize, opcode: u16 }, UnknownOpcode {
TruncatedOpcode { pc: usize }, pc: usize,
TruncatedImmediate { pc: usize, opcode: OpCode, need: usize, have: usize }, opcode: u16,
InvalidJumpTarget { pc: usize, target: usize }, },
JumpToMidInstruction { pc: usize, target: usize }, TruncatedOpcode {
StackUnderflow { pc: usize, opcode: OpCode }, pc: usize,
StackOverflow { pc: usize, height: u16, limit: u16 }, },
StackMismatchJoin { pc: usize, target: usize, height_in: u16, height_target: u16 }, TruncatedImmediate {
BadRetStackHeight { pc: usize, height: u16, expected: u16 }, pc: usize,
FunctionOutOfBounds { func_idx: usize, start: usize, end: usize, code_len: usize }, opcode: OpCode,
InvalidSyscallId { pc: usize, id: u32 }, need: usize,
TrailingBytes { func_idx: usize, at_pc: usize }, have: usize,
InvalidFuncId { pc: usize, id: u32 }, },
InvalidJumpTarget {
pc: usize,
target: usize,
},
JumpToMidInstruction {
pc: usize,
target: usize,
},
StackUnderflow {
pc: usize,
opcode: OpCode,
},
StackOverflow {
pc: usize,
height: u16,
limit: u16,
},
StackMismatchJoin {
pc: usize,
target: usize,
height_in: u16,
height_target: u16,
},
BadRetStackHeight {
pc: usize,
height: u16,
expected: u16,
},
FunctionOutOfBounds {
func_idx: usize,
start: usize,
end: usize,
code_len: usize,
},
InvalidSyscallId {
pc: usize,
id: u32,
},
TrailingBytes {
func_idx: usize,
at_pc: usize,
},
InvalidFuncId {
pc: usize,
id: u32,
},
/// Execution can fall through past the end of the function without a valid terminator /// Execution can fall through past the end of the function without a valid terminator
/// (e.g., RET, JMP to end, HALT/TRAP). Verifier requires every reachable path to end /// (e.g., RET, JMP to end, HALT/TRAP). Verifier requires every reachable path to end
/// in a terminator. /// in a terminator.
UnterminatedPath { func_idx: usize, at_pc: usize }, UnterminatedPath {
func_idx: usize,
at_pc: usize,
},
// --- Closure-specific errors --- // --- Closure-specific errors ---
/// Top of stack is not a closure value on CALL_CLOSURE /// Top of stack is not a closure value on CALL_CLOSURE
NotAClosureOnCallClosure { pc: usize }, NotAClosureOnCallClosure {
pc: usize,
},
/// CALL_CLOSURE used with a closure whose callee function is not known at verify time /// CALL_CLOSURE used with a closure whose callee function is not known at verify time
UnknownClosureCallee { pc: usize }, UnknownClosureCallee {
pc: usize,
},
/// User-provided arg_count for CALL_CLOSURE does not match callee signature /// User-provided arg_count for CALL_CLOSURE does not match callee signature
BadClosureArgCount { pc: usize, expected: u16, got: u16 }, BadClosureArgCount {
pc: usize,
expected: u16,
got: u16,
},
/// YIELD executed in an invalid context (minimal safety rule violation) /// YIELD executed in an invalid context (minimal safety rule violation)
InvalidYieldContext { pc: usize, height: u16 }, InvalidYieldContext {
pc: usize,
height: u16,
},
/// SPAWN arg_count does not match callee param_slots /// SPAWN arg_count does not match callee param_slots
BadSpawnArgCount { pc: usize, expected: u16, got: u16 }, BadSpawnArgCount {
pc: usize,
expected: u16,
got: u16,
},
} }
pub struct Verifier; pub struct Verifier;
@ -208,8 +272,13 @@ impl Verifier {
} }
// Coroutine safety: forbid YIELD when operand stack is not empty (minimal rule) // Coroutine safety: forbid YIELD when operand stack is not empty (minimal rule)
if let OpCode::Yield = instr.opcode && in_height != 0 { if let OpCode::Yield = instr.opcode
return Err(VerifierError::InvalidYieldContext { pc: func_start + pc, height: in_height }); && in_height != 0
{
return Err(VerifierError::InvalidYieldContext {
pc: func_start + pc,
height: in_height,
});
} }
// Compute out types vector with closure-aware rules // Compute out types vector with closure-aware rules
@ -259,17 +328,24 @@ impl Verifier {
dynamic_pushes = Some(callee.return_slots); dynamic_pushes = Some(callee.return_slots);
} }
NonClosure => { NonClosure => {
return Err(VerifierError::NotAClosureOnCallClosure { pc: func_start + pc }); return Err(VerifierError::NotAClosureOnCallClosure {
pc: func_start + pc,
});
} }
Unknown => { Unknown => {
// We cannot determine return arity; be strict and reject // We cannot determine return arity; be strict and reject
return Err(VerifierError::UnknownClosureCallee { pc: func_start + pc }); return Err(VerifierError::UnknownClosureCallee {
pc: func_start + pc,
});
} }
} }
} }
// Immediates and known non-closure _pushes // Immediates and known non-closure _pushes
OpCode::PushConst | OpCode::PushI64 | OpCode::PushF64 | OpCode::PushBool OpCode::PushConst
| OpCode::PushI32 | OpCode::PushBounded => { | OpCode::PushI64
| OpCode::PushF64
| OpCode::PushBool
| OpCode::PushI32 => {
out_types.push(NonClosure); out_types.push(NonClosure);
} }
// Dup duplicates TOS type // Dup duplicates TOS type
@ -277,14 +353,20 @@ impl Verifier {
let tos = in_types.last().copied().unwrap_or(Unknown); let tos = in_types.last().copied().unwrap_or(Unknown);
if matches!(tos, Unknown) && in_height == 0 { if matches!(tos, Unknown) && in_height == 0 {
// Will already have underflowed on pops check for other ops; for Dup, enforce explicitly // Will already have underflowed on pops check for other ops; for Dup, enforce explicitly
return Err(VerifierError::StackUnderflow { pc: func_start + pc, opcode: OpCode::Dup }); return Err(VerifierError::StackUnderflow {
pc: func_start + pc,
opcode: OpCode::Dup,
});
} }
out_types.push(tos); out_types.push(tos);
} }
// Swap swaps top-2 types; ensure enough stack // Swap swaps top-2 types; ensure enough stack
OpCode::Swap => { OpCode::Swap => {
if in_types.len() < 2 { if in_types.len() < 2 {
return Err(VerifierError::StackUnderflow { pc: func_start + pc, opcode: OpCode::Swap }); return Err(VerifierError::StackUnderflow {
pc: func_start + pc,
opcode: OpCode::Swap,
});
} }
let len = out_types.len(); let len = out_types.len();
out_types.swap(len - 1, len - 2); out_types.swap(len - 1, len - 2);
@ -486,9 +568,13 @@ mod golden_ext {
use prometeu_bytecode::FunctionMeta; use prometeu_bytecode::FunctionMeta;
use prometeu_bytecode::isa::core::CoreOpCode as OpCode; use prometeu_bytecode::isa::core::CoreOpCode as OpCode;
fn enc_op(op: OpCode) -> [u8; 2] { (op as u16).to_le_bytes() } fn enc_op(op: OpCode) -> [u8; 2] {
(op as u16).to_le_bytes()
}
fn func(meta: FunctionMeta) -> Vec<FunctionMeta> { vec![meta] } fn func(meta: FunctionMeta) -> Vec<FunctionMeta> {
vec![meta]
}
// A minimal selection from the golden suite (full file migrated from // A minimal selection from the golden suite (full file migrated from
// integration tests). Keeping names to avoid confusion. // integration tests). Keeping names to avoid confusion.
@ -511,7 +597,12 @@ mod golden_ext {
code.extend_from_slice(&enc_op(OpCode::Add)); code.extend_from_slice(&enc_op(OpCode::Add));
code.extend_from_slice(&enc_op(OpCode::Ret)); code.extend_from_slice(&enc_op(OpCode::Ret));
let functions = func(FunctionMeta { code_offset: 0, code_len: code.len() as u32, return_slots: 1, ..Default::default() }); let functions = func(FunctionMeta {
code_offset: 0,
code_len: code.len() as u32,
return_slots: 1,
..Default::default()
});
let res = Verifier::verify(&code, &functions).unwrap(); let res = Verifier::verify(&code, &functions).unwrap();
assert!(res[0] >= 2); assert!(res[0] >= 2);
} }
@ -529,11 +620,20 @@ mod golden_ext {
let mut code = Vec::new(); let mut code = Vec::new();
code.extend_from_slice(&enc_op(OpCode::PushI32)); code.extend_from_slice(&enc_op(OpCode::PushI32));
code.push(0xAA); code.push(0xAA);
let functions = func(FunctionMeta { code_offset: 0, code_len: code.len() as u32, ..Default::default() }); let functions = func(FunctionMeta {
code_offset: 0,
code_len: code.len() as u32,
..Default::default()
});
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
assert_eq!( assert_eq!(
res, res,
Err(VerifierError::TruncatedImmediate { pc: 0, opcode: OpCode::PushI32, need: 4, have: 1 }) Err(VerifierError::TruncatedImmediate {
pc: 0,
opcode: OpCode::PushI32,
need: 4,
have: 1
})
); );
} }
@ -542,7 +642,11 @@ mod golden_ext {
let mut code = Vec::new(); let mut code = Vec::new();
code.extend_from_slice(&enc_op(OpCode::Jmp)); code.extend_from_slice(&enc_op(OpCode::Jmp));
code.extend_from_slice(&100u32.to_le_bytes()); code.extend_from_slice(&100u32.to_le_bytes());
let functions = func(FunctionMeta { code_offset: 0, code_len: code.len() as u32, ..Default::default() }); let functions = func(FunctionMeta {
code_offset: 0,
code_len: code.len() as u32,
..Default::default()
});
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
assert_eq!(res, Err(VerifierError::InvalidJumpTarget { pc: 0, target: 100 })); assert_eq!(res, Err(VerifierError::InvalidJumpTarget { pc: 0, target: 100 }));
} }
@ -553,28 +657,46 @@ mod golden_ext {
fn closure_call_valid_passes() { fn closure_call_valid_passes() {
let mut code = Vec::new(); let mut code = Vec::new();
// F0 @ 0 // F0 @ 0
code.push(OpCode::PushI32 as u8); code.push(0x00); code.push(OpCode::PushI32 as u8);
code.push(0x00);
code.extend_from_slice(&7u32.to_le_bytes()); code.extend_from_slice(&7u32.to_le_bytes());
code.push(OpCode::MakeClosure as u8); code.push(0x00); code.push(OpCode::MakeClosure as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); // fn id code.extend_from_slice(&1u32.to_le_bytes()); // fn id
code.extend_from_slice(&0u32.to_le_bytes()); // cap count code.extend_from_slice(&0u32.to_le_bytes()); // cap count
code.push(OpCode::CallClosure as u8); code.push(0x00); code.push(OpCode::CallClosure as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); // argc = 1 (excludes hidden) code.extend_from_slice(&1u32.to_le_bytes()); // argc = 1 (excludes hidden)
code.push(OpCode::PopN as u8); code.push(0x00); code.push(OpCode::PopN as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); code.extend_from_slice(&1u32.to_le_bytes());
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let f0_len = code.len() as u32; let f0_len = code.len() as u32;
// F1 @ f0_len // F1 @ f0_len
code.push(OpCode::PushI32 as u8); code.push(0x00); code.push(OpCode::PushI32 as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); code.extend_from_slice(&1u32.to_le_bytes());
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let f1_len = (code.len() as u32) - f0_len; let f1_len = (code.len() as u32) - f0_len;
let functions = vec![ let functions = vec![
FunctionMeta { code_offset: 0, code_len: f0_len, return_slots: 0, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f0_len, code_len: f1_len, param_slots: 2, return_slots: 1, ..Default::default() }, code_offset: 0,
code_len: f0_len,
return_slots: 0,
..Default::default()
},
FunctionMeta {
code_offset: f0_len,
code_len: f1_len,
param_slots: 2,
return_slots: 1,
..Default::default()
},
]; ];
let res = Verifier::verify(&code, &functions).unwrap(); let res = Verifier::verify(&code, &functions).unwrap();
@ -584,13 +706,21 @@ mod golden_ext {
#[test] #[test]
fn call_closure_on_non_closure_fails() { fn call_closure_on_non_closure_fails() {
let mut code = Vec::new(); let mut code = Vec::new();
code.push(OpCode::PushI32 as u8); code.push(0x00); code.push(OpCode::PushI32 as u8);
code.push(0x00);
code.extend_from_slice(&7u32.to_le_bytes()); code.extend_from_slice(&7u32.to_le_bytes());
code.push(OpCode::CallClosure as u8); code.push(0x00); code.push(OpCode::CallClosure as u8);
code.push(0x00);
code.extend_from_slice(&0u32.to_le_bytes()); code.extend_from_slice(&0u32.to_le_bytes());
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let functions = vec![FunctionMeta { code_offset: 0, code_len: code.len() as u32, return_slots: 0, ..Default::default() }]; let functions = vec![FunctionMeta {
code_offset: 0,
code_len: code.len() as u32,
return_slots: 0,
..Default::default()
}];
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
assert!(matches!(res, Err(VerifierError::NotAClosureOnCallClosure { .. }))); assert!(matches!(res, Err(VerifierError::NotAClosureOnCallClosure { .. })));
} }
@ -600,26 +730,43 @@ mod golden_ext {
// Same as valid case but argc = 0 while callee expects 1 user arg // Same as valid case but argc = 0 while callee expects 1 user arg
let mut code = Vec::new(); let mut code = Vec::new();
// F0 @ 0 // F0 @ 0
code.push(OpCode::PushI32 as u8); code.push(0x00); code.push(OpCode::PushI32 as u8);
code.push(0x00);
code.extend_from_slice(&7u32.to_le_bytes()); code.extend_from_slice(&7u32.to_le_bytes());
code.push(OpCode::MakeClosure as u8); code.push(0x00); code.push(OpCode::MakeClosure as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); // fn id code.extend_from_slice(&1u32.to_le_bytes()); // fn id
code.extend_from_slice(&0u32.to_le_bytes()); // cap count code.extend_from_slice(&0u32.to_le_bytes()); // cap count
code.push(OpCode::CallClosure as u8); code.push(0x00); code.push(OpCode::CallClosure as u8);
code.push(0x00);
code.extend_from_slice(&0u32.to_le_bytes()); // argc = 0 (mismatch) code.extend_from_slice(&0u32.to_le_bytes()); // argc = 0 (mismatch)
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let f0_len = code.len() as u32; let f0_len = code.len() as u32;
// F1 @ f0_len // F1 @ f0_len
code.push(OpCode::PushI32 as u8); code.push(0x00); code.push(OpCode::PushI32 as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); code.extend_from_slice(&1u32.to_le_bytes());
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let f1_len = (code.len() as u32) - f0_len; let f1_len = (code.len() as u32) - f0_len;
let functions = vec![ let functions = vec![
FunctionMeta { code_offset: 0, code_len: f0_len, return_slots: 0, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f0_len, code_len: f1_len, param_slots: 2, return_slots: 1, ..Default::default() }, code_offset: 0,
code_len: f0_len,
return_slots: 0,
..Default::default()
},
FunctionMeta {
code_offset: f0_len,
code_len: f1_len,
param_slots: 2,
return_slots: 1,
..Default::default()
},
]; ];
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
@ -633,35 +780,61 @@ mod golden_ext {
// F2: PushI32 5; Ret (param=1 hidden, ret=1) // F2: PushI32 5; Ret (param=1 hidden, ret=1)
let mut code = Vec::new(); let mut code = Vec::new();
// F0 @ 0 // F0 @ 0
code.push(OpCode::MakeClosure as u8); code.push(0x00); code.push(OpCode::MakeClosure as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); // F1 code.extend_from_slice(&1u32.to_le_bytes()); // F1
code.extend_from_slice(&0u32.to_le_bytes()); // cap=0 code.extend_from_slice(&0u32.to_le_bytes()); // cap=0
code.push(OpCode::CallClosure as u8); code.push(0x00); code.push(OpCode::CallClosure as u8);
code.push(0x00);
code.extend_from_slice(&0u32.to_le_bytes()); // argc=0 code.extend_from_slice(&0u32.to_le_bytes()); // argc=0
code.push(OpCode::PopN as u8); code.push(0x00); code.push(OpCode::PopN as u8);
code.push(0x00);
code.extend_from_slice(&1u32.to_le_bytes()); code.extend_from_slice(&1u32.to_le_bytes());
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let f0_len = code.len() as u32; let f0_len = code.len() as u32;
// F1 @ f0_len // F1 @ f0_len
code.push(OpCode::MakeClosure as u8); code.push(0x00); code.push(OpCode::MakeClosure as u8);
code.push(0x00);
code.extend_from_slice(&2u32.to_le_bytes()); // F2 code.extend_from_slice(&2u32.to_le_bytes()); // F2
code.extend_from_slice(&0u32.to_le_bytes()); // cap=0 code.extend_from_slice(&0u32.to_le_bytes()); // cap=0
code.push(OpCode::CallClosure as u8); code.push(0x00); code.push(OpCode::CallClosure as u8);
code.push(0x00);
code.extend_from_slice(&0u32.to_le_bytes()); // argc=0 code.extend_from_slice(&0u32.to_le_bytes()); // argc=0
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let f1_len = (code.len() as u32) - f0_len; let f1_len = (code.len() as u32) - f0_len;
// F2 @ f0_len + f1_len // F2 @ f0_len + f1_len
code.push(OpCode::PushI32 as u8); code.push(0x00); code.push(OpCode::PushI32 as u8);
code.push(0x00);
code.extend_from_slice(&5u32.to_le_bytes()); code.extend_from_slice(&5u32.to_le_bytes());
code.push(OpCode::Ret as u8); code.push(0x00); code.push(OpCode::Ret as u8);
code.push(0x00);
let f2_len = (code.len() as u32) - f0_len - f1_len; let f2_len = (code.len() as u32) - f0_len - f1_len;
let functions = vec![ let functions = vec![
FunctionMeta { code_offset: 0, code_len: f0_len, return_slots: 0, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f0_len, code_len: f1_len, param_slots: 1, return_slots: 1, ..Default::default() }, code_offset: 0,
FunctionMeta { code_offset: f0_len + f1_len, code_len: f2_len, param_slots: 1, return_slots: 1, ..Default::default() }, code_len: f0_len,
return_slots: 0,
..Default::default()
},
FunctionMeta {
code_offset: f0_len,
code_len: f1_len,
param_slots: 1,
return_slots: 1,
..Default::default()
},
FunctionMeta {
code_offset: f0_len + f1_len,
code_len: f2_len,
param_slots: 1,
return_slots: 1,
..Default::default()
},
]; ];
let res = Verifier::verify(&code, &functions).unwrap(); let res = Verifier::verify(&code, &functions).unwrap();
@ -682,7 +855,12 @@ mod golden_ext {
code.extend_from_slice(&enc_op(OpCode::Yield)); code.extend_from_slice(&enc_op(OpCode::Yield));
code.extend_from_slice(&enc_op(OpCode::Ret)); code.extend_from_slice(&enc_op(OpCode::Ret));
let functions = vec![FunctionMeta { code_offset: 0, code_len: code.len() as u32, return_slots: 1, ..Default::default() }]; let functions = vec![FunctionMeta {
code_offset: 0,
code_len: code.len() as u32,
return_slots: 1,
..Default::default()
}];
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
assert_eq!(res, Err(VerifierError::InvalidYieldContext { pc: 6, height: 1 })); assert_eq!(res, Err(VerifierError::InvalidYieldContext { pc: 6, height: 1 }));
} }
@ -697,9 +875,20 @@ mod golden_ext {
code.extend_from_slice(&1u32.to_le_bytes()); // arg_count (mismatch: callee expects 2) code.extend_from_slice(&1u32.to_le_bytes()); // arg_count (mismatch: callee expects 2)
code.extend_from_slice(&enc_op(OpCode::Ret)); code.extend_from_slice(&enc_op(OpCode::Ret));
let caller = FunctionMeta { code_offset: 0, code_len: code.len() as u32, return_slots: 0, ..Default::default() }; let caller = FunctionMeta {
code_offset: 0,
code_len: code.len() as u32,
return_slots: 0,
..Default::default()
};
// Callee has no code here; only signature matters // Callee has no code here; only signature matters
let callee = FunctionMeta { code_offset: code.len() as u32, code_len: 0, param_slots: 2, return_slots: 0, ..Default::default() }; let callee = FunctionMeta {
code_offset: code.len() as u32,
code_len: 0,
param_slots: 2,
return_slots: 0,
..Default::default()
};
let functions = vec![caller, callee]; let functions = vec![caller, callee];
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
@ -712,11 +901,20 @@ mod golden_ext {
let mut code = Vec::new(); let mut code = Vec::new();
code.extend_from_slice(&enc_op(OpCode::Sleep)); code.extend_from_slice(&enc_op(OpCode::Sleep));
code.push(0xAB); code.push(0xAB);
let functions = vec![FunctionMeta { code_offset: 0, code_len: code.len() as u32, ..Default::default() }]; let functions = vec![FunctionMeta {
code_offset: 0,
code_len: code.len() as u32,
..Default::default()
}];
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
assert_eq!( assert_eq!(
res, res,
Err(VerifierError::TruncatedImmediate { pc: 0, opcode: OpCode::Sleep, need: 4, have: 1 }) Err(VerifierError::TruncatedImmediate {
pc: 0,
opcode: OpCode::Sleep,
need: 4,
have: 1
})
); );
} }
} }
@ -1025,8 +1223,18 @@ mod tests {
code.push(0x00); code.push(0x00);
let functions = vec![ let functions = vec![
FunctionMeta { code_offset: 0, code_len: f0_len, return_slots: 0, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f0_len, code_len: 2, return_slots: 0, ..Default::default() }, code_offset: 0,
code_len: f0_len,
return_slots: 0,
..Default::default()
},
FunctionMeta {
code_offset: f0_len,
code_len: 2,
return_slots: 0,
..Default::default()
},
]; ];
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);
@ -1067,8 +1275,18 @@ mod tests {
let f1_len = (code.len() as u32) - f0_len; let f1_len = (code.len() as u32) - f0_len;
let functions = vec![ let functions = vec![
FunctionMeta { code_offset: 0, code_len: f0_len, return_slots: 1, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f0_len, code_len: f1_len, return_slots: 2, ..Default::default() }, code_offset: 0,
code_len: f0_len,
return_slots: 1,
..Default::default()
},
FunctionMeta {
code_offset: f0_len,
code_len: f1_len,
return_slots: 2,
..Default::default()
},
]; ];
let res = Verifier::verify(&code, &functions); let res = Verifier::verify(&code, &functions);

View File

@ -1,19 +1,19 @@
use crate::call_frame::CallFrame; use crate::call_frame::CallFrame;
use crate::heap::{CoroutineState, Heap};
use crate::object::ObjectKind;
use crate::roots::{RootVisitor, visit_value_for_roots};
use crate::scheduler::Scheduler;
use crate::verifier::Verifier; use crate::verifier::Verifier;
use crate::vm_init_error::VmInitError; use crate::vm_init_error::VmInitError;
use crate::{HostContext, NativeInterface}; use crate::{HostContext, NativeInterface};
use prometeu_bytecode::isa::core::CoreOpCode as OpCode; use prometeu_bytecode::HeapRef;
use prometeu_bytecode::ProgramImage; use prometeu_bytecode::ProgramImage;
use prometeu_bytecode::Value; use prometeu_bytecode::Value;
use crate::roots::{RootVisitor, visit_value_for_roots}; use prometeu_bytecode::isa::core::CoreOpCode as OpCode;
use crate::heap::{Heap, CoroutineState};
use crate::object::ObjectKind;
use crate::scheduler::Scheduler;
use prometeu_bytecode::{ use prometeu_bytecode::{
TRAP_BAD_RET_SLOTS, TRAP_DIV_ZERO, TRAP_INVALID_FUNC, TRAP_INVALID_SYSCALL, TRAP_OOB, TRAP_BAD_RET_SLOTS, TRAP_DIV_ZERO, TRAP_INVALID_FUNC, TRAP_INVALID_SYSCALL, TRAP_OOB,
TRAP_STACK_UNDERFLOW, TRAP_TYPE, TrapInfo, TRAP_STACK_UNDERFLOW, TRAP_TYPE, TrapInfo,
}; };
use prometeu_bytecode::HeapRef;
use prometeu_hal::syscalls::caps::ALL; use prometeu_hal::syscalls::caps::ALL;
use prometeu_hal::vm_fault::VmFault; use prometeu_hal::vm_fault::VmFault;
@ -109,7 +109,6 @@ pub struct VirtualMachine {
current_coro: Option<HeapRef>, current_coro: Option<HeapRef>,
} }
impl Default for VirtualMachine { impl Default for VirtualMachine {
fn default() -> Self { fn default() -> Self {
Self::new(vec![], vec![]) Self::new(vec![], vec![])
@ -118,10 +117,14 @@ impl Default for VirtualMachine {
impl VirtualMachine { impl VirtualMachine {
/// Returns the current program counter. /// Returns the current program counter.
pub fn pc(&self) -> usize { self.pc } pub fn pc(&self) -> usize {
self.pc
}
/// Returns true if there are no active call frames. /// Returns true if there are no active call frames.
pub fn call_stack_is_empty(&self) -> bool { self.call_stack.is_empty() } pub fn call_stack_is_empty(&self) -> bool {
self.call_stack.is_empty()
}
/// Returns up to `n` values from the top of the operand stack (top-first order). /// Returns up to `n` values from the top of the operand stack (top-first order).
pub fn operand_stack_top(&self, n: usize) -> Vec<Value> { pub fn operand_stack_top(&self, n: usize) -> Vec<Value> {
@ -131,20 +134,30 @@ impl VirtualMachine {
} }
/// Returns true if the VM has executed a HALT and is not currently running. /// Returns true if the VM has executed a HALT and is not currently running.
pub fn is_halted(&self) -> bool { self.halted } pub fn is_halted(&self) -> bool {
self.halted
}
/// Adds a software breakpoint at the given PC. /// Adds a software breakpoint at the given PC.
pub fn insert_breakpoint(&mut self, pc: usize) { let _ = self.breakpoints.insert(pc); } pub fn insert_breakpoint(&mut self, pc: usize) {
let _ = self.breakpoints.insert(pc);
}
/// Removes a software breakpoint at the given PC, if present. /// Removes a software breakpoint at the given PC, if present.
pub fn remove_breakpoint(&mut self, pc: usize) { let _ = self.breakpoints.remove(&pc); } pub fn remove_breakpoint(&mut self, pc: usize) {
let _ = self.breakpoints.remove(&pc);
}
/// Returns the list of currently configured breakpoints. /// Returns the list of currently configured breakpoints.
pub fn breakpoints_list(&self) -> Vec<usize> { self.breakpoints.iter().cloned().collect() } pub fn breakpoints_list(&self) -> Vec<usize> {
self.breakpoints.iter().cloned().collect()
}
// Test-only helpers for internal unit tests within this crate. // Test-only helpers for internal unit tests within this crate.
#[cfg(test)] #[cfg(test)]
pub(crate) fn push_operand_for_test(&mut self, v: Value) { self.operand_stack.push(v); } pub(crate) fn push_operand_for_test(&mut self, v: Value) {
self.operand_stack.push(v);
}
/// Creates a new VM instance with the provided bytecode and constants. /// Creates a new VM instance with the provided bytecode and constants.
pub fn new(rom: Vec<u8>, constant_pool: Vec<Value>) -> Self { pub fn new(rom: Vec<u8>, constant_pool: Vec<Value>) -> Self {
Self { Self {
@ -340,7 +353,10 @@ impl VirtualMachine {
let mut steps_executed = 0; let mut steps_executed = 0;
let mut ending_reason: Option<LogicalFrameEndingReason> = None; let mut ending_reason: Option<LogicalFrameEndingReason> = None;
while (self.cycles - start_cycles) < budget && !self.halted && self.pc < self.program.rom.len() { while (self.cycles - start_cycles) < budget
&& !self.halted
&& self.pc < self.program.rom.len()
{
// Debugger support: stop before executing an instruction if there's a breakpoint. // Debugger support: stop before executing an instruction if there's a breakpoint.
// Note: we skip the check for the very first step of a slice to avoid // Note: we skip the check for the very first step of a slice to avoid
// getting stuck on the same breakpoint repeatedly. // getting stuck on the same breakpoint repeatedly.
@ -388,7 +404,6 @@ impl VirtualMachine {
}) })
} }
/// Harness: run exactly `frames` logical frames deterministically. /// Harness: run exactly `frames` logical frames deterministically.
/// ///
/// This repeatedly calls `run_budget` with the provided `budget_per_slice` until /// This repeatedly calls `run_budget` with the provided `budget_per_slice` until
@ -465,12 +480,13 @@ impl VirtualMachine {
| LogicalFrameEndingReason::Breakpoint | LogicalFrameEndingReason::Breakpoint
); );
out.push(rep); out.push(rep);
if terminal { break; } if terminal {
break;
}
} }
Ok(out) Ok(out)
} }
/// Executes a single instruction at the current Program Counter (PC). /// Executes a single instruction at the current Program Counter (PC).
/// ///
/// This follows the classic CPU cycle: /// This follows the classic CPU cycle:
@ -628,14 +644,23 @@ impl VirtualMachine {
args.reverse(); args.reverse();
// Build operand stack for the new coroutine: params followed by zeroed locals // Build operand stack for the new coroutine: params followed by zeroed locals
let mut new_stack: Vec<Value> = Vec::with_capacity((param_slots + local_slots) as usize); let mut new_stack: Vec<Value> =
Vec::with_capacity((param_slots + local_slots) as usize);
// Place user args as parameters // Place user args as parameters
for v in args { new_stack.push(v); } for v in args {
new_stack.push(v);
}
// Zero-init locals // Zero-init locals
for _ in 0..local_slots { new_stack.push(Value::Null); } for _ in 0..local_slots {
new_stack.push(Value::Null);
}
// Initial frame for the coroutine (sentinel-like return to end-of-rom) // Initial frame for the coroutine (sentinel-like return to end-of-rom)
let frames = vec![CallFrame { return_pc: self.program.rom.len() as u32, stack_base: 0, func_idx: fn_id }]; let frames = vec![CallFrame {
return_pc: self.program.rom.len() as u32,
stack_base: 0,
func_idx: fn_id,
}];
let href = self.heap.allocate_coroutine( let href = self.heap.allocate_coroutine(
entry_pc, entry_pc,
@ -663,11 +688,9 @@ impl VirtualMachine {
// wake_tick = current_tick + duration + 1 // wake_tick = current_tick + duration + 1
let duration = instr let duration = instr
.imm_u32() .imm_u32()
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))? as u64; .map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?
let wake = self as u64;
.current_tick let wake = self.current_tick.saturating_add(duration).saturating_add(1);
.saturating_add(duration)
.saturating_add(1);
self.sleep_requested_until = Some(wake); self.sleep_requested_until = Some(wake);
// End the logical frame right after the instruction completes // End the logical frame right after the instruction completes
@ -716,7 +739,7 @@ impl VirtualMachine {
other other
), ),
start_pc as u32, start_pc as u32,
)) ));
} }
}; };
@ -733,10 +756,7 @@ impl VirtualMachine {
return Err(self.trap( return Err(self.trap(
TRAP_TYPE, TRAP_TYPE,
opcode as u16, opcode as u16,
format!( format!("CALL_CLOSURE on non-closure object kind {:?}", header.kind),
"CALL_CLOSURE on non-closure object kind {:?}",
header.kind
),
start_pc as u32, start_pc as u32,
)); ));
} }
@ -785,7 +805,9 @@ impl VirtualMachine {
// Prepare the operand stack to match the direct CALL convention: // Prepare the operand stack to match the direct CALL convention:
// push hidden arg0 (closure_ref) followed by arg1..argN. // push hidden arg0 (closure_ref) followed by arg1..argN.
self.push(Value::HeapRef(href)); self.push(Value::HeapRef(href));
for v in user_args.into_iter() { self.push(v); } for v in user_args.into_iter() {
self.push(v);
}
let stack_base = self let stack_base = self
.operand_stack .operand_stack
@ -794,9 +816,15 @@ impl VirtualMachine {
.ok_or_else(|| LogicalFrameEndingReason::Panic("Stack underflow".into()))?; .ok_or_else(|| LogicalFrameEndingReason::Panic("Stack underflow".into()))?;
// Allocate and zero-init local slots // Allocate and zero-init local slots
for _ in 0..callee_local_slots { self.operand_stack.push(Value::Null); } for _ in 0..callee_local_slots {
self.operand_stack.push(Value::Null);
}
self.call_stack.push(CallFrame { return_pc: self.pc as u32, stack_base, func_idx: fn_id }); self.call_stack.push(CallFrame {
return_pc: self.pc as u32,
stack_base,
func_idx: fn_id,
});
self.pc = callee_code_offset; self.pc = callee_code_offset;
} }
OpCode::PushConst => { OpCode::PushConst => {
@ -821,20 +849,6 @@ impl VirtualMachine {
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?; .map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
self.push(Value::Int32(val)); self.push(Value::Int32(val));
} }
OpCode::PushBounded => {
let val = instr
.imm_u32()
.map_err(|e| LogicalFrameEndingReason::Panic(format!("{:?}", e)))?;
if val > 0xFFFF {
return Err(self.trap(
TRAP_OOB,
opcode as u16,
format!("Bounded value overflow: {} > 0xFFFF", val),
start_pc as u32,
));
}
self.push(Value::Bounded(val));
}
OpCode::PushF64 => { OpCode::PushF64 => {
let val = instr let val = instr
.imm_f64() .imm_f64()
@ -883,17 +897,6 @@ impl VirtualMachine {
(Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a + *b as f64)), (Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a + *b as f64)),
(Value::Int64(a), Value::Float(b)) => Ok(Value::Float(*a as f64 + b)), (Value::Int64(a), Value::Float(b)) => Ok(Value::Float(*a as f64 + b)),
(Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a + *b as f64)), (Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a + *b as f64)),
(Value::Bounded(a), Value::Bounded(b)) => {
let res = a.saturating_add(*b);
if res > 0xFFFF {
Err(OpError::Trap(
TRAP_OOB,
format!("Bounded addition overflow: {} + {} = {}", a, b, res),
))
} else {
Ok(Value::Bounded(res))
}
}
_ => Err(OpError::Panic("Invalid types for ADD".into())), _ => Err(OpError::Panic("Invalid types for ADD".into())),
})?, })?,
OpCode::Sub => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) { OpCode::Sub => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
@ -906,16 +909,6 @@ impl VirtualMachine {
(Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a - b as f64)), (Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a - b as f64)),
(Value::Int64(a), Value::Float(b)) => Ok(Value::Float(a as f64 - b)), (Value::Int64(a), Value::Float(b)) => Ok(Value::Float(a as f64 - b)),
(Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a - b as f64)), (Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a - b as f64)),
(Value::Bounded(a), Value::Bounded(b)) => {
if a < b {
Err(OpError::Trap(
TRAP_OOB,
format!("Bounded subtraction underflow: {} - {} < 0", a, b),
))
} else {
Ok(Value::Bounded(a - b))
}
}
_ => Err(OpError::Panic("Invalid types for SUB".into())), _ => Err(OpError::Panic("Invalid types for SUB".into())),
})?, })?,
OpCode::Mul => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) { OpCode::Mul => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
@ -928,17 +921,6 @@ impl VirtualMachine {
(Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a * b as f64)), (Value::Float(a), Value::Int32(b)) => Ok(Value::Float(a * b as f64)),
(Value::Int64(a), Value::Float(b)) => Ok(Value::Float(a as f64 * b)), (Value::Int64(a), Value::Float(b)) => Ok(Value::Float(a as f64 * b)),
(Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a * b as f64)), (Value::Float(a), Value::Int64(b)) => Ok(Value::Float(a * b as f64)),
(Value::Bounded(a), Value::Bounded(b)) => {
let res = a as u64 * b as u64;
if res > 0xFFFF {
Err(OpError::Trap(
TRAP_OOB,
format!("Bounded multiplication overflow: {} * {} = {}", a, b, res),
))
} else {
Ok(Value::Bounded(res as u32))
}
}
_ => Err(OpError::Panic("Invalid types for MUL".into())), _ => Err(OpError::Panic("Invalid types for MUL".into())),
})?, })?,
OpCode::Div => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) { OpCode::Div => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
@ -1008,15 +990,6 @@ impl VirtualMachine {
} }
Ok(Value::Float(a / b as f64)) Ok(Value::Float(a / b as f64))
} }
(Value::Bounded(a), Value::Bounded(b)) => {
if b == 0 {
return Err(OpError::Trap(
TRAP_DIV_ZERO,
"Bounded division by zero".into(),
));
}
Ok(Value::Bounded(a / b))
}
_ => Err(OpError::Panic("Invalid types for DIV".into())), _ => Err(OpError::Panic("Invalid types for DIV".into())),
})?, })?,
OpCode::Mod => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) { OpCode::Mod => self.binary_op(opcode, start_pc as u32, |a, b| match (a, b) {
@ -1032,41 +1005,8 @@ impl VirtualMachine {
} }
Ok(Value::Int64(a % b)) Ok(Value::Int64(a % b))
} }
(Value::Bounded(a), Value::Bounded(b)) => {
if b == 0 {
return Err(OpError::Trap(TRAP_DIV_ZERO, "Bounded modulo by zero".into()));
}
Ok(Value::Bounded(a % b))
}
_ => Err(OpError::Panic("Invalid types for MOD".into())), _ => Err(OpError::Panic("Invalid types for MOD".into())),
})?, })?,
OpCode::BoundToInt => {
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
if let Value::Bounded(b) = val {
self.push(Value::Int64(b as i64));
} else {
return Err(LogicalFrameEndingReason::Panic(
"Expected bounded for BOUND_TO_INT".into(),
));
}
}
OpCode::IntToBoundChecked => {
let val = self.pop().map_err(LogicalFrameEndingReason::Panic)?;
let int_val = val.as_integer().ok_or_else(|| {
LogicalFrameEndingReason::Panic(
"Expected integer for INT_TO_BOUND_CHECKED".into(),
)
})?;
if !(0..=0xFFFF).contains(&int_val) {
return Err(self.trap(
TRAP_OOB,
OpCode::IntToBoundChecked as u16,
format!("Integer to bounded conversion out of range: {}", int_val),
start_pc as u32,
));
}
self.push(Value::Bounded(int_val as u32));
}
OpCode::Eq => { OpCode::Eq => {
self.binary_op(opcode, start_pc as u32, |a, b| Ok(Value::Boolean(a == b)))? self.binary_op(opcode, start_pc as u32, |a, b| Ok(Value::Boolean(a == b)))?
} }
@ -1323,7 +1263,8 @@ impl VirtualMachine {
OpCode::Syscall as u16, OpCode::Syscall as u16,
format!( format!(
"Missing capability for syscall {} (required=0x{:X})", "Missing capability for syscall {} (required=0x{:X})",
syscall.name(), meta.caps syscall.name(),
meta.caps
), ),
pc_at_syscall, pc_at_syscall,
)); ));
@ -1394,12 +1335,16 @@ impl VirtualMachine {
// Collect GC roots from VM state // Collect GC roots from VM state
struct CollectRoots(Vec<prometeu_bytecode::HeapRef>); struct CollectRoots(Vec<prometeu_bytecode::HeapRef>);
impl crate::roots::RootVisitor for CollectRoots { impl crate::roots::RootVisitor for CollectRoots {
fn visit_heap_ref(&mut self, r: prometeu_bytecode::HeapRef) { self.0.push(r); } fn visit_heap_ref(&mut self, r: prometeu_bytecode::HeapRef) {
self.0.push(r);
}
} }
let mut collector = CollectRoots(Vec::new()); let mut collector = CollectRoots(Vec::new());
self.visit_roots(&mut collector); self.visit_roots(&mut collector);
// Add current coroutine and all suspended (ready/sleeping) coroutines as GC roots // Add current coroutine and all suspended (ready/sleeping) coroutines as GC roots
if let Some(cur) = self.current_coro { collector.0.push(cur); } if let Some(cur) = self.current_coro {
collector.0.push(cur);
}
let mut coro_roots = self.heap.suspended_coroutine_handles(); let mut coro_roots = self.heap.suspended_coroutine_handles();
collector.0.append(&mut coro_roots); collector.0.append(&mut coro_roots);
@ -1583,10 +1528,13 @@ impl VirtualMachine {
for frame in &self.call_stack { for frame in &self.call_stack {
if let Some(func_meta) = self.program.functions.get(frame.func_idx) { if let Some(func_meta) = self.program.functions.get(frame.func_idx) {
let start = frame.stack_base; let start = frame.stack_base;
let frame_slots = (func_meta.param_slots as usize) + (func_meta.local_slots as usize); let frame_slots =
(func_meta.param_slots as usize) + (func_meta.local_slots as usize);
let mut end = start.saturating_add(frame_slots); let mut end = start.saturating_add(frame_slots);
// Clamp to current stack height just in case // Clamp to current stack height just in case
if end > self.operand_stack.len() { end = self.operand_stack.len(); } if end > self.operand_stack.len() {
end = self.operand_stack.len();
}
for i in start..end { for i in start..end {
if let Some(v) = self.operand_stack.get(i) { if let Some(v) = self.operand_stack.get(i) {
visit_value_for_roots(v, visitor); visit_value_for_roots(v, visitor);
@ -1727,13 +1675,25 @@ mod tests {
for _ in 0..10 { for _ in 0..10 {
if !vm1.halted { if !vm1.halted {
let rep = vm1.run_budget(4, &mut native, &mut ctx1).expect("vm1 ok"); let rep = vm1.run_budget(4, &mut native, &mut ctx1).expect("vm1 ok");
trace1.push((vm1.pc, vm1.current_tick, vm1.operand_stack.len(), format!("{:?}", rep.reason))); trace1.push((
vm1.pc,
vm1.current_tick,
vm1.operand_stack.len(),
format!("{:?}", rep.reason),
));
} }
if !vm2.halted { if !vm2.halted {
let rep = vm2.run_budget(4, &mut native, &mut ctx2).expect("vm2 ok"); let rep = vm2.run_budget(4, &mut native, &mut ctx2).expect("vm2 ok");
trace2.push((vm2.pc, vm2.current_tick, vm2.operand_stack.len(), format!("{:?}", rep.reason))); trace2.push((
vm2.pc,
vm2.current_tick,
vm2.operand_stack.len(),
format!("{:?}", rep.reason),
));
}
if vm1.halted && vm2.halted {
break;
} }
if vm1.halted && vm2.halted { break; }
} }
assert!(vm1.halted && vm2.halted, "Both VMs should reach HALT deterministically"); assert!(vm1.halted && vm2.halted, "Both VMs should reach HALT deterministically");
@ -1801,7 +1761,7 @@ mod tests {
#[test] #[test]
fn test_gc_many_coroutines_and_wake_order_determinism() { fn test_gc_many_coroutines_and_wake_order_determinism() {
use crate::heap::{CoroutineState}; use crate::heap::CoroutineState;
use crate::object::ObjectKind; use crate::object::ObjectKind;
// ROM: FrameSync; FrameSync; Halt (two deterministic safepoints back-to-back) // ROM: FrameSync; FrameSync; Halt (two deterministic safepoints back-to-back)
@ -1824,7 +1784,10 @@ mod tests {
} }
// Sanity: allocations present // Sanity: allocations present
assert!(vm.heap.len() as u32 >= coro_count, "heap should contain coroutine objects and bytes"); assert!(
vm.heap.len() as u32 >= coro_count,
"heap should contain coroutine objects and bytes"
);
let mut native = MockNative; let mut native = MockNative;
let mut ctx = HostContext::new(None); let mut ctx = HostContext::new(None);
@ -1902,54 +1865,6 @@ mod tests {
} }
} }
#[test]
fn test_int_to_bound_checked_trap() {
let mut native = MockNative;
let mut ctx = HostContext::new(None);
let mut rom = Vec::new();
rom.extend_from_slice(&(OpCode::PushI32 as u16).to_le_bytes());
rom.extend_from_slice(&70000i32.to_le_bytes()); // > 65535
rom.extend_from_slice(&(OpCode::IntToBoundChecked as u16).to_le_bytes());
rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
let mut vm = new_test_vm(rom.clone(), vec![]);
let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
match report.reason {
LogicalFrameEndingReason::Trap(trap) => {
assert_eq!(trap.code, TRAP_OOB);
assert_eq!(trap.opcode, OpCode::IntToBoundChecked as u16);
}
_ => panic!("Expected Trap, got {:?}", report.reason),
}
}
#[test]
fn test_bounded_add_overflow_trap() {
let mut native = MockNative;
let mut ctx = HostContext::new(None);
let mut rom = Vec::new();
rom.extend_from_slice(&(OpCode::PushBounded as u16).to_le_bytes());
rom.extend_from_slice(&60000u32.to_le_bytes());
rom.extend_from_slice(&(OpCode::PushBounded as u16).to_le_bytes());
rom.extend_from_slice(&10000u32.to_le_bytes());
rom.extend_from_slice(&(OpCode::Add as u16).to_le_bytes());
rom.extend_from_slice(&(OpCode::Halt as u16).to_le_bytes());
let mut vm = new_test_vm(rom.clone(), vec![]);
let report = vm.run_budget(100, &mut native, &mut ctx).unwrap();
match report.reason {
LogicalFrameEndingReason::Trap(trap) => {
assert_eq!(trap.code, TRAP_OOB);
assert_eq!(trap.opcode, OpCode::Add as u16);
}
_ => panic!("Expected Trap, got {:?}", report.reason),
}
}
#[test] #[test]
fn test_comparisons_polymorphic() { fn test_comparisons_polymorphic() {
let mut native = MockNative; let mut native = MockNative;
@ -2377,7 +2292,6 @@ mod tests {
assert!(vm.pop().is_err()); // Stack should be empty assert!(vm.pop().is_err()); // Stack should be empty
} }
#[test] #[test]
fn test_entry_point_ret_with_prepare_call() { fn test_entry_point_ret_with_prepare_call() {
// PushI32 0 (0x17), then Ret (0x51) // PushI32 0 (0x17), then Ret (0x51)
@ -2431,7 +2345,7 @@ mod tests {
) -> Result<(), VmFault> { ) -> Result<(), VmFault> {
ret.push_bool(true); ret.push_bool(true);
ret.push_int(42); ret.push_int(42);
ret.push_bounded(255)?; ret.push_int(255);
Ok(()) Ok(())
} }
} }
@ -2688,20 +2602,6 @@ mod tests {
} }
} }
#[test]
fn test_host_return_bounded_overflow_trap() {
let mut stack = Vec::new();
let mut ret = HostReturn::new(&mut stack);
let res = ret.push_bounded(65536);
assert!(res.is_err());
match res.err().unwrap() {
VmFault::Trap(code, _) => {
assert_eq!(code, TRAP_OOB);
}
_ => panic!("Expected Trap"),
}
}
#[test] #[test]
fn test_loader_hardening_invalid_magic() { fn test_loader_hardening_invalid_magic() {
let mut vm = VirtualMachine::default(); let mut vm = VirtualMachine::default();
@ -3441,13 +3341,17 @@ mod tests {
other => panic!("Expected FrameSync, got {:?}", other), other => panic!("Expected FrameSync, got {:?}", other),
} }
assert_eq!(vm.heap.len(), 1, "All short-lived objects except main coroutine must be reclaimed deterministically"); assert_eq!(
vm.heap.len(),
1,
"All short-lived objects except main coroutine must be reclaimed deterministically"
);
} }
#[test] #[test]
fn test_gc_keeps_objects_captured_by_suspended_coroutines() { fn test_gc_keeps_objects_captured_by_suspended_coroutines() {
use crate::object::ObjectKind;
use crate::heap::CoroutineState; use crate::heap::CoroutineState;
use crate::object::ObjectKind;
// ROM: FRAME_SYNC; HALT (trigger GC at safepoint) // ROM: FRAME_SYNC; HALT (trigger GC at safepoint)
let mut rom = Vec::new(); let mut rom = Vec::new();
@ -3474,7 +3378,11 @@ mod tests {
vec![], vec![],
); );
assert_eq!(vm.heap.len(), 3, "object + suspended coroutine + main coroutine must be allocated"); assert_eq!(
vm.heap.len(),
3,
"object + suspended coroutine + main coroutine must be allocated"
);
let mut native = MockNative; let mut native = MockNative;
let mut ctx = HostContext::new(None); let mut ctx = HostContext::new(None);
@ -3572,9 +3480,30 @@ mod tests {
// VM with three functions (0=main, 1=A, 2=B) // VM with three functions (0=main, 1=A, 2=B)
let mut vm = new_test_vm(rom.clone(), vec![]); let mut vm = new_test_vm(rom.clone(), vec![]);
vm.program.functions = std::sync::Arc::from(vec![ vm.program.functions = std::sync::Arc::from(vec![
FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, FunctionMeta {
FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, code_offset: off_main as u32,
FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, code_len: main.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
FunctionMeta {
code_offset: off_a as u32,
code_len: fn_a.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
FunctionMeta {
code_offset: off_b as u32,
code_len: fn_b.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
]); ]);
let mut native = MockNative; let mut native = MockNative;
@ -3589,16 +3518,24 @@ mod tests {
// Consider currently running coroutine // Consider currently running coroutine
if let Some(cur) = vm.current_coro { if let Some(cur) = vm.current_coro {
if let Some(f) = vm.call_stack.last() { if let Some(f) = vm.call_stack.last() {
if f.func_idx == 1 { a_href = Some(cur); } if f.func_idx == 1 {
if f.func_idx == 2 { b_href = Some(cur); } a_href = Some(cur);
}
if f.func_idx == 2 {
b_href = Some(cur);
}
} }
} }
// And also consider suspended (Ready/Sleeping) coroutines // And also consider suspended (Ready/Sleeping) coroutines
for h in vm.heap.suspended_coroutine_handles() { for h in vm.heap.suspended_coroutine_handles() {
if let Some(co) = vm.heap.coroutine_data(h) { if let Some(co) = vm.heap.coroutine_data(h) {
if let Some(f) = co.frames.last() { if let Some(f) = co.frames.last() {
if f.func_idx == 1 { a_href = Some(h); } if f.func_idx == 1 {
if f.func_idx == 2 { b_href = Some(h); } a_href = Some(h);
}
if f.func_idx == 2 {
b_href = Some(h);
}
} }
} }
} }
@ -3613,10 +3550,15 @@ mod tests {
let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap(); let _ = vm.run_budget(100, &mut native, &mut ctx).unwrap();
let a_now = vm.heap.coroutine_data(a_href).unwrap().stack.len(); let a_now = vm.heap.coroutine_data(a_href).unwrap().stack.len();
let b_now = vm.heap.coroutine_data(b_href).unwrap().stack.len(); let b_now = vm.heap.coroutine_data(b_href).unwrap().stack.len();
if a_now > prev_a { trace.push(1); } if a_now > prev_a {
else if b_now > prev_b { trace.push(2); } trace.push(1);
else { panic!("no coroutine progress detected this frame"); } } else if b_now > prev_b {
prev_a = a_now; prev_b = b_now; trace.push(2);
} else {
panic!("no coroutine progress detected this frame");
}
prev_a = a_now;
prev_b = b_now;
} }
assert_eq!(trace, vec![1, 2, 1, 2, 1, 2], "Coroutines must strictly alternate under Yield"); assert_eq!(trace, vec![1, 2, 1, 2, 1, 2], "Coroutines must strictly alternate under Yield");
@ -3668,9 +3610,30 @@ mod tests {
let mut vm = new_test_vm(rom.clone(), vec![]); let mut vm = new_test_vm(rom.clone(), vec![]);
vm.program.functions = std::sync::Arc::from(vec![ vm.program.functions = std::sync::Arc::from(vec![
FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, FunctionMeta {
FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, code_offset: off_main as u32,
FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, code_len: main.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
FunctionMeta {
code_offset: off_a as u32,
code_len: fn_a.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
FunctionMeta {
code_offset: off_b as u32,
code_len: fn_b.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
]); ]);
let mut native = MockNative; let mut native = MockNative;
@ -3685,15 +3648,23 @@ mod tests {
let mut b_href = None; let mut b_href = None;
if let Some(cur) = vm.current_coro { if let Some(cur) = vm.current_coro {
if let Some(f) = vm.call_stack.last() { if let Some(f) = vm.call_stack.last() {
if f.func_idx == 1 { a_href = Some(cur); } if f.func_idx == 1 {
if f.func_idx == 2 { b_href = Some(cur); } a_href = Some(cur);
}
if f.func_idx == 2 {
b_href = Some(cur);
}
} }
} }
for h in vm.heap.suspended_coroutine_handles() { for h in vm.heap.suspended_coroutine_handles() {
if let Some(co) = vm.heap.coroutine_data(h) { if let Some(co) = vm.heap.coroutine_data(h) {
if let Some(f) = co.frames.last() { if let Some(f) = co.frames.last() {
if f.func_idx == 1 { a_href = Some(h); } if f.func_idx == 1 {
if f.func_idx == 2 { b_href = Some(h); } a_href = Some(h);
}
if f.func_idx == 2 {
b_href = Some(h);
}
} }
} }
} }
@ -3728,7 +3699,10 @@ mod tests {
// in the following frame, and we observe its heap stack update at end tick = wake_tick + 1. // in the following frame, and we observe its heap stack update at end tick = wake_tick + 1.
// A executes SLEEP at its first run (tick 1), so wake_tick = 1 + N + 1, observed tick = +1. // A executes SLEEP at its first run (tick 1), so wake_tick = 1 + N + 1, observed tick = +1.
let expected_observed_end_tick = 1u64 + sleep_n as u64 + 2u64; let expected_observed_end_tick = 1u64 + sleep_n as u64 + 2u64;
assert_eq!(woke_at_tick, expected_observed_end_tick, "A must wake at the exact tick (+1 frame to observe)"); assert_eq!(
woke_at_tick, expected_observed_end_tick,
"A must wake at the exact tick (+1 frame to observe)"
);
// And B must have produced at least N items (one per frame) before A's wake. // And B must have produced at least N items (one per frame) before A's wake.
assert!(ones_before as u64 >= sleep_n as u64, "B must keep running while A sleeps"); assert!(ones_before as u64 >= sleep_n as u64, "B must keep running while A sleeps");
} }
@ -3776,9 +3750,30 @@ mod tests {
let mut vm1 = new_test_vm(rom.clone(), vec![]); let mut vm1 = new_test_vm(rom.clone(), vec![]);
let mut vm2 = new_test_vm(rom.clone(), vec![]); let mut vm2 = new_test_vm(rom.clone(), vec![]);
let fm: std::sync::Arc<[prometeu_bytecode::FunctionMeta]> = std::sync::Arc::from(vec![ let fm: std::sync::Arc<[prometeu_bytecode::FunctionMeta]> = std::sync::Arc::from(vec![
FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, FunctionMeta {
FunctionMeta { code_offset: off_a as u32, code_len: fn_a.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, code_offset: off_main as u32,
FunctionMeta { code_offset: off_b as u32, code_len: fn_b.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, code_len: main.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
FunctionMeta {
code_offset: off_a as u32,
code_len: fn_a.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
FunctionMeta {
code_offset: off_b as u32,
code_len: fn_b.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
]); ]);
vm1.program.functions = fm.clone(); vm1.program.functions = fm.clone();
vm2.program.functions = fm; vm2.program.functions = fm;
@ -3793,20 +3788,29 @@ mod tests {
// Discover A/B handles in both VMs // Discover A/B handles in both VMs
let find_ab = |vm: &VirtualMachine| { let find_ab = |vm: &VirtualMachine| {
let mut a = None; let mut b = None; let mut a = None;
let mut b = None;
// running // running
if let Some(cur) = vm.current_coro { if let Some(cur) = vm.current_coro {
if let Some(f) = vm.call_stack.last() { if let Some(f) = vm.call_stack.last() {
if f.func_idx == 1 { a = Some(cur); } if f.func_idx == 1 {
if f.func_idx == 2 { b = Some(cur); } a = Some(cur);
}
if f.func_idx == 2 {
b = Some(cur);
}
} }
} }
// suspended // suspended
for h in vm.heap.suspended_coroutine_handles() { for h in vm.heap.suspended_coroutine_handles() {
if let Some(co) = vm.heap.coroutine_data(h) { if let Some(co) = vm.heap.coroutine_data(h) {
if let Some(f) = co.frames.last() { if let Some(f) = co.frames.last() {
if f.func_idx == 1 { a = Some(h); } if f.func_idx == 1 {
if f.func_idx == 2 { b = Some(h); } a = Some(h);
}
if f.func_idx == 2 {
b = Some(h);
}
} }
} }
} }
@ -3826,17 +3830,34 @@ mod tests {
let _ = vm1.run_budget(100, &mut native, &mut c1).unwrap(); let _ = vm1.run_budget(100, &mut native, &mut c1).unwrap();
let a_now = vm1.heap.coroutine_data(a1).unwrap().stack.len(); let a_now = vm1.heap.coroutine_data(a1).unwrap().stack.len();
let b_now = vm1.heap.coroutine_data(b1).unwrap().stack.len(); let b_now = vm1.heap.coroutine_data(b1).unwrap().stack.len();
if a_now > a1_prev { trace1.push(1); } else if b_now > b1_prev { trace1.push(2); } else { panic!("no progress 1"); } if a_now > a1_prev {
a1_prev = a_now; b1_prev = b_now; trace1.push(1);
} else if b_now > b1_prev {
trace1.push(2);
} else {
panic!("no progress 1");
}
a1_prev = a_now;
b1_prev = b_now;
let _ = vm2.run_budget(100, &mut native, &mut c2).unwrap(); let _ = vm2.run_budget(100, &mut native, &mut c2).unwrap();
let a2_now = vm2.heap.coroutine_data(a2).unwrap().stack.len(); let a2_now = vm2.heap.coroutine_data(a2).unwrap().stack.len();
let b2_now = vm2.heap.coroutine_data(b2).unwrap().stack.len(); let b2_now = vm2.heap.coroutine_data(b2).unwrap().stack.len();
if a2_now > a2_prev { trace2.push(1); } else if b2_now > b2_prev { trace2.push(2); } else { panic!("no progress 2"); } if a2_now > a2_prev {
a2_prev = a2_now; b2_prev = b2_now; trace2.push(1);
} else if b2_now > b2_prev {
trace2.push(2);
} else {
panic!("no progress 2");
}
a2_prev = a2_now;
b2_prev = b2_now;
} }
assert_eq!(trace1, trace2, "Execution trace (coroutine IDs) must match exactly across runs"); assert_eq!(
trace1, trace2,
"Execution trace (coroutine IDs) must match exactly across runs"
);
} }
#[test] #[test]
@ -3867,9 +3888,23 @@ mod tests {
let mut vm = new_test_vm(rom.clone(), vec![]); let mut vm = new_test_vm(rom.clone(), vec![]);
vm.program.functions = std::sync::Arc::from(vec![ vm.program.functions = std::sync::Arc::from(vec![
FunctionMeta { code_offset: off_main as u32, code_len: main.len() as u32, param_slots: 0, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, FunctionMeta {
code_offset: off_main as u32,
code_len: main.len() as u32,
param_slots: 0,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
// Function F takes 1 parameter (the HeapRef) which stays on its stack while sleeping // Function F takes 1 parameter (the HeapRef) which stays on its stack while sleeping
FunctionMeta { code_offset: off_f as u32, code_len: fn_f.len() as u32, param_slots: 1, local_slots: 0, return_slots: 0, max_stack_slots: 8 }, FunctionMeta {
code_offset: off_f as u32,
code_len: fn_f.len() as u32,
param_slots: 1,
local_slots: 0,
return_slots: 0,
max_stack_slots: 8,
},
]); ]);
// Force GC at first safepoint to stress retention // Force GC at first safepoint to stress retention
@ -3887,7 +3922,10 @@ mod tests {
assert!(matches!(rep.reason, LogicalFrameEndingReason::FrameSync)); assert!(matches!(rep.reason, LogicalFrameEndingReason::FrameSync));
// The captured object must remain alive because it is referenced by the sleeping coroutine's stack // The captured object must remain alive because it is referenced by the sleeping coroutine's stack
assert!(vm.heap.is_valid(captured), "captured object must remain alive while coroutine sleeps"); assert!(
vm.heap.is_valid(captured),
"captured object must remain alive while coroutine sleeps"
);
} }
#[test] #[test]
@ -3918,7 +3956,10 @@ mod tests {
assert!(vm.halted); assert!(vm.halted);
assert_eq!(vm.operand_stack.len(), 1); assert_eq!(vm.operand_stack.len(), 1);
let top = vm.peek().unwrap().clone(); let top = vm.peek().unwrap().clone();
let href = match top { Value::HeapRef(h) => h, _ => panic!("Expected HeapRef on stack") }; let href = match top {
Value::HeapRef(h) => h,
_ => panic!("Expected HeapRef on stack"),
};
assert!(vm.heap.is_valid(href)); assert!(vm.heap.is_valid(href));
assert_eq!(vm.heap.closure_fn_id(href), Some(7)); assert_eq!(vm.heap.closure_fn_id(href), Some(7));
let env = vm.heap.closure_env_slice(href).expect("env slice"); let env = vm.heap.closure_env_slice(href).expect("env slice");
@ -3961,7 +4002,10 @@ mod tests {
// After HALT, stack must contain only the closure ref // After HALT, stack must contain only the closure ref
assert_eq!(vm.operand_stack.len(), 1); assert_eq!(vm.operand_stack.len(), 1);
let href = match vm.pop().unwrap() { Value::HeapRef(h) => h, _ => panic!("Expected HeapRef") }; let href = match vm.pop().unwrap() {
Value::HeapRef(h) => h,
_ => panic!("Expected HeapRef"),
};
assert_eq!(vm.heap.closure_fn_id(href), Some(9)); assert_eq!(vm.heap.closure_fn_id(href), Some(9));
let env = vm.heap.closure_env_slice(href).expect("env slice"); let env = vm.heap.closure_env_slice(href).expect("env slice");
assert_eq!(env.len(), 3); assert_eq!(env.len(), 3);
@ -3995,8 +4039,18 @@ mod tests {
let mut vm = new_test_vm(rom.clone(), vec![]); let mut vm = new_test_vm(rom.clone(), vec![]);
vm.program.functions = std::sync::Arc::from(vec![ vm.program.functions = std::sync::Arc::from(vec![
FunctionMeta { code_offset: f0_start as u32, code_len: f0_len as u32, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f1_start, code_len: f1_len, param_slots: 1, return_slots: 1, ..Default::default() }, code_offset: f0_start as u32,
code_len: f0_len as u32,
..Default::default()
},
FunctionMeta {
code_offset: f1_start,
code_len: f1_len,
param_slots: 1,
return_slots: 1,
..Default::default()
},
]); ]);
let mut native = MockNative; let mut native = MockNative;
@ -4034,8 +4088,18 @@ mod tests {
let mut vm = new_test_vm(rom.clone(), vec![]); let mut vm = new_test_vm(rom.clone(), vec![]);
vm.program.functions = std::sync::Arc::from(vec![ vm.program.functions = std::sync::Arc::from(vec![
FunctionMeta { code_offset: f0_start as u32, code_len: f0_len as u32, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f1_start, code_len: f1_len, param_slots: 1, return_slots: 1, ..Default::default() }, code_offset: f0_start as u32,
code_len: f0_len as u32,
..Default::default()
},
FunctionMeta {
code_offset: f1_start,
code_len: f1_len,
param_slots: 1,
return_slots: 1,
..Default::default()
},
]); ]);
let mut native = MockNative; let mut native = MockNative;
@ -4062,7 +4126,11 @@ mod tests {
let f0_len = rom.len() - f0_start; let f0_len = rom.len() - f0_start;
let mut vm = new_test_vm(rom.clone(), vec![]); let mut vm = new_test_vm(rom.clone(), vec![]);
vm.program.functions = std::sync::Arc::from(vec![FunctionMeta { code_offset: f0_start as u32, code_len: f0_len as u32, ..Default::default() }]); vm.program.functions = std::sync::Arc::from(vec![FunctionMeta {
code_offset: f0_start as u32,
code_len: f0_len as u32,
..Default::default()
}]);
let mut native = MockNative; let mut native = MockNative;
let mut ctx = HostContext::new(None); let mut ctx = HostContext::new(None);
@ -4073,7 +4141,9 @@ mod tests {
assert_eq!(info.code, TRAP_TYPE); assert_eq!(info.code, TRAP_TYPE);
assert_eq!(info.opcode, OpCode::CallClosure as u16); assert_eq!(info.opcode, OpCode::CallClosure as u16);
} }
other => panic!("Expected Trap(TYPE) from CALL_CLOSURE on non-closure, got {:?}", other), other => {
panic!("Expected Trap(TYPE) from CALL_CLOSURE on non-closure, got {:?}", other)
}
} }
} }
@ -4114,9 +4184,25 @@ mod tests {
let mut vm = new_test_vm(rom.clone(), vec![]); let mut vm = new_test_vm(rom.clone(), vec![]);
vm.program.functions = std::sync::Arc::from(vec![ vm.program.functions = std::sync::Arc::from(vec![
FunctionMeta { code_offset: f0_start as u32, code_len: f0_len as u32, ..Default::default() }, FunctionMeta {
FunctionMeta { code_offset: f1_start, code_len: f1_len, param_slots: 1, return_slots: 1, ..Default::default() }, code_offset: f0_start as u32,
FunctionMeta { code_offset: f2_start, code_len: f2_len, param_slots: 1, return_slots: 1, ..Default::default() }, code_len: f0_len as u32,
..Default::default()
},
FunctionMeta {
code_offset: f1_start,
code_len: f1_len,
param_slots: 1,
return_slots: 1,
..Default::default()
},
FunctionMeta {
code_offset: f2_start,
code_len: f2_len,
param_slots: 1,
return_slots: 1,
..Default::default()
},
]); ]);
let mut native = MockNative; let mut native = MockNative;

View File

@ -1,10 +1,12 @@
//! Deterministic tests for multi-return syscalls with the slot-based ABI. //! Deterministic tests for multi-return syscalls with the slot-based ABI.
use prometeu_bytecode::isa::core::CoreOpCode as OpCode;
use prometeu_bytecode::Value; use prometeu_bytecode::Value;
use prometeu_vm::{HostContext, HostReturn, NativeInterface, VirtualMachine}; use prometeu_bytecode::isa::core::CoreOpCode as OpCode;
use prometeu_hal::vm_fault::VmFault; use prometeu_hal::vm_fault::VmFault;
use prometeu_vm::{HostContext, HostReturn, NativeInterface, VirtualMachine};
fn enc_op(op: OpCode) -> [u8; 2] { (op as u16).to_le_bytes() } fn enc_op(op: OpCode) -> [u8; 2] {
(op as u16).to_le_bytes()
}
#[test] #[test]
fn vm_syscall_multi_return_stack_contents() { fn vm_syscall_multi_return_stack_contents() {
@ -29,7 +31,7 @@ fn vm_syscall_multi_return_stack_contents() {
ret.push_int(11); ret.push_int(11);
ret.push_int(22); ret.push_int(22);
ret.push_bool(true); ret.push_bool(true);
ret.push_bounded(7)?; ret.push_int(7);
Ok(()) Ok(())
} }
} }
@ -45,10 +47,8 @@ fn vm_syscall_multi_return_stack_contents() {
// Verify top-of-stack order: last pushed is on top // Verify top-of-stack order: last pushed is on top
let top = vm.operand_stack_top(4); let top = vm.operand_stack_top(4);
assert_eq!(top, vec![ assert_eq!(
Value::Bounded(7), top,
Value::Boolean(true), vec![Value::Int64(7), Value::Boolean(true), Value::Int64(22), Value::Int64(11),]
Value::Int64(22), );
Value::Int64(11),
]);
} }

View File

@ -29,7 +29,7 @@ This document defines the minimal, stable Core ISA surface for the Prometeu Virt
- Stack manipulation: - Stack manipulation:
- `PUSH_CONST u32` — load constant by index → _pushes `[value]`. - `PUSH_CONST u32` — load constant by index → _pushes `[value]`.
- `PUSH_I64 i64`, `PUSH_F64 f64`, `PUSH_BOOL u8`, `PUSH_I32 i32`, `PUSH_BOUNDED u32(<=0xFFFF)` — push literals. - `PUSH_I64 i64`, `PUSH_F64 f64`, `PUSH_BOOL u8`, `PUSH_I32 i32` — push literals.
- `POP` — pops 1. - `POP` — pops 1.
- `POP_N u32` — pops N. - `POP_N u32` — pops N.
- `DUP``[x] -> [x, x]`. - `DUP``[x] -> [x, x]`.
@ -38,8 +38,6 @@ This document defines the minimal, stable Core ISA surface for the Prometeu Virt
- Arithmetic: - Arithmetic:
- `ADD`, `SUB`, `MUL`, `DIV`, `MOD` — binary numeric ops. - `ADD`, `SUB`, `MUL`, `DIV`, `MOD` — binary numeric ops.
- `NEG` — unary numeric negation. - `NEG` — unary numeric negation.
- `BOUND_TO_INT``[bounded] -> [int64]`.
- `INT_TO_BOUND_CHECKED``[int] -> [bounded]` (traps on overflow 0..65535).
- Comparison and logic: - Comparison and logic:
- `EQ`, `NEQ`, `LT`, `LTE`, `GT`, `GTE` — comparisons → `[bool]`. - `EQ`, `NEQ`, `LT`, `LTE`, `GT`, `GTE` — comparisons → `[bool]`.