update some docs and project clean up (only runtime)

This commit is contained in:
bQUARKz 2026-02-18 01:00:58 +00:00
parent 4f73f46ba9
commit 90ecd77031
Signed by: bquarkz
SSH Key Fingerprint: SHA256:Z7dgqoglWwoK6j6u4QC87OveEq74WOhFN+gitsxtkf8
94 changed files with 1161 additions and 11746 deletions

769
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -1,14 +1,6 @@
[workspace] [workspace]
members = [ members = [
"crates/compiler/languages/prometeu-languages-registry",
"crates/compiler/languages/prometeu-language-pbs",
"crates/compiler/prometeu-build-pipeline",
"crates/compiler/prometeu-bytecode", "crates/compiler/prometeu-bytecode",
"crates/compiler/prometeu-core",
"crates/compiler/prometeu-deps",
"crates/compiler/prometeu-language-api",
"crates/compiler/prometeu-lowering",
"crates/console/prometeu-drivers", "crates/console/prometeu-drivers",
"crates/console/prometeu-firmware", "crates/console/prometeu-firmware",
@ -19,7 +11,6 @@ members = [
"crates/host/prometeu-host-desktop-winit", "crates/host/prometeu-host-desktop-winit",
"crates/tools/prometeu-cli", "crates/tools/prometeu-cli",
"crates/tools/prometeu-lsp",
] ]
resolver = "2" resolver = "2"

View File

@ -1,9 +0,0 @@
[package]
name = "prometeu-language-pbs"
version = "0.1.0"
edition = "2021"
license = "MIT"
description = ""
[dependencies]
prometeu-language-api = { path = "../../prometeu-language-api" }

View File

@ -1,16 +0,0 @@
use std::sync::OnceLock;
use prometeu_language_api::{LanguageSpec, SourcePolicy};
pub static LANGUAGE_SPEC: OnceLock<LanguageSpec> = OnceLock::new();

/// Returns the PBS [`LanguageSpec`], initializing [`LANGUAGE_SPEC`] on first use.
///
/// NOTE(review): this was a *private* fn that nothing called, which left
/// `LANGUAGE_SPEC` permanently uninitialized for downstream crates reading it
/// via `.get()` (the registry crate unwraps that `get`). Exposing it publicly
/// gives consumers an initialization path; private -> public is backward
/// compatible.
pub fn registry() -> &'static LanguageSpec {
    LANGUAGE_SPEC.get_or_init(|| LanguageSpec {
        // Registry key for this language.
        id: "pbs",
        source_policy: SourcePolicy {
            // PBS sources are `.pbs` files.
            extensions: vec!["pbs"],
            case_sensitive: true,
        },
    })
}

View File

@ -1,3 +0,0 @@
//! PBS language crate: exposes the PBS language spec for the registry.
mod language_spec;
pub use language_spec::LANGUAGE_SPEC;

View File

@ -1,11 +0,0 @@
[package]
name = "prometeu-languages-registry"
version = "0.1.0"
edition = "2021"
license = "MIT"
description = ""
[dependencies]
prometeu-language-api = { path = "../../prometeu-language-api" }
prometeu-language-pbs = { path = "../prometeu-language-pbs" }

View File

@ -1,20 +0,0 @@
use prometeu_language_api::LanguageSpec;
use std::collections::HashMap;
use std::sync::OnceLock;
use prometeu_language_pbs::LANGUAGE_SPEC as PBS_LANGUAGE_SPEC;
static REGISTRY: OnceLock<HashMap<&'static str, LanguageSpec>> = OnceLock::new();

/// Lazily builds the id -> spec map on first access.
fn registry() -> &'static HashMap<&'static str, LanguageSpec> {
    REGISTRY.get_or_init(|| {
        // Resolve the PBS spec only while initializing. The previous code
        // evaluated `PBS_LANGUAGE_SPEC.get().unwrap()` eagerly on *every*
        // call, even after the map already existed.
        // NOTE(review): this still panics if the pbs crate never initialized
        // its LANGUAGE_SPEC — confirm an init path runs before registry use.
        let pbs = PBS_LANGUAGE_SPEC
            .get()
            .expect("prometeu-language-pbs LANGUAGE_SPEC not initialized");
        HashMap::from([(pbs.id, pbs.clone())])
    })
}

/// Looks up a language spec by its registry id (e.g. `"pbs"`).
pub fn get_language_spec(id: &str) -> Option<&LanguageSpec> {
    registry().get(id)
}

View File

@ -1,3 +0,0 @@
//! Languages registry crate: id-based lookup of registered language specs.
mod language_spec_registry;
pub use language_spec_registry::get_language_spec;

View File

@ -1,24 +0,0 @@
[package]
name = "prometeu-build-pipeline"
version = "0.1.0"
edition = "2021"
license.workspace = true
repository.workspace = true
[[bin]]
name = "prometeu-build-pipeline"
path = "src/main.rs"
[package.metadata.dist]
dist = true
include = ["../../VERSION.txt"]
[dependencies]
prometeu-deps = { path = "../prometeu-deps" }
prometeu-core = { path = "../prometeu-core" }
prometeu-languages-registry = { path = "../languages/prometeu-languages-registry" }
clap = { version = "4.5.54", features = ["derive"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
anyhow = "1.0.100"
camino = "1.2.2"

View File

@ -1,158 +0,0 @@
use crate::pipeline::run_phases;
use crate::{BuildMode, PipelineConfig, PipelineInput, PipelineOutput};
use anyhow::{Context, Result};
use clap::{Parser, Subcommand};
use prometeu_deps::{load_sources, resolve_workspace, DepsConfig};
use std::path::{Path, PathBuf};
use camino::Utf8Path;
use crate::emit_artifacts::{emit_artifacts, EmitOptions};
/// Command line interface for the Prometeu Compiler.
#[derive(Parser)]
#[command(name = "prometeu")]
#[command(version, about = "PROMETEU toolchain entrypoint", long_about = None)]
pub struct Cli {
    // Subcommand to execute (Build/Verify). Plain comment on purpose: a doc
    // comment here would change clap's generated help text.
    #[command(subcommand)]
    pub command: Commands,
}
/// Available subcommands for the compiler.
#[derive(Subcommand)]
pub enum Commands {
    /// Builds a Prometeu project by compiling source code into an artifact (pbc/program image).
    Build {
        /// Path to the project root directory.
        project_dir: PathBuf,
        /// Path to save the compiled artifact.
        /// If omitted, deps/pipeline decide a default under target/ or dist/.
        #[arg(short, long)]
        out: Option<PathBuf>,
        /// Whether to generate a .json symbols file for source mapping.
        #[arg(long, default_value_t = true)]
        emit_symbols: bool,
        /// Whether to generate a .disasm file for debugging.
        #[arg(long, default_value_t = true)]
        emit_disasm: bool,
        /// Whether to explain the dependency resolution process.
        #[arg(long)]
        explain_deps: bool,
        /// Build mode (debug/release).
        // NOTE(review): parse_mode also accepts "test" — help text omits it.
        #[arg(long, default_value = "debug")]
        mode: String,
    },
    /// Verifies if a Prometeu project is valid without emitting code.
    Verify {
        // Path to the project root directory (plain comment: doc comments on
        // clap fields become help text).
        project_dir: PathBuf,
        /// Whether to explain the dependency resolution process.
        #[arg(long)]
        explain_deps: bool,
    },
}
pub fn run() -> Result<()> {
let cli = Cli::parse();
match cli.command {
Commands::Build {
project_dir,
out,
emit_disasm,
emit_symbols,
explain_deps,
mode,
} => {
let build_mode = parse_mode(&mode)?;
let cfg = PipelineConfig {
mode: build_mode,
enable_cache: true,
enable_frontends: false,
};
let pipeline_output = run_pipeline(cfg, &project_dir, explain_deps)
.context("pipeline: failed to execute pipeline")?;
for diagnostics in &pipeline_output.diagnostics {
eprintln!("{:?}", diagnostics);
}
let emit_opts = EmitOptions {
out,
emit_symbols,
emit_disasm,
};
emit_artifacts(&emit_opts, &pipeline_output)
.context("emit: failed to write artifacts")?;
if pipeline_output.diagnostics.iter().any(|d| d.severity.is_error()) {
anyhow::bail!("build failed due to errors");
}
}
Commands::Verify {
project_dir,
explain_deps,
} => {
let cfg = PipelineConfig {
mode: BuildMode::Test,
enable_cache: true,
enable_frontends: false,
};
let pipeline_output = run_pipeline(cfg, &project_dir, explain_deps)
.context("pipeline: failed to execute pipeline")?;
for diagnostic in &pipeline_output.diagnostics {
eprintln!("{:?}", diagnostic);
}
if pipeline_output.diagnostics.iter().any(|d| d.severity.is_error()) {
anyhow::bail!("verify failed due to errors");
}
}
}
Ok(())
}
/// Resolves the workspace, loads its sources via deps, and runs the phase
/// pipeline over the result.
fn run_pipeline(cfg: PipelineConfig, project_dir: &Path, explain_deps: bool) -> Result<PipelineOutput> {
    let deps_cfg = DepsConfig {
        explain: explain_deps,
        cache_dir: Default::default(),
        registry_dirs: vec![],
    };

    // deps works with UTF-8 paths; reject non-UTF-8 project dirs up front.
    let utf8_project_dir = Utf8Path::from_path(project_dir)
        .with_context(|| format!("deps: failed to convert project_dir to Utf8Path: {:?}", project_dir))?;

    let resolved = resolve_workspace(&deps_cfg, utf8_project_dir)
        .with_context(|| format!("deps: failed to resolve project at {:?}", project_dir))?;
    let sources = load_sources(&deps_cfg, &resolved)
        .context("deps: failed to load sources")?;

    let input = PipelineInput {
        graph: resolved.graph,
        stack: resolved.stack,
        sources,
    };
    Ok(run_phases(cfg, input))
}
/// Parse `--mode` from CLI. Accepts debug/release/test, case-insensitively.
fn parse_mode(s: &str) -> Result<BuildMode> {
    let normalized = s.to_ascii_lowercase();
    if normalized == "debug" {
        Ok(BuildMode::Debug)
    } else if normalized == "release" {
        Ok(BuildMode::Release)
    } else if normalized == "test" {
        Ok(BuildMode::Test)
    } else {
        // Error reports the normalized (lowercased) value, as before.
        anyhow::bail!("invalid --mode '{}': expected debug|release|test", normalized)
    }
}

View File

@ -1,23 +0,0 @@
/// Build mode selected via the CLI `--mode` flag (see `parse_mode`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BuildMode {
    Debug,
    Release,
    Test,
}
/// Knobs controlling a single pipeline run.
#[derive(Debug, Clone)]
pub struct PipelineConfig {
    /// Build mode (debug/release/test).
    pub mode: BuildMode,
    /// Whether cached build products may be reused.
    pub enable_cache: bool,
    /// Whether language frontends run; off by default during the Hard Reset.
    pub enable_frontends: bool,
}
impl Default for PipelineConfig {
    fn default() -> Self {
        Self {
            mode: BuildMode::Debug,
            enable_cache: true,
            enable_frontends: false, // Hard Reset default: pipeline runs with no FE.
        }
    }
}

View File

@ -1,71 +0,0 @@
use std::any::Any;
use prometeu_core::{Diagnostic, FileDB, FileId, NameInterner, ProjectId};
use prometeu_deps::BuildStack;
/// Per-project arena slot created from the BuildStack order.
/// The pipeline owns this vector and indexes it by stack position.
#[derive(Debug)]
pub struct ProjectCtx {
    /// Identity of the project this slot belongs to.
    pub project_id: ProjectId,
    /// FileIds inserted into `source_db` for this project.
    pub files: Vec<FileId>,
    /// Frontend output (TypedHIRBundle or similar) - intentionally opaque.
    pub frontend_out: Option<Box<dyn Any>>,
    /// Backend output (ProgramImage / BytecodeModule / Artifact).
    /// Keep as opaque until you finalize your bytecode/image crate.
    pub backend_out: Option<Box<dyn Any>>,
}
impl ProjectCtx {
    /// Creates an empty slot for `project_id`: no files, no FE/BE output yet.
    pub fn new(project_id: ProjectId) -> Self {
        Self {
            frontend_out: None,
            backend_out: None,
            files: Vec::new(),
            project_id,
        }
    }
}
/// Mutable state shared by all pipeline phases.
#[derive(Debug)]
pub struct PipelineCtx {
    /// All loaded source files, shared across projects.
    pub source_db: FileDB,
    /// Shared string interner.
    pub interner: NameInterner,
    /// Diagnostics accumulated by every phase.
    pub diagnostics: Vec<Diagnostic>,
    /// One slot per project, indexed by BuildStack position.
    pub projects: Vec<ProjectCtx>,
}
impl PipelineCtx {
    /// Creates an empty context with no projects registered.
    pub fn new() -> Self {
        Self {
            source_db: FileDB::new(),
            interner: NameInterner::new(),
            diagnostics: Vec::new(),
            projects: Vec::new(),
        }
    }

    /// Appends a diagnostic to the run-wide list.
    pub fn push_diagnostic(&mut self, d: Diagnostic) {
        self.diagnostics.push(d);
    }

    /// Initialize per-project contexts from the BuildStack order.
    pub fn init_projects_from_stack(&mut self, stack: &BuildStack) {
        self.projects.clear();
        self.projects.reserve(stack.projects.len());
        // ProjectId is Copy (see define_id!); the old `.clone()` tripped
        // clippy::clone_on_copy.
        for &project_id in &stack.projects {
            self.projects.push(ProjectCtx::new(project_id));
        }
    }

    /// Mutable access to the slot at `index_in_stack` (panics if out of range).
    pub fn project_ctx_mut(&mut self, index_in_stack: usize) -> &mut ProjectCtx {
        &mut self.projects[index_in_stack]
    }

    /// Shared access to the slot at `index_in_stack` (panics if out of range).
    pub fn project_ctx(&self, index_in_stack: usize) -> &ProjectCtx {
        &self.projects[index_in_stack]
    }
}

// `new` exists, so provide `Default` too (clippy::new_without_default).
impl Default for PipelineCtx {
    fn default() -> Self {
        Self::new()
    }
}

View File

@ -1,17 +0,0 @@
use std::path::PathBuf;
use crate::PipelineOutput;
/// Options controlling which artifacts are written and where.
pub struct EmitOptions {
    /// Output path override; `None` lets the pipeline pick a default.
    pub(crate) out: Option<PathBuf>,
    /// Emit a .json symbols file for source mapping.
    pub(crate) emit_symbols: bool,
    /// Emit a .disasm listing for debugging.
    pub(crate) emit_disasm: bool,
}
/// Writes build artifacts to disk. Currently a stub: it accepts options and
/// pipeline output but performs no IO and always succeeds.
pub fn emit_artifacts(_opts: &EmitOptions, _outp: &PipelineOutput) -> anyhow::Result<()> {
    // Later:
    // - decide output dir (opts.out or default)
    // - write .pbc / program image
    // - write symbols.json (if exists)
    // - write disasm (if exists)
    Ok(())
}

View File

@ -1,12 +0,0 @@
//! Prometeu build pipeline: CLI entrypoint, phase orchestration, and
//! per-run context/configuration.
pub mod cli;
pub mod config;
pub mod ctx;
pub mod pipeline;
pub mod phases;
mod emit_artifacts;
pub use config::*;
pub use ctx::*;
pub use pipeline::*;
pub use cli::run;

View File

@ -1,7 +0,0 @@
use anyhow::Result;
/// Main entry point for the Prometeu Compiler binary.
/// It delegates execution to the library's `run` function; any error is
/// reported by the default `Result` termination handling.
fn main() -> Result<()> {
    prometeu_build_pipeline::run()
}

View File

@ -1,12 +0,0 @@
use crate::{
config::PipelineConfig,
ctx::PipelineCtx,
pipeline::{PipelineInput},
};
/// Boot phase: allocates one `ProjectCtx` per project, in BuildStack order.
pub fn run(_cfg: &PipelineConfig, input: &PipelineInput, ctx: &mut PipelineCtx) {
    // Arena init: one ProjectCtx per project in stack order.
    ctx.init_projects_from_stack(&input.stack);
    // NOTE: no filesystem, no FE/BE assumptions here.
}

View File

@ -1,7 +0,0 @@
use crate::{config::PipelineConfig, ctx::PipelineCtx, pipeline::{Artifacts, PipelineInput}};
/// Emit phase. Stub: always returns empty `Artifacts` until a backend exists.
pub fn run(_cfg: &PipelineConfig, _input: &PipelineInput, _ctx: &mut PipelineCtx) -> Artifacts {
    // Hard Reset stub:
    // - later: emit build outputs (to FS via deps if you want strict IO centralization).
    Artifacts::default()
}

View File

@ -1,11 +0,0 @@
use crate::{config::PipelineConfig, ctx::PipelineCtx, pipeline::PipelineInput};
/// Frontend phase. Does nothing unless `enable_frontends` is set — and even
/// then no frontend is wired up yet.
pub fn run(cfg: &PipelineConfig, _input: &PipelineInput, _ctx: &mut PipelineCtx) {
    if !cfg.enable_frontends {
        return;
    }
    // Hard Reset:
    // - no FE wired yet.
    // - later: iterate projects in stack order and call FE plugin(s).
}

View File

@ -1,117 +0,0 @@
use prometeu_core::{Diagnostic, Severity, Span};
use prometeu_deps::LoadedSources;
use crate::{
config::PipelineConfig,
ctx::PipelineCtx,
pipeline::PipelineInput,
};
/// Load-source phase: copies loaded sources into the pipeline context, then
/// warns about any project that ended up with no source files.
pub fn run(_cfg: &PipelineConfig, input: &PipelineInput, ctx: &mut PipelineCtx) {
    load_sources(&input.sources, ctx);
    for i in 0..ctx.projects.len() {
        if ctx.projects[i].files.is_empty() {
            let project_id = &input.stack.projects[i];
            // Robustness fix: the previous `.unwrap()` panicked whenever the
            // graph and stack disagreed; fall back to the raw id instead so
            // the warning is still emitted.
            let project_name = input
                .graph
                .project(project_id)
                .map(|p| p.name.clone())
                .unwrap_or_else(|| format!("{:?}", project_id));
            ctx.push_diagnostic(Diagnostic {
                severity: Severity::Warning,
                code: "PIPELINE_NO_SOURCES".into(),
                message: format!(
                    "Project '{}' has no source files loaded.",
                    project_name
                ),
                span: Span::none(),
                related: vec![],
            });
        }
    }
}
/// Transfers `LoadedSources` into the pipeline context, aligning each entry
/// with the BuildStack slot at the same index. Misalignments are reported as
/// diagnostics instead of panicking, so one run surfaces as many problems as
/// possible.
fn load_sources(sources: &LoadedSources, ctx: &mut PipelineCtx) {
    let stack_len = ctx.projects.len();
    let src_len = sources.per_project.len();
    // 1) Diagnostic if sizes don't match
    if src_len != stack_len {
        ctx.push_diagnostic(Diagnostic {
            severity: Severity::Error,
            code: "PIPELINE_SOURCES_STACK_LEN_MISMATCH".into(),
            message: format!(
                "LoadedSources.per_project len ({}) does not match BuildStack len ({}).",
                src_len, stack_len
            ),
            span: Span::none(),
            related: vec![],
        });
    }
    // 2) Process the bare minimum (don't panic, just keep running with diagnostics)
    let n = stack_len.min(src_len);
    for i in 0..n {
        let expected = ctx.projects[i].project_id;
        let got = sources.per_project[i].project_id;
        if got != expected {
            ctx.push_diagnostic(Diagnostic {
                severity: Severity::Error,
                code: "PIPELINE_SOURCES_STACK_ORDER_MISMATCH".into(),
                message: format!(
                    "LoadedSources is not aligned with BuildStack at index {}: expected project_id {:?}, got {:?}.",
                    i, expected, got
                ),
                span: Span::none(),
                related: vec![],
            });
            // there is no fix tolerance here, if it is wrong, it is wrong
            // just catch as much diagnostics as possible before "crashing"
            continue;
        }
        // Register each file in the shared FileDB and record its id in the
        // project's slot.
        for f in &sources.per_project[i].files {
            let file_id = ctx.source_db.upsert(&f.uri, &f.text);
            ctx.projects[i].files.push(file_id);
        }
    }
    // 3) If any LoadedSources entry remains, it is a deps bug
    if src_len > stack_len {
        for extra in &sources.per_project[stack_len..] {
            ctx.push_diagnostic(Diagnostic {
                severity: Severity::Error,
                code: "PIPELINE_SOURCES_EXTRA_PROJECT".into(),
                message: format!(
                    "LoadedSources contains extra project_id {:?} not present in BuildStack.",
                    extra.project_id
                ),
                span: Span::none(),
                related: vec![],
            });
        }
    }
    // 4) If missing inputs, it is another deps bug...
    if stack_len > src_len {
        // Collected into a temporary Vec first: iterating `ctx.projects`
        // borrows `ctx` immutably, so `ctx.push_diagnostic` (mutable borrow)
        // cannot be called inside the same loop.
        let mut diagnostics: Vec<Diagnostic> = Vec::new();
        for missing in &ctx.projects[src_len..] {
            diagnostics.push(Diagnostic {
                severity: Severity::Error,
                code: "PIPELINE_SOURCES_MISSING_PROJECT".into(),
                message: format!(
                    "LoadedSources missing sources for project_id {:?} present in BuildStack.",
                    missing.project_id
                ),
                span: Span::none(),
                related: vec![],
            });
        }
        for diagnostic in diagnostics {
            ctx.push_diagnostic(diagnostic);
        }
    }
}

View File

@ -1,6 +0,0 @@
use crate::{config::PipelineConfig, ctx::PipelineCtx, pipeline::PipelineInput};
/// Lowering (backend) phase. Stub: intentionally does nothing yet.
pub fn run(_cfg: &PipelineConfig, _input: &PipelineInput, _ctx: &mut PipelineCtx) {
    // Hard Reset stub:
    // - later: consume TypedHIRBundle(s) and lower into ProgramImage/BytecodeModule.
}

View File

@ -1,5 +0,0 @@
//! Pipeline phases, listed in execution order (see `run_phases`).
pub mod boot;
pub mod load_source;
pub mod language;
pub mod lowering;
pub mod emit;

View File

@ -1,59 +0,0 @@
use crate::{config::PipelineConfig, ctx::PipelineCtx, phases};
use prometeu_core::Diagnostic;
use prometeu_deps::{BuildStack, LoadedSources, ResolvedGraph};
/// Everything the pipeline needs from deps: the resolved project graph, the
/// build order, and already-loaded sources (no IO happens inside the pipeline).
#[derive(Debug, Clone)]
pub struct PipelineInput {
    pub graph: ResolvedGraph,
    pub stack: BuildStack,
    pub sources: LoadedSources
}
/// Simple counters describing what a run processed.
#[derive(Debug, Default, Clone)]
pub struct PipelineStats {
    /// Number of project slots in the run.
    pub projects_count: usize,
    /// Total files registered across all projects.
    pub files_count: usize,
}
/// Build products of a run.
#[derive(Debug, Default, Clone)]
pub struct Artifacts {
    // placeholder: later include produced ProgramImage(s), debug bundles, logs, etc.
}
/// Result of a full pipeline run: diagnostics, artifacts, and stats.
#[derive(Debug, Default)]
pub struct PipelineOutput {
    pub diagnostics: Vec<Diagnostic>,
    pub artifacts: Artifacts,
    pub stats: PipelineStats,
}
pub(crate) fn run_phases(cfg: PipelineConfig, input: PipelineInput) -> PipelineOutput {
let mut ctx = PipelineCtx::new();
// Boot: create project slots in arena order.
phases::boot::run(&cfg, &input, &mut ctx);
// Load source: populate FileDB from LoadedSources.
phases::load_source::run(&cfg, &input, &mut ctx);
// Frontend phase (stub / optional).
phases::language::run(&cfg, &input, &mut ctx);
// Backend phase (stub).
phases::lowering::run(&cfg, &input, &mut ctx);
// Emit phase (stub).
let artifacts = phases::emit::run(&cfg, &input, &mut ctx);
// Stats (basic).
let mut stats = PipelineStats::default();
stats.projects_count = ctx.projects.len();
stats.files_count = ctx.projects.iter().map(|p| p.files.len()).sum();
PipelineOutput {
diagnostics: ctx.diagnostics,
artifacts,
stats,
}
}

View File

@ -1,18 +1,3 @@
//! # Prometeu Bytecode (PBC)
//!
//! This crate defines the core Application Binary Interface (ABI) and Instruction Set Architecture (ISA)
//! for the Prometeu Virtual Machine (PVM).
//!
//! It serves as the "source of truth" for how programs are structured, encoded, and executed
//! within the ecosystem.
//!
//! ## Core Components:
//! - [`opcode`]: Defines the available instructions and their performance characteristics.
//! - [`abi`]: Specifies the binary rules for operands and stack behavior.
//! - [`asm`]: Provides a programmatic Assembler to convert high-level instructions to bytes.
//! - [`disasm`]: Provides a Disassembler to inspect compiled bytecode.
//! - [`readwrite`]: Internal utilities for Little-Endian binary I/O.
pub mod opcode; pub mod opcode;
pub mod opcode_spec; pub mod opcode_spec;
pub mod abi; pub mod abi;

View File

@ -1,10 +0,0 @@
[package]
name = "prometeu-core"
version = "0.1.0"
edition = "2024"
license.workspace = true
[dependencies]
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
prometeu-bytecode = { path = "../prometeu-bytecode" }

View File

@ -1,3 +0,0 @@
mod source;
pub use source::*;

View File

@ -1,81 +0,0 @@
use serde::{Serialize, Serializer};
use crate::Span;
/// Diagnostic severity; only errors fail a build.
#[derive(Debug, Clone, PartialEq)]
pub enum Severity {
    Error,
    Warning,
}
impl Severity {
    /// True only for [`Severity::Error`].
    pub fn is_error(&self) -> bool {
        matches!(self, Severity::Error)
    }
}
impl Serialize for Severity {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match self {
Severity::Error => serializer.serialize_str("error"),
Severity::Warning => serializer.serialize_str("warning"),
}
}
}
/// A single compiler message tied to a source span.
#[derive(Debug, Clone, Serialize)]
pub struct Diagnostic {
    pub severity: Severity,
    /// Stable machine-readable code, e.g. "PIPELINE_NO_SOURCES".
    pub code: String,
    /// Human-readable description.
    pub message: String,
    /// Primary location; `Span::none()` when no location applies.
    pub span: Span,
    /// Secondary notes, each with its own location.
    pub related: Vec<(String, Span)>,
}
/// An ordered collection of diagnostics.
#[derive(Debug, Clone, Serialize)]
pub struct DiagnosticBundle {
    pub diagnostics: Vec<Diagnostic>,
}
impl DiagnosticBundle {
    /// Creates an empty bundle.
    pub fn new() -> Self {
        Self {
            diagnostics: Vec::new(),
        }
    }

    /// Appends one diagnostic.
    pub fn push(&mut self, diagnostic: Diagnostic) {
        self.diagnostics.push(diagnostic);
    }

    /// Convenience constructor: a bundle holding a single error diagnostic
    /// with no related notes.
    pub fn error(code: &str, message: String, span: Span) -> Self {
        let mut bundle = Self::new();
        bundle.push(Diagnostic {
            severity: Severity::Error,
            code: code.to_string(),
            message,
            span,
            related: Vec::new(),
        });
        bundle
    }

    /// True if any contained diagnostic is an error.
    pub fn has_errors(&self) -> bool {
        self.diagnostics
            .iter()
            .any(|d| matches!(d.severity, Severity::Error))
    }
}

// `new` exists, so provide `Default` too (clippy::new_without_default).
impl Default for DiagnosticBundle {
    fn default() -> Self {
        Self::new()
    }
}
impl From<Diagnostic> for DiagnosticBundle {
    /// Wraps a single diagnostic in a one-element bundle.
    fn from(diagnostic: Diagnostic) -> Self {
        Self {
            diagnostics: vec![diagnostic],
        }
    }
}

View File

@ -1,69 +0,0 @@
use std::collections::HashMap;
use crate::FileId;
use crate::LineIndex;
/// Stores file contents keyed by stable `FileId`s, with uri lookup and a
/// cached `LineIndex` per file.
#[derive(Default, Debug)]
pub struct FileDB {
    /// File records, indexed by `FileId.0`.
    files: Vec<FileData>,
    /// Reverse lookup: uri -> id.
    uri_to_id: HashMap<String, FileId>,
}
/// One stored file: its uri, full text, and precomputed line index.
#[derive(Debug)]
struct FileData {
    uri: String,
    text: String,
    line_index: LineIndex,
}
impl FileDB {
    /// Creates an empty database.
    pub fn new() -> Self {
        Self {
            files: Vec::new(),
            uri_to_id: HashMap::new(),
        }
    }

    /// Inserts or replaces the file at `uri`, returning its id. The id of an
    /// existing uri is stable across updates.
    pub fn upsert(&mut self, uri: &str, text: &str) -> FileId {
        // Build the record once: previously this construction was duplicated
        // in both branches, and `LineIndex::new(&text)` took a needless
        // `&&str` borrow.
        let data = FileData {
            uri: uri.to_owned(),
            text: text.to_owned(),
            line_index: LineIndex::new(text),
        };
        if let Some(&id) = self.uri_to_id.get(uri) {
            self.files[id.0 as usize] = data;
            id
        } else {
            let id = FileId(self.files.len() as u32);
            self.files.push(data);
            self.uri_to_id.insert(uri.to_owned(), id);
            id
        }
    }

    /// Looks up the id for a uri, if known.
    pub fn file_id(&self, uri: &str) -> Option<FileId> {
        self.uri_to_id.get(uri).copied()
    }

    /// Returns the uri for `id` (panics on an unknown id).
    pub fn uri(&self, id: FileId) -> &str {
        &self.files[id.0 as usize].uri
    }

    /// Returns the full text for `id` (panics on an unknown id).
    pub fn text(&self, id: FileId) -> &str {
        &self.files[id.0 as usize].text
    }

    /// Returns the cached line index for `id` (panics on an unknown id).
    pub fn line_index(&self, id: FileId) -> &LineIndex {
        &self.files[id.0 as usize].line_index
    }

    /// Returns a list of all known file IDs in insertion order.
    pub fn all_files(&self) -> Vec<FileId> {
        (0..self.files.len()).map(|i| FileId(i as u32)).collect()
    }
}

View File

@ -1,60 +0,0 @@
/// Declares a `u32`-backed newtype id: `repr(transparent)`, hashable, with a
/// `NONE` sentinel (`u32::MAX`) and `From` conversions in both directions.
macro_rules! define_id {
    ($name:ident) => {
        #[repr(transparent)]
        #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, serde::Serialize, serde::Deserialize)]
        pub struct $name(pub u32);
        impl $name {
            /// Sentinel meaning "no value"; max u32 keeps 0 a valid id.
            pub const NONE: $name = $name(u32::MAX);
            #[inline]
            pub const fn as_u32(self) -> u32 { self.0 }
            /// True when this id equals the `NONE` sentinel.
            #[inline]
            pub fn is_none(self) -> bool {
                self == $name::NONE
            }
        }
        impl From<u32> for $name {
            #[inline]
            fn from(value: u32) -> Self { Self(value) }
        }
        impl From<$name> for u32 {
            #[inline]
            fn from(value: $name) -> Self { value.0 }
        }
    };
}
define_id!(FileId);
define_id!(NodeId);
define_id!(NameId);
define_id!(SymbolId);
define_id!(TypeId);
define_id!(ModuleId);
define_id!(ProjectId);
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;
    use std::mem::size_of;
    // Ids must stay exactly 4 bytes (repr(transparent) over u32) and remain
    // usable as hash-map keys.
    #[test]
    fn ids_are_repr_transparent_and_hashable() {
        assert_eq!(size_of::<FileId>(), 4);
        assert_eq!(size_of::<NodeId>(), 4);
        assert_eq!(size_of::<NameId>(), 4);
        assert_eq!(size_of::<SymbolId>(), 4);
        assert_eq!(size_of::<TypeId>(), 4);
        assert_eq!(size_of::<ModuleId>(), 4);
        assert_eq!(size_of::<ProjectId>(), 4);
        // Hash/Eq usage
        let mut m: HashMap<SymbolId, &str> = HashMap::new();
        m.insert(SymbolId(1), "one");
        assert_eq!(m.get(&SymbolId(1)).copied(), Some("one"));
    }
}

View File

@ -1,41 +0,0 @@
/// Maps between byte offsets and 0-based (line, column) pairs for one text
/// buffer. Columns are byte offsets from the start of the line.
#[derive(Debug)]
pub struct LineIndex {
    /// Byte offset of the first byte of each line; always begins with 0.
    line_starts: Vec<u32>,
    /// Total length of the indexed text, in bytes.
    total_len: u32,
}
impl LineIndex {
    /// Indexes `text` by recording the byte offset after every `'\n'`.
    pub fn new(text: &str) -> Self {
        let mut line_starts = vec![0u32];
        // '\n' is ASCII, so scanning bytes is equivalent to scanning chars.
        line_starts.extend(
            text.bytes()
                .enumerate()
                .filter(|&(_, b)| b == b'\n')
                .map(|(i, _)| (i + 1) as u32),
        );
        Self {
            line_starts,
            total_len: text.len() as u32,
        }
    }

    /// Converts a byte offset into a (line, column) pair.
    pub fn offset_to_line_col(&self, offset: u32) -> (u32, u32) {
        // Index of the last line start that is <= offset.
        let line = self.line_starts.partition_point(|&start| start <= offset) - 1;
        let col = offset - self.line_starts[line];
        (line as u32, col)
    }

    /// Converts (line, column) back into a byte offset, or `None` when the
    /// position lies outside the text.
    pub fn line_col_to_offset(&self, line: u32, col: u32) -> Option<u32> {
        let start = *self.line_starts.get(line as usize)?;
        let offset = start + col;
        let next_start = self
            .line_starts
            .get(line as usize + 1)
            .copied()
            .unwrap_or(self.total_len);
        // Valid if inside the line, or exactly at EOF on the final line.
        if offset < next_start || (offset == next_start && offset == self.total_len) {
            Some(offset)
        } else {
            None
        }
    }
}

View File

@ -1,13 +0,0 @@
//! Core shared types: ids, spans, file database, interner, diagnostics.
mod ids;
mod span;
mod file_db;
mod name_interner;
mod diagnostics;
mod line_index;
pub use ids::*;
pub use span::Span;
pub use file_db::FileDB;
pub use line_index::LineIndex;
pub use name_interner::NameInterner;
pub use diagnostics::*;

View File

@ -1,56 +0,0 @@
use std::collections::HashMap;
use crate::NameId;
/// Interns strings into dense `NameId`s; resolution is an O(1) index.
#[derive(Debug, Default, Clone)]
pub struct NameInterner {
    /// Interned strings, indexed by `NameId.0`.
    names: Vec<String>,
    /// Reverse lookup from string to its id.
    ids: HashMap<String, NameId>,
}
impl NameInterner {
    /// Creates an empty interner.
    pub fn new() -> Self {
        Self {
            names: Vec::new(),
            ids: HashMap::new(),
        }
    }

    /// Returns the id for `s`, interning it on first sight.
    pub fn intern(&mut self, s: &str) -> NameId {
        if let Some(&existing) = self.ids.get(s) {
            return existing;
        }
        let id = NameId(self.names.len() as u32);
        self.names.push(s.to_owned());
        self.ids.insert(s.to_owned(), id);
        id
    }

    /// Looks up an id without interning.
    pub fn get(&self, s: &str) -> Option<NameId> {
        self.ids.get(s).copied()
    }

    /// Returns the string previously interned under `id` (panics if unknown).
    pub fn resolve(&self, id: NameId) -> &str {
        &self.names[id.0 as usize]
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Interning then resolving returns the original string.
    #[test]
    fn interner_intern_resolve_roundtrip() {
        let mut interner = NameInterner::new();
        let id = interner.intern("foo");
        assert_eq!(interner.resolve(id), "foo");
    }
    // Interning the same string twice yields the same id.
    #[test]
    fn interner_dedups_strings() {
        let mut interner = NameInterner::new();
        let id1 = interner.intern("bar");
        let id2 = interner.intern("bar");
        assert_eq!(id1, id2);
    }
}

View File

@ -1,39 +0,0 @@
use crate::FileId;
/// A half-open byte range `[start, end)` inside one file.
#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct Span {
    pub file: FileId,
    pub start: u32, // byte offset
    pub end: u32, // byte offset, exclusive
}
impl Span {
    /// Builds a span covering `[start, end)` in `file`.
    #[inline]
    pub fn new(file: FileId, start: u32, end: u32) -> Self {
        Self { file, start, end }
    }

    /// The sentinel span: attached to no file, zero-length.
    #[inline]
    pub fn none() -> Self {
        Self::new(FileId::NONE, 0, 0)
    }

    /// True for the sentinel produced by [`Span::none`].
    #[inline]
    pub fn is_none(&self) -> bool {
        self.file.is_none()
    }

    /// Length in bytes; zero when `end < start`.
    #[inline]
    pub fn len(&self) -> u32 {
        self.end.saturating_sub(self.start)
    }

    /// True when `byte` lies inside the half-open range `[start, end)`.
    #[inline]
    pub fn contains(&self, byte: u32) -> bool {
        (self.start..self.end).contains(&byte)
    }
}

View File

@ -1,69 +0,0 @@
use prometeu_core::{FileDB, LineIndex};
// Every character offset must survive offset -> (line, col) -> offset.
#[test]
fn test_line_index_roundtrip() {
    let text = "line 1\nline 2\nline 3";
    let index = LineIndex::new(text);
    // Roundtrip for each character
    for (offset, _) in text.char_indices() {
        let (line, col) = index.offset_to_line_col(offset as u32);
        let recovered_offset = index.line_col_to_offset(line, col).expect("Should recover offset");
        assert_eq!(offset as u32, recovered_offset, "Offset mismatch at line {}, col {}", line, col);
    }
}
// Pin down line/column behavior at newline boundaries and EOF.
#[test]
fn test_line_index_boundaries() {
    let text = "a\nbc\n";
    let index = LineIndex::new(text);
    // "a" -> (0, 0)
    assert_eq!(index.offset_to_line_col(0), (0, 0));
    assert_eq!(index.line_col_to_offset(0, 0), Some(0));
    // "\n" -> (0, 1)
    assert_eq!(index.offset_to_line_col(1), (0, 1));
    assert_eq!(index.line_col_to_offset(0, 1), Some(1));
    // "b" -> (1, 0)
    assert_eq!(index.offset_to_line_col(2), (1, 0));
    assert_eq!(index.line_col_to_offset(1, 0), Some(2));
    // "c" -> (1, 1)
    assert_eq!(index.offset_to_line_col(3), (1, 1));
    assert_eq!(index.line_col_to_offset(1, 1), Some(3));
    // "\n" (second) -> (1, 2)
    assert_eq!(index.offset_to_line_col(4), (1, 2));
    assert_eq!(index.line_col_to_offset(1, 2), Some(4));
    // EOF (after last \n) -> (2, 0)
    assert_eq!(index.offset_to_line_col(5), (2, 0));
    assert_eq!(index.line_col_to_offset(2, 0), Some(5));
    // Out of bounds
    assert_eq!(index.line_col_to_offset(2, 1), None);
    assert_eq!(index.line_col_to_offset(3, 0), None);
}
// FileDB round-trip: insert, look up by uri/id, then update in place.
#[test]
fn test_file_db_upsert_and_access() {
    let mut db = FileDB::new();
    let uri = "file:///test.txt";
    let text = "hello\nworld".to_string();
    // BUG FIX: `upsert` takes `&str`; passing `text.clone()` (a `String`)
    // did not type-check. Borrow instead of cloning.
    let id = db.upsert(uri, &text);
    assert_eq!(db.file_id(uri), Some(id));
    assert_eq!(db.uri(id), uri);
    assert_eq!(db.text(id), &text);
    let index = db.line_index(id);
    assert_eq!(index.offset_to_line_col(6), (1, 0)); // 'w' is at offset 6
    // Update existing file: the id must be stable and the text replaced.
    let new_text = "new content".to_string();
    let same_id = db.upsert(uri, &new_text);
    assert_eq!(id, same_id);
    assert_eq!(db.text(id), &new_text);
}

View File

@ -1,14 +0,0 @@
use prometeu_core::{FileId, Span};
// Spans are half-open: `end` is excluded from both `len` and `contains`.
#[test]
fn span_end_is_exclusive() {
    let file = FileId(1);
    let s = Span::new(file, 2, 5);
    // len = end - start
    assert_eq!(s.len(), 3);
    // contains is [start, end)
    assert!(s.contains(2));
    assert!(s.contains(3));
    assert!(s.contains(4));
    assert!(!s.contains(5));
}

View File

@ -1,19 +0,0 @@
[package]
name = "prometeu-deps"
version = "0.1.0"
edition = "2021"
license = "MIT"
description = ""
[dependencies]
serde = { version = "1.0.228", features = ["derive"] }
prometeu-core = { path = "../prometeu-core" }
prometeu-language-api = { path = "../prometeu-language-api" }
prometeu-languages-registry = { path = "../languages/prometeu-languages-registry" }
anyhow = "1.0.101"
camino = "1.2.2"
walkdir = "2.5.0"
serde_json = "1.0.149"
[features]
default = []

View File

@ -1,19 +0,0 @@
//! Deps crate: workspace resolution, source loading, and the data model
//! shared with the pipeline. All source-file IO lives here.
mod model;
mod load_sources;
mod workspace;
pub use workspace::resolve_workspace;
pub use load_sources::load_sources;
pub use model::manifest::*;
pub use model::resolved_project::ResolvedWorkspace;
pub use model::deps_config::DepsConfig;
pub use model::project_descriptor::ProjectDescriptor;
pub use model::build_stack::BuildStack;
pub use model::resolved_graph::ResolvedGraph;
pub use model::loaded_sources::LoadedSources;
pub use model::project_sources::ProjectSources;
pub use model::loaded_file::LoadedFile;
pub use model::cache_blobs::CacheBlobs;
pub use model::cache_plan::CachePlan;

View File

@ -1,97 +0,0 @@
use anyhow::{Context, Result};
use camino::Utf8PathBuf;
use walkdir::WalkDir;
use crate::{
DepsConfig,
LoadedFile,
LoadedSources,
ProjectSources,
ResolvedWorkspace,
};
/// Reads every source file for every project in the build stack, in stack
/// order. All filesystem IO for sources happens here, not in the pipeline.
pub fn load_sources(cfg: &DepsConfig, resolved: &ResolvedWorkspace) -> Result<LoadedSources> {
    let mut per_project = Vec::with_capacity(resolved.stack.projects.len());
    for project_id in &resolved.stack.projects {
        let project = resolved
            .graph
            .project(project_id)
            .with_context(|| format!("deps: unknown project_id {:?} in build stack", project_id))?;
        if cfg.explain {
            eprintln!(
                "[deps] load_sources: project {}@{} ({:?})",
                project.name, project.version, project.project_dir
            );
        }
        let mut files: Vec<LoadedFile> = Vec::new();
        for root in &project.source_roots {
            let abs_root = project.project_dir.join(root);
            if cfg.explain {
                eprintln!("[deps] scanning {:?}", abs_root);
            }
            // A missing root is a configuration error, not an empty project.
            if !abs_root.exists() {
                anyhow::bail!(
                    "deps: source root does not exist for project {}@{}: {:?}",
                    project.name,
                    project.version,
                    abs_root
                );
            }
            // Walk recursively.
            for entry in WalkDir::new(&abs_root)
                .follow_links(false)
                .into_iter()
                .filter_map(|e| e.ok())
            {
                let ft = entry.file_type();
                if !ft.is_file() {
                    continue;
                }
                let path = entry.path();
                // TODO: we need to change prometeu.json so the project's frontend is configurable
                // Filter extensions: start with PBS only.
                if path.extension().and_then(|s| s.to_str()) != Some("pbs") {
                    continue;
                }
                // Convert to Utf8Path (the best effort) and use a stable "uri".
                let path_utf8: Utf8PathBuf = match Utf8PathBuf::from_path_buf(path.to_path_buf()) {
                    Ok(p) => p,
                    Err(_) => {
                        anyhow::bail!("deps: non-utf8 path found while scanning sources: {:?}", path);
                    }
                };
                let text = std::fs::read_to_string(&path_utf8)
                    .with_context(|| format!("deps: failed to read source file {:?}", path_utf8))?;
                // TODO: normalize newlines
                files.push(LoadedFile {
                    uri: path_utf8.to_string(),
                    text,
                });
            }
        }
        // Determinism: sort the file list by uri (important for stable builds).
        files.sort_by(|a, b| a.uri.cmp(&b.uri));
        per_project.push(ProjectSources {
            project_id: project_id.clone(),
            files,
        });
    }
    Ok(LoadedSources { per_project })
}

View File

@ -1,6 +0,0 @@
use prometeu_core::ProjectId;
/// The ordered list of projects to build, produced by workspace resolution.
// NOTE(review): assumed dependencies-first order — confirm in the resolver.
#[derive(Debug, Clone)]
pub struct BuildStack {
    pub projects: Vec<ProjectId>,
}

View File

@ -1,7 +0,0 @@
/// Cache blobs computed/validated by deps.
/// The pipeline may decide when to store, but deps executes IO and cache validity.
#[derive(Debug, Clone)]
pub struct CacheBlobs {
    // placeholder until real cache data exists
    pub _unused: (),
}

View File

@ -1,4 +0,0 @@
/// Placeholder for a future description of what to cache and when.
#[derive(Debug, Clone)]
pub struct CachePlan {
}

View File

@ -1,7 +0,0 @@
use camino::Utf8PathBuf;
/// Configuration for dependency resolution and source loading.
pub struct DepsConfig {
    /// Print resolution/loading steps to stderr.
    pub explain: bool,
    // NOTE(review): not read anywhere in the visible code yet — confirm use.
    pub cache_dir: Utf8PathBuf,
    pub registry_dirs: Vec<Utf8PathBuf>, // or sources ?
}

View File

@ -1,5 +0,0 @@
/// One source file as loaded from disk: its uri (a UTF-8 path string, see
/// `load_sources`) plus the full text.
#[derive(Debug, Clone)]
pub struct LoadedFile {
    pub uri: String,
    pub text: String,
}

View File

@ -1,8 +0,0 @@
use crate::model::project_sources::ProjectSources;
/// Sources already loaded by deps (IO happens in deps, not in pipeline).
#[derive(Debug, Clone)]
pub struct LoadedSources {
    /// For each project in the stack, a list of files (uri + text).
    /// Expected to align index-for-index with `BuildStack.projects`.
    pub per_project: Vec<ProjectSources>,
}

View File

@ -1,75 +0,0 @@
use camino::Utf8PathBuf;
use serde::{Deserialize, Serialize};
/// A project manifest as declared by the user.
// NOTE(review): presumably deserialized from `prometeu.json` — confirm.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Manifest {
    pub name: String,
    pub version: String,
    /// Directories scanned for sources; empty when omitted.
    #[serde(default)]
    pub source_roots: Vec<String>,
    pub language: LanguageDecl,
    /// Declared dependencies; empty when omitted.
    #[serde(default)]
    pub deps: Vec<DepDecl>,
}
/// The language a project is written in, by registry id (e.g. "pbs").
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LanguageDecl {
    pub id: String,
}
/// A dependency declaration: a local path or a git repository.
/// `untagged`: serde picks the variant by which fields are present.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum DepDecl {
    Local {
        path: String,
    },
    Git {
        git: String,
        rev: Option<String>,
    },
}
/// Lock file contents: a schema version plus source->local-dir mappings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PrometeuLock {
    pub schema: u32,
    /// Recorded mappings; empty when omitted.
    #[serde(default)]
    pub mappings: Vec<LockMapping>,
}
impl PrometeuLock {
pub fn blank() -> Self {
Self {
schema: 0,
mappings: vec![],
}
}
pub fn lookup_git_local_dir(&self, url: &str, rev: &str) -> Option<&String> {
self.mappings.iter().find_map(|m| match m {
LockMapping::Git {
git, rev: r, local_dir
} if git == url && r == rev => Some(local_dir),
_ => None,
})
}
}
/// One lock-file entry. Serialized with an explicit `kind` tag
/// (`"git"` / `"registry"`, per `rename_all = "lowercase"`).
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "kind", rename_all = "lowercase")]
pub enum LockMapping {
    /// Pins a git dependency (url + rev) to a local checkout directory.
    Git {
        git: String,
        rev: String,
        local_dir: String,
    },
    /// Pins a registry dependency (registry name + version) to a local directory.
    Registry {
        registry: String,
        version: String,
        local_dir: String,
    },
}

View File

@ -1,11 +0,0 @@
pub mod deps_config;
pub mod project_descriptor;
pub mod build_stack;
pub mod resolved_graph;
pub mod loaded_sources;
pub mod project_sources;
pub mod loaded_file;
pub mod cache_blobs;
pub mod resolved_project;
pub mod cache_plan;
pub mod manifest;

View File

@ -1,14 +0,0 @@
use camino::Utf8PathBuf;
use prometeu_core::ProjectId;
use prometeu_language_api::SourcePolicy;
/// Immutable description of one resolved project, as exposed to the pipeline.
#[derive(Debug, Clone)]
pub struct ProjectDescriptor {
    pub project_id: ProjectId,
    pub name: String,
    pub version: String,
    /// Canonicalized project root directory.
    pub project_dir: Utf8PathBuf,
    /// Source roots as declared in the manifest.
    pub source_roots: Vec<Utf8PathBuf>,
    pub language_id: String,
    /// File-extension policy resolved from the language registry.
    pub source_policy: SourcePolicy,
}

View File

@ -1,8 +0,0 @@
use prometeu_core::ProjectId;
use crate::model::loaded_file::LoadedFile;
/// All loaded source files belonging to one project.
#[derive(Debug, Clone)]
pub struct ProjectSources {
    pub project_id: ProjectId,
    /// Files for this project (uri + text), sorted by uri for determinism.
    pub files: Vec<LoadedFile>,
}

View File

@ -1,16 +0,0 @@
use prometeu_core::ProjectId;
use crate::ProjectDescriptor;
/// Dependency graph over all resolved projects.
#[derive(Debug, Clone)]
pub struct ResolvedGraph {
    /// Root project of the workspace.
    pub root: ProjectId,
    pub projects: Vec<ProjectDescriptor>, // arena, indexed by ProjectId.0
    // optional: adjacency list for checks
    pub edges: Vec<Vec<ProjectId>>, // edges[from] = vec[to]
}

impl ResolvedGraph {
    /// Looks up a project by id (arena index); `None` when out of range.
    pub fn project(&self, id: &ProjectId) -> Option<&ProjectDescriptor> {
        self.projects.get(id.0 as usize)
    }
}

View File

@ -1,9 +0,0 @@
use prometeu_core::ProjectId;
use crate::{BuildStack, ResolvedGraph};
/// Final output of workspace resolution: the project graph plus a build order.
#[derive(Debug, Clone)]
pub struct ResolvedWorkspace {
    /// Root project id (same value as `graph.root`).
    pub project_id: ProjectId,
    pub graph: ResolvedGraph,
    pub stack: BuildStack,
}

View File

@ -1,32 +0,0 @@
use anyhow::{Context, Result};
use camino::{Utf8Path, Utf8PathBuf};
use crate::workspace::model::DepRef;
/// Filesystem abstraction for the resolver: phases read files only through
/// this trait, so IO can be stubbed out.
pub trait DepsHost {
    /// Reads the whole file at `path` as UTF-8 text.
    fn read_to_string(&self, path: &Utf8Path) -> Result<String>;
    // fn ensure_project_local(&self, from_dir: &Utf8Path, dep: &DepRef) -> Result<Utf8PathBuf>;
}

/// Production host backed by `std::fs`.
pub struct FsHost;

impl DepsHost for FsHost {
    fn read_to_string(&self, path: &Utf8Path) -> Result<String> {
        std::fs::read_to_string(path)
            .with_context(|| format!("failed to read {:?}", path))
    }
    // fn ensure_project_local(&self, from_dir: &Utf8Path, dep: &DepRef) -> Result<Utf8PathBuf> {
    //     match dep {
    //         DepRef::Local { path } => {
    //             let joined = from_dir.join(path);
    //             let canon = joined.canonicalize()
    //                 .with_context(|| format!("deps: dep path does not exist: {:?}", joined))?;
    //             Utf8PathBuf::from_path_buf(canon)
    //                 .map_err(|p| anyhow::anyhow!("deps: non-utf8 dep dir: {:?}", p))
    //         }
    //         _ => unimplemented!(),
    //     }
    // }
}

View File

@ -1,6 +0,0 @@
mod resolve_workspace;
mod host;
mod model;
mod phases;
pub use resolve_workspace::resolve_workspace;

View File

@ -1,31 +0,0 @@
use camino::Utf8PathBuf;
use prometeu_core::ProjectId;
use prometeu_language_api::SourcePolicy;
use crate::Manifest;
/// A discovered project before id assignment: directory + parsed manifest.
#[derive(Debug, Clone)]
pub struct RawProjectNode {
    /// Canonicalized project directory (identity key during discovery).
    pub dir: Utf8PathBuf,
    /// Path to the `prometeu.json` that was parsed.
    pub manifest_path: Utf8PathBuf,
    pub manifest: Manifest,
}

/// A dependency reference after materialize: every declaration (local paths,
/// git deps via the lock file) is reduced to a canonical local directory.
#[derive(Debug, Clone)]
pub enum DepRef {
    Local {
        path: Utf8PathBuf
    },
}

/// A materialized project node in the resolver arena (index == ProjectId.0).
#[derive(Debug, Clone)]
pub struct ProjectNode {
    pub id: ProjectId,
    pub dir: Utf8PathBuf,
    pub name: String,
    pub version: String,
    pub source_roots: Vec<Utf8PathBuf>,
    pub language_id: String,
    /// Dependencies, already reduced to canonical local dirs.
    pub deps: Vec<DepRef>,
    /// Extension policy resolved from the language registry.
    pub source_policy: SourcePolicy,
}

View File

@ -1,131 +0,0 @@
use crate::model::manifest::DepDecl;
use crate::workspace::host::DepsHost;
use crate::workspace::model::RawProjectNode;
use crate::workspace::phases::state::ResolverState;
use crate::Manifest;
use anyhow::{anyhow, bail, Context, Result};
use camino::Utf8PathBuf;
use serde_json;
use std::fs::canonicalize;
/// Phase 1: Discover all projects in the workspace.
///
/// - Reads `prometeu.json` from each pending project directory.
/// - Parses `Manifest`.
/// - Registers the raw node.
/// - Enqueues local-path deps for discovery (v0).
///
/// Does NOT:
/// - assign ProjectId
/// - build edges
/// - validate versions
pub fn discover(
    cfg: &crate::DepsConfig,
    host: &dyn DepsHost,
    state: &mut ResolverState,
) -> Result<()> {
    // BFS over project directories, seeded (by `seed`) with the workspace root.
    while let Some(canon_dir) = state.pending.pop_front() {
        // de-dup by directory: a project can be depended on by many others
        if state.raw_by_dir.contains_key(&canon_dir) {
            continue;
        }
        let manifest_path = canon_dir.join("prometeu.json");
        if !manifest_path.exists() || !manifest_path.is_file() {
            bail!(
                "deps: manifest not found: expected a file {:?} (project dir {:?})",
                manifest_path,
                canon_dir
            );
        }
        if cfg.explain {
            eprintln!("[deps][discover] reading {:?}", manifest_path);
        }
        let text = host
            .read_to_string(&manifest_path)
            .with_context(|| format!("deps: failed to read manifest {:?}", manifest_path))?;
        let manifest: Manifest = serde_json::from_str(&text)
            .with_context(|| format!("deps: invalid manifest JSON {:?}", manifest_path))?;
        // Register raw node; the index is recorded in raw_by_dir so re-visits
        // are skipped by the de-dup check above.
        let raw_idx = state.raw.len();
        state.raw.push(RawProjectNode {
            dir: canon_dir.clone(),
            manifest_path: manifest_path.clone(),
            manifest: manifest.clone(),
        });
        state.raw_by_dir.insert(canon_dir.clone(), raw_idx);
        // Enqueue this project's dependencies for discovery.
        for dep in &manifest.deps {
            match dep {
                DepDecl::Local { path } => {
                    // Local deps are resolved relative to the declaring project.
                    let dep_dir = canon_dir.join(path);
                    let dep_dir_std = dep_dir.canonicalize().with_context(|| {
                        format!(
                            "deps: dep path does not exist: {:?} (from {:?})",
                            dep_dir, canon_dir
                        )
                    })?;
                    let dep_dir_canon = Utf8PathBuf::from_path_buf(dep_dir_std)
                        .map_err(|p| anyhow!("deps: non-utf8 dep dir: {:?}", p))?;
                    if cfg.explain {
                        eprintln!("[deps][discover] local dep '{}' -> {:?}", path, dep_dir_canon);
                    }
                    state.pending.push_back(dep_dir_canon);
                }
                DepDecl::Git { git, rev } => {
                    // Git deps are only supported via a prometeu.lock mapping to
                    // an already-checked-out local directory (no network here).
                    let Some(rev) = rev.as_deref() else {
                        bail!(
                            "deps: git dependency '{}' requires an explicit 'rev' (commit hash) for now",
                            git
                        );
                    };
                    let Some(local_dir) = state.lock.lookup_git_local_dir(git, rev) else {
                        bail!(
                            "deps: git dependency requires prometeu.lock mapping, but entry not found: git='{}' rev='{}'",
                            git,
                            rev
                        );
                    };
                    // canonicalize the lock-provided local dir to keep identity stable
                    let local_dir_std = canonicalize(local_dir)
                        .with_context(|| format!("deps: prometeu.lock local_dir does not exist: {:?}", local_dir))?;
                    let local_dir_canon = Utf8PathBuf::from_path_buf(local_dir_std)
                        .map_err(|p| anyhow!("deps: non-utf8 lock local_dir: {:?}", p))?;
                    // validate manifest exists at the mapped project root
                    // (this check should not belong here, but it is ok)
                    let mapped_manifest = local_dir_canon.join("prometeu.json");
                    if !mapped_manifest.exists() || !mapped_manifest.is_file() {
                        bail!(
                            "deps: prometeu.lock maps git dep to {:?}, but manifest is missing: {:?}",
                            local_dir_canon,
                            mapped_manifest
                        );
                    }
                    if cfg.explain {
                        eprintln!(
                            "[deps][discover] git dep '{}' rev '{}' -> {:?}",
                            git, rev, local_dir_canon
                        );
                    }
                    state.pending.push_back(local_dir_canon);
                }
            }
        }
    }
    Ok(())
}

View File

@ -1,62 +0,0 @@
use anyhow::{Context, Result};
use prometeu_core::ProjectId;
use crate::workspace::model::DepRef;
use crate::workspace::phases::state::ResolverState;
/// Phase 3: Build graph edges from the localized dependencies.
///
/// For each project node:
/// - For each `DepRef::Local` (materialize already reduced every declaration,
///   including git deps via prometeu.lock, to a canonical local directory):
///   - map the dep dir to a ProjectId via `state.by_dir`
///   - push it onto `state.edges[from]`
///
/// v0 policy:
/// - Only `DepRef::Local` exists; a dep dir that was never discovered is a
///   hard error (future variants are an extension point).
pub fn localize(cfg: &crate::DepsConfig, state: &mut ResolverState) -> Result<()> {
    // Reset edges (allows re-run / deterministic behavior)
    for e in &mut state.edges {
        e.clear();
    }
    for from_idx in 0..state.nodes.len() {
        let from_id: ProjectId = state.nodes[from_idx].id;
        let from_dir = state.nodes[from_idx].dir.clone();
        if cfg.explain {
            eprintln!(
                "[deps][localize] from id={:?} dir={:?}",
                from_id, from_dir
            );
        }
        // Clone deps to avoid borrow conflicts (simple + safe for now)
        let deps = state.nodes[from_idx].deps.clone();
        for dep in deps {
            match &dep {
                DepRef::Local {
                    path
                } => {
                    // `path` is canonical (materialize canonicalized it), so it
                    // is a valid key into by_dir.
                    let dep_id = state.by_dir.get(path).copied().with_context(|| {
                        format!(
                            "deps: localized dep dir {:?} was not discovered; \
                             ensure the dep has a prometeu.json and is reachable via local paths",
                            path
                        )
                    })?;
                    state.edges[from_id.0 as usize].push(dep_id);
                }
            }
        }
        // Optional: keep edges deterministic
        state.edges[from_id.0 as usize].sort_by_key(|id| id.0);
        state.edges[from_id.0 as usize].dedup();
    }
    Ok(())
}

View File

@ -1,144 +0,0 @@
use crate::model::manifest::DepDecl;
use crate::workspace::model::{DepRef, ProjectNode};
use crate::workspace::phases::state::ResolverState;
use anyhow::{anyhow, bail, Context, Result};
use camino::Utf8PathBuf;
use prometeu_core::ProjectId;
use prometeu_languages_registry::get_language_spec;
use std::fs::canonicalize;
/// Phase 2: Materialize projects (allocate ProjectId / arena nodes).
///
/// Inputs:
/// - st.raw (RawProjectNode: dir + manifest)
///
/// Outputs:
/// - st.nodes (ProjectNode arena; ProjectId == arena index)
/// - st.by_dir (dir -> ProjectId)
/// - st.edges (allocated adjacency lists, empty for now)
/// - st.root (ProjectId for root_dir)
///
/// Also resolves each dep declaration to a canonical local dir (DepRef::Local)
/// and the language's source policy from the registry.
///
/// Does NOT:
/// - build graph edges (that's phase localize)
/// - validate version conflicts/cycles
pub fn materialize(cfg: &crate::DepsConfig, state: &mut ResolverState) -> Result<()> {
    // Reset materialized state (allows rerun in future refactors/tests)
    state.nodes.clear();
    state.by_dir.clear();
    state.edges.clear();
    state.root = None;
    state.nodes.reserve(state.raw.len());
    state.edges.reserve(state.raw.len());
    for (idx, raw) in state.raw.iter().enumerate() {
        // ProjectId is the arena index, so nodes[id.0] is always this node.
        let id = ProjectId(idx as u32);
        // Source roots come straight from the manifest; an empty list is
        // rejected below (no defaulting yet).
        let source_roots: Vec<Utf8PathBuf> = raw
            .manifest
            .source_roots
            .iter()
            .map(|root| Utf8PathBuf::from(root))
            .collect();
        if source_roots.is_empty() {
            bail!(
                "deps: no source roots specified for project {}",
                raw.manifest.name
            )
        }
        // Convert DepDecl -> DepRef (canonical local dirs; no edges yet)
        let mut deps: Vec<DepRef> = Vec::with_capacity(raw.manifest.deps.len());
        for d in &raw.manifest.deps {
            match d {
                DepDecl::Local { path } => {
                    let joined = raw.dir.join(path);
                    let dir_std = joined.canonicalize()
                        .with_context(|| format!("deps: local dep path does not exist: {:?} (from {:?})", joined, raw.dir))?;
                    let dir_canon = Utf8PathBuf::from_path_buf(dir_std)
                        .map_err(|p| anyhow!("deps: non-utf8 dep dir: {:?}", p))?;
                    deps.push(DepRef::Local {
                        path: dir_canon
                    });
                }
                DepDecl::Git { git, rev } => {
                    // Same lock-file rules as discover: rev is mandatory and the
                    // lock must map (git, rev) to an existing local checkout.
                    let Some(rev) = rev.as_deref() else {
                        bail!(
                            "deps: git dependency '{}' requires an explicit 'rev' (commit hash) for now",
                            git
                        );
                    };
                    let Some(local_dir) = state.lock.lookup_git_local_dir(git, rev) else {
                        bail!(
                            "deps: git dependency requires prometeu.lock mapping, but entry not found: git='{}' rev='{}'",
                            git,
                            rev
                        );
                    };
                    // canonicalize the lock-provided local dir to keep identity stable
                    let path = canonicalize(local_dir).with_context(|| {
                        format!(
                            "deps: prometeu.lock local_dir does not exist: {:?}",
                            local_dir
                        )
                    })?;
                    let local_dir_canon = Utf8PathBuf::from_path_buf(path)
                        .map_err(|p| anyhow!("deps: non-utf8 lock local_dir: {:?}", p))?;
                    deps.push(DepRef::Local {
                        path: local_dir_canon,
                    });
                }
            }
        }
        if cfg.explain {
            eprintln!(
                "[deps][materialize] id={:?} {}@{} dir={:?} language={}",
                id, raw.manifest.name, raw.manifest.version, raw.dir, raw.manifest.language.id
            );
        }
        // Resolve the file-extension policy from the static language registry.
        let source_policy = get_language_spec(raw.manifest.language.id.as_str())
            .map(|spec| spec.source_policy.clone())
            .ok_or(anyhow!(
                "deps: unknown language spec: {}",
                raw.manifest.language.id
            ))?;
        // Record node
        state.nodes.push(ProjectNode {
            id,
            dir: raw.dir.clone(),
            name: raw.manifest.name.clone(),
            version: raw.manifest.version.clone(),
            source_roots,
            language_id: raw.manifest.language.id.clone(),
            deps,
            source_policy,
        });
        state.by_dir.insert(raw.dir.clone(), id);
        state.edges.push(Vec::new());
    }
    // Determine root id
    if let Some(root_id) = state.by_dir.get(&state.root_dir).copied() {
        state.root = Some(root_id);
    } else {
        // This should never happen if seed/discover worked.
        // Keep it as a hard failure (in a later validate phase you can convert to a nicer diagnostic).
        anyhow::bail!(
            "deps: root project dir {:?} was not discovered/materialized",
            state.root_dir
        );
    }
    Ok(())
}

View File

@ -1,10 +0,0 @@
mod run_all;
mod state;
mod discover;
mod materialize;
mod localize;
mod validate;
mod policy;
mod stack;
pub use run_all::run_all;

View File

@ -1,17 +0,0 @@
use anyhow::{bail, Result};
use crate::workspace::phases::state::ResolverState;
/// Phase: policy checks on materialized nodes.
///
/// Rejects any project whose resolved source policy declares no file
/// extensions, since such a project could never match any source file.
pub fn policy(_cfg: &crate::DepsConfig, state: &mut ResolverState) -> Result<()> {
    let offender = state
        .nodes
        .iter()
        .find(|node| node.source_policy.extensions.is_empty());
    if let Some(node) = offender {
        bail!(
            "deps: project {}@{} has empty source_policy.extensions (language={})",
            node.name,
            node.version,
            node.language_id
        );
    }
    Ok(())
}

View File

@ -1,50 +0,0 @@
use anyhow::{Context, Result};
use camino::Utf8Path;
use crate::{BuildStack, DepsConfig, ProjectDescriptor, ResolvedGraph, ResolvedWorkspace};
use crate::workspace::host::FsHost;
use crate::workspace::phases::{discover, localize, materialize, policy, stack, state, validate};
/// Runs every resolver phase in order and assembles the final workspace.
///
/// Phase order: seed -> discover -> materialize -> localize -> validate
/// -> policy -> stack.
pub fn run_all(cfg: &DepsConfig, fs_host: &FsHost, root_dir: &Utf8Path) -> Result<ResolvedWorkspace> {
    let mut st = state::seed(cfg, root_dir)?;
    discover::discover(cfg, fs_host, &mut st)?;
    materialize::materialize(cfg, &mut st)?;
    localize::localize(cfg, &mut st)?;
    validate::validate(cfg, &st)?;
    policy::policy(cfg, &mut st)?;
    let build_stack: BuildStack = stack::stack(cfg, &mut st)?;
    let root = st
        .root
        .context("deps: internal error: root ProjectId not set")?;
    // Build the arena expected by ResolvedGraph: index == ProjectId.0
    // materialize already assigns ProjectId(idx), so st.nodes order is stable.
    let mut projects: Vec<ProjectDescriptor> = Vec::with_capacity(st.nodes.len());
    for n in &st.nodes {
        projects.push(ProjectDescriptor {
            project_id: n.id,
            name: n.name.clone(),
            version: n.version.clone(),
            project_dir: n.dir.clone(),
            source_roots: n.source_roots.clone(),
            language_id: n.language_id.clone(),
            source_policy: n.source_policy.clone(),
        });
    }
    let graph = ResolvedGraph {
        root,
        projects,
        edges: st.edges,
    };
    Ok(ResolvedWorkspace {
        project_id: root,
        graph,
        stack: build_stack,
    })
}

View File

@ -1,97 +0,0 @@
use anyhow::{Context, Result};
use prometeu_core::ProjectId;
use std::collections::VecDeque;
use crate::BuildStack;
use crate::workspace::phases::state::ResolverState;
/// Phase: BuildStack (deps-first topo order).
///
/// Output:
/// - state.stack: Vec<ProjectId> where deps appear before dependents.
///
/// Determinism:
/// - ties are resolved by ProjectId order (stable across runs if discovery is stable).
///
/// NOTE(review): localize pushes edges as dependent -> dependency, so the
/// zero-indegree start set for Kahn's algorithm is the nodes nobody depends
/// on (the root comes out first). That yields dependents-before-dependencies,
/// the reverse of the "deps appear before dependents" claim above — confirm
/// the intended direction against the pipeline consumer.
pub fn stack(cfg: &crate::DepsConfig, state: &mut ResolverState) -> Result<BuildStack> {
    let n = state.nodes.len();
    let _root = state.root.context("deps: internal error: root ProjectId not set")?;
    // Build indegree
    let mut indeg = vec![0usize; n];
    for outs in &state.edges {
        for &to in outs {
            indeg[to.0 as usize] += 1;
        }
    }
    // Deterministic queue: push in ProjectId order
    let mut q = VecDeque::new();
    for i in 0..n {
        if indeg[i] == 0 {
            q.push_back(i);
        }
    }
    let mut order: Vec<ProjectId> = Vec::with_capacity(n);
    while let Some(i) = q.pop_front() {
        order.push(ProjectId(i as u32));
        // Ensure deterministic traversal of outgoing edges too
        // (your localize already sort/dedup edges, but this doesn't hurt)
        for &to in &state.edges[i] {
            let j = to.0 as usize;
            indeg[j] -= 1;
            if indeg[j] == 0 {
                // Deterministic insert: keep queue ordered by ProjectId
                // Simple O(n) insertion is fine for now.
                insert_sorted_by_id(&mut q, j);
            }
        }
    }
    // If validate ran, this should already be cycle-free; still keep a guard.
    if order.len() != n {
        anyhow::bail!(
            "deps: internal error: stack generation did not visit all nodes ({} of {})",
            order.len(),
            n
        );
    }
    if cfg.explain {
        eprintln!("[deps][stack] build order:");
        for id in &order {
            let node = &state.nodes[id.0 as usize];
            eprintln!(" - {:?} {}@{} dir={:?}", id, node.name, node.version, node.dir);
        }
    }
    Ok(BuildStack {
        projects: order,
    })
}
/// Insert node index `i` into queue `q`, keeping it sorted ascending by index
/// (index == ProjectId.0, so the queue stays ordered by ProjectId).
///
/// Fix: the old version drained the whole deque into a Vec and rebuilt it on
/// the claim that "VecDeque has no insert" — but `VecDeque::insert` has been
/// stable since Rust 1.5. Use `partition_point` (binary search, the deque is
/// always sorted) to find the slot, then insert in place.
fn insert_sorted_by_id(q: &mut VecDeque<usize>, i: usize) {
    // First position whose element is >= i; equal elements compare identical,
    // so inserting before them leaves the queue contents unchanged.
    let pos = q.partition_point(|&v| v < i);
    q.insert(pos, i);
}

View File

@ -1,58 +0,0 @@
use camino::{Utf8Path, Utf8PathBuf};
use std::collections::{HashMap, VecDeque};
use anyhow::Context;
use crate::workspace::model::{RawProjectNode, ProjectNode};
use prometeu_core::ProjectId;
use crate::PrometeuLock;
use serde_json;
/// Mutable state threaded through all resolver phases.
pub struct ResolverState {
    /// Canonicalized workspace root directory.
    pub root_dir: Utf8PathBuf,
    // phase1 output
    pub raw: Vec<RawProjectNode>,
    /// De-dup map: canonical dir -> index into `raw`.
    pub raw_by_dir: HashMap<Utf8PathBuf, usize>,
    /// Directories still to be visited by discover (BFS queue).
    pub pending: VecDeque<Utf8PathBuf>,
    // phase2+
    /// Arena of materialized nodes; index == ProjectId.0.
    pub nodes: Vec<ProjectNode>,
    pub by_dir: HashMap<Utf8PathBuf, ProjectId>,
    /// Adjacency list: edges[from.0] = dependency ids of `from`.
    pub edges: Vec<Vec<ProjectId>>,
    /// Root project id; set by materialize.
    pub root: Option<ProjectId>,
    /// Parsed prometeu.lock (blank when the file is absent).
    pub lock: PrometeuLock,
}
/// Seeds the resolver state:
/// - canonicalizes `root_dir` so directory identity stays stable,
/// - loads `prometeu.lock` next to the root manifest when present
///   (a blank lock otherwise),
/// - queues the root directory as the first discovery target.
pub fn seed(_cfg: &crate::DepsConfig, root_dir: &Utf8Path) -> anyhow::Result<ResolverState> {
    let path_buf = root_dir.canonicalize()?;
    let root_dir_canon = Utf8PathBuf::from_path_buf(path_buf)
        .map_err(|p| anyhow::anyhow!("deps: non-utf8 root dir: {:?}", p))?;
    let lock_path = root_dir_canon.join("prometeu.lock");
    let lock = if lock_path.exists() {
        let txt = std::fs::read_to_string(&lock_path)?;
        serde_json::from_str::<PrometeuLock>(&txt)
            .with_context(|| format!("invalid prometeu.lock at {:?}", lock_path))?
    } else {
        // Missing lock is fine: only git/registry deps need mappings.
        PrometeuLock::blank()
    };
    let mut pending = VecDeque::new();
    pending.push_back(root_dir_canon.clone());
    Ok(ResolverState {
        root_dir: root_dir_canon.clone(),
        raw: vec![],
        raw_by_dir: HashMap::new(),
        pending,
        nodes: vec![],
        by_dir: HashMap::new(),
        edges: vec![],
        root: None,
        lock,
    })
}

View File

@ -1,108 +0,0 @@
use anyhow::{bail, Context, Result};
use prometeu_core::ProjectId;
use std::collections::{HashMap, VecDeque};
use crate::workspace::phases::state::ResolverState;
/// Phase: Validate workspace graph & invariants (v0).
///
/// Checks:
/// - root present
/// - edges are in-range
/// - no cycles
/// - no version conflicts for same project name
pub fn validate(cfg: &crate::DepsConfig, state: &ResolverState) -> Result<()> {
    // 1) root present
    let root = state.root.context("deps: internal error: root ProjectId not set")?;
    if cfg.explain {
        eprintln!("[deps][validate] root={:?}", root);
    }
    // 2) edges sanity: every target id must be a valid arena index
    let n = state.nodes.len();
    for (from_idx, outs) in state.edges.iter().enumerate() {
        for &to in outs {
            let to_idx = to.0 as usize;
            if to_idx >= n {
                bail!(
                    "deps: invalid edge: from {:?} -> {:?} (to out of range; nodes={})",
                    ProjectId(from_idx as u32),
                    to,
                    n
                );
            }
        }
    }
    // 3) version conflicts by name
    // name -> (version -> ProjectId)
    let mut by_name: HashMap<&str, HashMap<&str, ProjectId>> = HashMap::new();
    for node in &state.nodes {
        let vmap = by_name.entry(node.name.as_str()).or_default();
        // keep the first id seen per (name, version); only the count matters below
        vmap.entry(node.version.as_str()).or_insert(node.id);
    }
    for (name, versions) in &by_name {
        if versions.len() > 1 {
            // create deterministic message
            let mut vs: Vec<(&str, ProjectId)> = versions.iter().map(|(v, id)| (*v, *id)).collect();
            vs.sort_by(|a, b| a.0.cmp(b.0));
            let mut msg = format!("deps: version conflict for project '{}':", name);
            for (v, id) in vs {
                let dir = &state.nodes[id.0 as usize].dir;
                msg.push_str(&format!("\n - {} at {:?} (id={:?})", v, dir, id));
            }
            bail!(msg);
        }
    }
    // 4) cycle detection (Kahn + leftover nodes)
    // Build indegree
    let mut indeg = vec![0usize; n];
    for outs in &state.edges {
        for &to in outs {
            indeg[to.0 as usize] += 1;
        }
    }
    let mut q = VecDeque::new();
    for i in 0..n {
        if indeg[i] == 0 {
            q.push_back(i);
        }
    }
    // Standard Kahn traversal: the graph is acyclic iff all nodes get visited.
    let mut visited = 0usize;
    while let Some(i) = q.pop_front() {
        visited += 1;
        for &to in &state.edges[i] {
            let j = to.0 as usize;
            indeg[j] -= 1;
            if indeg[j] == 0 {
                q.push_back(j);
            }
        }
    }
    if visited != n {
        // Nodes with indeg>0 are part of cycles (or downstream of them)
        let mut cyclic: Vec<ProjectId> = Vec::new();
        for i in 0..n {
            if indeg[i] > 0 {
                cyclic.push(ProjectId(i as u32));
            }
        }
        // Deterministic error output
        cyclic.sort_by_key(|id| id.0);
        let mut msg = "deps: dependency cycle detected among:".to_string();
        for id in cyclic {
            let node = &state.nodes[id.0 as usize];
            msg.push_str(&format!("\n - {:?} {}@{} dir={:?}", id, node.name, node.version, node.dir));
        }
        bail!(msg);
    }
    Ok(())
}

View File

@ -1,10 +0,0 @@
use anyhow::Result;
use camino::Utf8Path;
use crate::{DepsConfig, ResolvedWorkspace};
use crate::workspace::host::FsHost;
/// Resolves the workspace rooted at `root_dir` using the real filesystem host.
pub fn resolve_workspace(cfg: &DepsConfig, root_dir: &Utf8Path) -> Result<ResolvedWorkspace> {
    // FsHost is a unit struct; borrow it inline for the phase runner.
    crate::workspace::phases::run_all(cfg, &FsHost, root_dir)
}

View File

@ -1,10 +0,0 @@
[package]
name = "prometeu-language-api"
version = "0.1.0"
edition = "2021"
license = "MIT"
description = "Canonical language contract for Prometeu Backend: identifiers, references, and strict Frontend trait."
repository = "https://github.com/prometeu/runtime"
[dependencies]

View File

@ -1,21 +0,0 @@
/// Which files a language claims as sources, by file extension.
#[derive(Debug, Clone)]
pub struct SourcePolicy {
    pub extensions: Vec<&'static str>,
    pub case_sensitive: bool,
}

impl SourcePolicy {
    /// Returns true when `ext` matches one of the declared extensions,
    /// honoring `case_sensitive` (ASCII-case-insensitive otherwise).
    pub fn matches_ext(&self, ext: &str) -> bool {
        for candidate in &self.extensions {
            let hit = if self.case_sensitive {
                *candidate == ext
            } else {
                candidate.eq_ignore_ascii_case(ext)
            };
            if hit {
                return true;
            }
        }
        false
    }
}

/// Static description of a language known to the registry.
#[derive(Debug, Clone)]
pub struct LanguageSpec {
    pub id: &'static str,
    pub source_policy: SourcePolicy,
}

View File

@ -1,3 +0,0 @@
mod language_spec;
pub use language_spec::*;

View File

@ -1,19 +0,0 @@
[package]
name = "prometeu-lowering"
version = "0.1.0"
edition = "2021"
license.workspace = true
repository.workspace = true
[dependencies]
prometeu-bytecode = { path = "../prometeu-bytecode" }
prometeu-core = { path = "../prometeu-core" }
prometeu-language-api = { path = "../prometeu-language-api" }
clap = { version = "4.5.54", features = ["derive"] }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"
anyhow = "1.0.100"
pathdiff = "0.2.1"
[dev-dependencies]
tempfile = "3.10.1"

View File

@ -6,6 +6,4 @@ license.workspace = true
[dependencies] [dependencies]
serde_json = "1.0.149" serde_json = "1.0.149"
prometeu-vm = { path = "../prometeu-vm" }
prometeu-core = { path = "../../compiler/prometeu-core" }
prometeu-hal = { path = "../prometeu-hal" } prometeu-hal = { path = "../prometeu-hal" }

View File

@ -5,11 +5,6 @@ edition = "2024"
license.workspace = true license.workspace = true
[dependencies] [dependencies]
prometeu-drivers = { path = "../prometeu-drivers" }
prometeu-vm = { path = "../prometeu-vm" } prometeu-vm = { path = "../prometeu-vm" }
prometeu-system = { path = "../prometeu-system" } prometeu-system = { path = "../prometeu-system" }
prometeu-bytecode = { path = "../../compiler/prometeu-bytecode" }
prometeu-core = { path = "../../compiler/prometeu-core" }
prometeu-hal = { path = "../prometeu-hal" } prometeu-hal = { path = "../prometeu-hal" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149"

View File

@ -5,7 +5,6 @@ edition = "2024"
license.workspace = true license.workspace = true
[dependencies] [dependencies]
prometeu-core = { path = "../../compiler/prometeu-core" }
prometeu-bytecode = { path = "../../compiler/prometeu-bytecode" } prometeu-bytecode = { path = "../../compiler/prometeu-bytecode" }
serde = { version = "1.0.228", features = ["derive"] } serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.149" serde_json = "1.0.149"

View File

@ -8,7 +8,6 @@ license.workspace = true
serde_json = "1.0.149" serde_json = "1.0.149"
prometeu-vm = { path = "../prometeu-vm" } prometeu-vm = { path = "../prometeu-vm" }
prometeu-bytecode = { path = "../../compiler/prometeu-bytecode" } prometeu-bytecode = { path = "../../compiler/prometeu-bytecode" }
prometeu-core = { path = "../../compiler/prometeu-core" }
prometeu-hal = { path = "../prometeu-hal" } prometeu-hal = { path = "../prometeu-hal" }
[dev-dependencies] [dev-dependencies]

View File

@ -5,7 +5,5 @@ edition = "2024"
license.workspace = true license.workspace = true
[dependencies] [dependencies]
serde = { version = "1.0.228", features = ["derive"] }
prometeu-bytecode = { path = "../../compiler/prometeu-bytecode" } prometeu-bytecode = { path = "../../compiler/prometeu-bytecode" }
prometeu-core = { path = "../../compiler/prometeu-core" }
prometeu-hal = { path = "../prometeu-hal" } prometeu-hal = { path = "../prometeu-hal" }

View File

@ -114,6 +114,12 @@ pub struct GateEntry {
pub type_id: u32, pub type_id: u32,
} }
impl Default for VirtualMachine {
fn default() -> Self {
Self::new(vec![], vec![])
}
}
impl VirtualMachine { impl VirtualMachine {
/// Creates a new VM instance with the provided bytecode and constants. /// Creates a new VM instance with the provided bytecode and constants.
pub fn new(rom: Vec<u8>, constant_pool: Vec<Value>) -> Self { pub fn new(rom: Vec<u8>, constant_pool: Vec<Value>) -> Self {
@ -242,15 +248,6 @@ impl VirtualMachine {
}); });
} }
}
impl Default for VirtualMachine {
fn default() -> Self {
Self::new(vec![], vec![])
}
}
impl VirtualMachine {
/// Executes the VM for a limited number of cycles (budget). /// Executes the VM for a limited number of cycles (budget).
/// ///
/// This is the heart of the deterministic execution model. Instead of running /// This is the heart of the deterministic execution model. Instead of running
@ -359,10 +356,6 @@ impl VirtualMachine {
/// 2. Decode: Identify what operation to perform. /// 2. Decode: Identify what operation to perform.
/// 3. Execute: Perform the operation, updating stacks, memory, or calling peripherals. /// 3. Execute: Perform the operation, updating stacks, memory, or calling peripherals.
pub fn step(&mut self, native: &mut dyn NativeInterface, ctx: &mut HostContext) -> Result<(), LogicalFrameEndingReason> { pub fn step(&mut self, native: &mut dyn NativeInterface, ctx: &mut HostContext) -> Result<(), LogicalFrameEndingReason> {
self.step_impl(native, ctx)
}
fn step_impl(&mut self, native: &mut dyn NativeInterface, ctx: &mut HostContext) -> Result<(), LogicalFrameEndingReason> {
if self.halted || self.pc >= self.program.rom.len() { if self.halted || self.pc >= self.program.rom.len() {
return Ok(()); return Ok(());
} }

View File

@ -14,7 +14,6 @@ dist = true
include = ["../../VERSION.txt"] include = ["../../VERSION.txt"]
[dependencies] [dependencies]
prometeu-core = { path = "../../compiler/prometeu-core" }
prometeu-firmware = { path = "../../console/prometeu-firmware" } prometeu-firmware = { path = "../../console/prometeu-firmware" }
prometeu-system = { path = "../../console/prometeu-system" } prometeu-system = { path = "../../console/prometeu-system" }
prometeu-drivers = { path = "../../console/prometeu-drivers" } prometeu-drivers = { path = "../../console/prometeu-drivers" }

View File

@ -13,10 +13,6 @@ path = "src/main.rs"
name = "prometeu-runtime" name = "prometeu-runtime"
path = "src/bin/prometeu-runtime.rs" path = "src/bin/prometeu-runtime.rs"
[[bin]]
name = "prometeuc"
path = "src/bin/prometeuc.rs"
[package.metadata.dist] [package.metadata.dist]
dist = true dist = true
include = [ include = [
@ -27,5 +23,4 @@ include = [
[dependencies] [dependencies]
clap = { version = "4.5", features = ["derive"] } clap = { version = "4.5", features = ["derive"] }
prometeu-host-desktop-winit = { path = "../../host/prometeu-host-desktop-winit" } prometeu-host-desktop-winit = { path = "../../host/prometeu-host-desktop-winit" }
prometeu-build-pipeline = { path = "../../compiler/prometeu-build-pipeline" }
anyhow = "1.0.100" anyhow = "1.0.100"

View File

@ -1 +0,0 @@
/// Binary entry point: delegates straight to the build pipeline.
fn main() -> anyhow::Result<()> {
    prometeu_build_pipeline::run()
}

View File

@ -1,12 +0,0 @@
[package]
name = "prometeu-lsp"
version = "0.1.0"
edition = "2021"
license = "MIT"
[dependencies]
tower-lsp = "0.20"
tokio = { version = "1", features = ["full"] }
tokio-util = { version = "0.7" }
prometeu-core = { path = "../../compiler/prometeu-core" }
prometeu-build-pipeline = { path = "../../compiler/prometeu-build-pipeline" }

View File

@ -1,51 +0,0 @@
// use std::sync::Arc;
// use tokio::sync::RwLock;
// use tokio_util::sync::CancellationToken;
//
// use std::collections::HashMap;
// use prometeu_core::{FileDB, FileId, ProjectId};
// use crate::rebuild::LspDiagnostic;
// use crate::rebuild::FlatSymbol;
//
// #[derive(Default)]
// pub struct AnalysisDb {
// pub file_db: FileDB,
// pub file_to_project: HashMap<FileId, ProjectId>,
//
// // Os campos abaixo serão conectados conforme PR-03/04/05 (podem começar como None)
// // pub ast: Option<AstArena>,
// // pub symbols: Option<SymbolArena>,
// // pub types: Option<TypeArena>,
// // pub diagnostics: Vec<Diagnostic>,
//
// /// Incrementa a cada rebuild concluído com sucesso
// pub revision: u64,
//
// /// Cancel token do último rebuild em progresso (se houver)
// pub active_rebuild: Option<CancellationToken>,
//
// /// Último snapshot bom (consultado pelos handlers LSP)
// pub last_good: Option<AnalysisSnapshot>,
// }
//
// pub type SharedDb = Arc<RwLock<AnalysisDb>>;
//
// impl AnalysisDb {
// pub fn project_for_file(&self, file: FileId) -> Option<ProjectId> {
// self.file_to_project.get(&file).copied()
// }
//
// /// Returns all known file ids in the FileDB.
// pub fn file_ids(&self) -> Vec<FileId> {
// // delegate to FileDB helper (added in prometeu-analysis)
// self.file_db.all_files()
// }
// }
//
// #[derive(Default, Clone)]
// pub struct AnalysisSnapshot {
// /// Diagnostics por arquivo (URI LSP → diagnostics já convertidos)
// pub diagnostics_by_uri: HashMap<String, Vec<LspDiagnostic>>,
// /// Lista “flatten” de símbolos para workspaceSymbol/documentSymbol
// pub symbols_flat: Vec<FlatSymbol>,
// }

View File

@ -1,190 +0,0 @@
// use std::sync::Arc;
// use tokio::sync::RwLock;
// use tower_lsp::{Client, LspService, Server};
// use tower_lsp::lsp_types as lsp;
//
// mod analysis_db;
// mod rebuild;
// struct Backend {
// db: SharedDb,
// client: Client,
// }
// #[tower_lsp::async_trait]
// impl tower_lsp::LanguageServer for Backend {
// async fn initialize(
// &self,
// _: tower_lsp::lsp_types::InitializeParams,
// ) -> tower_lsp::jsonrpc::Result<tower_lsp::lsp_types::InitializeResult> {
// Ok(tower_lsp::lsp_types::InitializeResult {
// capabilities: tower_lsp::lsp_types::ServerCapabilities {
// text_document_sync: Some(
// tower_lsp::lsp_types::TextDocumentSyncCapability::Kind(
// tower_lsp::lsp_types::TextDocumentSyncKind::FULL,
// ),
// ),
// // MVP capabilities only (PR-08):
// definition_provider: Some(tower_lsp::lsp_types::OneOf::Left(true)),
// document_symbol_provider: Some(tower_lsp::lsp_types::OneOf::Left(true)),
// // workspace_symbol is not available in tower-lsp 0.20 trait
// ..Default::default()
// },
// ..Default::default()
// })
// }
//
// async fn initialized(&self, _: tower_lsp::lsp_types::InitializedParams) {}
//
// async fn shutdown(&self) -> tower_lsp::jsonrpc::Result<()> {
// Ok(())
// }
//
// // didOpen: upsert texto, solicita rebuild
// async fn did_open(&self, params: tower_lsp::lsp_types::DidOpenTextDocumentParams) {
// let uri = params.text_document.uri.to_string();
// let text = params.text_document.text;
// {
// let mut guard = self.db.write().await;
// guard.file_db.upsert(&uri, text);
// }
// rebuild::request_rebuild(self.db.clone(), self.client.clone()).await;
// }
//
// // didChange (FULL): receber conteúdo completo e upsert
// async fn did_change(&self, params: tower_lsp::lsp_types::DidChangeTextDocumentParams) {
// let uri = params.text_document.uri.to_string();
// // Full-sync: esperamos 1 conteúdo completo
// if let Some(change) = params.content_changes.into_iter().last() {
// let text = change.text;
// let mut guard = self.db.write().await;
// guard.file_db.upsert(&uri, text);
// }
// rebuild::request_rebuild(self.db.clone(), self.client.clone()).await;
// }
//
// // didClose: opcionalmente remover do db e limpar diagnostics
// async fn did_close(&self, params: tower_lsp::lsp_types::DidCloseTextDocumentParams) {
// let uri = params.text_document.uri;
// // Estratégia simples: manter FileDB para estabilidade de IDs, mas limpar diagnostics
// let _ = self
// .client
// .publish_diagnostics(uri.clone(), vec![], Some(0))
// .await;
// }
//
// async fn goto_definition(
// &self,
// params: tower_lsp::lsp_types::GotoDefinitionParams,
// ) -> tower_lsp::jsonrpc::Result<Option<tower_lsp::lsp_types::GotoDefinitionResponse>> {
// let tdp = params.text_document_position_params;
// let uri = tdp.text_document.uri;
// let pos = tdp.position;
//
// let guard = self.db.read().await;
// // Map URI to current text and index
// let Some(fid) = guard.file_db.file_id(uri.as_str()) else { return Ok(None) };
// let text = guard.file_db.text(fid).to_string();
// let idx = prometeu_analysis::TextIndex::new(&text);
// let byte = idx.lsp_to_byte(pos.line, pos.character);
// let ident = ident_at(&text, byte);
//
// if let Some(name) = ident {
// if let Some(snap) = &guard.last_good {
// let mut hits: Vec<lsp::Location> = Vec::new();
// for s in &snap.symbols_flat {
// if s.name == name {
// hits.push(s.location.clone());
// }
// }
// if !hits.is_empty() {
// return Ok(Some(lsp::GotoDefinitionResponse::Array(hits)));
// }
// }
// }
// Ok(None)
// }
//
//
// // MVP stubs: documentSymbol/workspaceSymbol/definition retornam vazio até PRs seguintes
// async fn document_symbol(
// &self,
// params: tower_lsp::lsp_types::DocumentSymbolParams,
// ) -> tower_lsp::jsonrpc::Result<Option<tower_lsp::lsp_types::DocumentSymbolResponse>> {
// let uri = params.text_document.uri;
// let guard = self.db.read().await;
// if let Some(snap) = &guard.last_good {
// let mut items: Vec<lsp::SymbolInformation> = Vec::new();
// for s in &snap.symbols_flat {
// if s.location.uri == uri {
// items.push(lsp::SymbolInformation {
// name: s.name.clone(),
// kind: s.kind,
// location: s.location.clone(),
// tags: None,
// deprecated: None,
// container_name: None,
// });
// }
// }
// return Ok(Some(lsp::DocumentSymbolResponse::Flat(items)));
// }
// Ok(Some(lsp::DocumentSymbolResponse::Flat(vec![])))
// }
//
// // async fn workspace_symbol(
// // &self,
// // params: lsp::WorkspaceSymbolParams,
// // ) -> tower_lsp::jsonrpc::Result<Option<Vec<lsp::SymbolInformation>>> {
// // let query = params.query.to_lowercase();
// // let guard = self.db.read().await;
// // if let Some(snap) = &guard.last_good {
// // let mut out: Vec<lsp::SymbolInformation> = Vec::new();
// // for s in &snap.symbols_flat {
// // if s.name.to_lowercase().contains(&query) {
// // out.push(lsp::SymbolInformation {
// // name: s.name.clone(),
// // kind: s.kind,
// // location: s.location.clone(),
// // tags: None,
// // deprecated: None,
// // container_name: None,
// // });
// // if out.len() >= 50 { break; }
// // }
// // }
// // return Ok(Some(out));
// // }
// // Ok(Some(vec![]))
// // }
// }
// Entry point for the Prometeu LSP server binary.
//
// NOTE(review): the entire tower-lsp backend (`Backend`, `analysis_db`,
// `rebuild`) is currently commented out, so this binary only starts a Tokio
// runtime and returns immediately without serving any LSP requests. The
// commented lines below are the previous wiring, kept for when the language
// server is re-enabled.
#[tokio::main]
async fn main() {
    // let stdin = tokio::io::stdin();
    // let stdout = tokio::io::stdout();
    //
    // let db: SharedDb = Arc::new(RwLock::new(analysis_db::sourceDb::default()));
    //
    // let (service, socket) = LspService::new(|client| Backend { db: db.clone(), client });
    // Server::new(stdin, stdout, socket).serve(service).await;
}
// // Simple textual identifier extraction for MVP definition lookup.
// fn ident_at(text: &str, byte: u32) -> Option<String> {
// let b = byte as usize;
// if b > text.len() { return None; }
// // Expand left and right over identifier characters (ASCII + underscore; acceptable MVP)
// let bytes = text.as_bytes();
// let mut start = b;
// while start > 0 {
// let c = bytes[start - 1];
// if (c as char).is_ascii_alphanumeric() || c == b'_' { start -= 1; } else { break; }
// }
// let mut end = b;
// while end < bytes.len() {
// let c = bytes[end];
// if (c as char).is_ascii_alphanumeric() || c == b'_' { end += 1; } else { break; }
// }
// if start < end { Some(text[start..end].to_string()) } else { None }
// }

View File

@ -1,147 +0,0 @@
// use tokio_util::sync::CancellationToken;
// use tower_lsp::Client;
//
// use crate::source_db::{AnalysisSnapshot, SharedDb};
// use tower_lsp::lsp_types as lsp;
// use prometeu_core::{FileId, NameInterner, Severity, SymbolKind, TextIndex};
//
// #[derive(Clone, Debug)]
// pub struct LspDiagnostic {
// pub range: lsp::Range,
// pub severity: Option<lsp::DiagnosticSeverity>,
// pub code: Option<lsp::NumberOrString>,
// pub message: String,
// }
//
// #[derive(Clone, Debug)]
// pub struct FlatSymbol {
// pub name: String,
// pub kind: lsp::SymbolKind,
// pub location: lsp::Location,
// }
//
// /// Requests a project rebuild (coarse). Cancels the previous rebuild if in progress.
// pub async fn request_rebuild(db: SharedDb, client: Client) {
// // 1) short lock: cancel previous token and install a new one
// let new_token = CancellationToken::new();
// {
// let mut guard = db.write().await;
// if let Some(prev) = guard.active_rebuild.take() {
// prev.cancel();
// }
// guard.active_rebuild = Some(new_token.clone());
// }
//
// // 2) spawn task: run analysis outside the lock
// tokio::spawn(async move {
// // Safe point: check before starting
// if new_token.is_cancelled() { return; }
//
// // Clone snapshot of files (URIs and texts) under a short read lock
// let (files, revision) = {
// let guard = db.read().await;
// let mut v = Vec::new();
// for fid in guard.file_ids() {
// let uri = guard.file_db.uri(fid).to_string();
// let text = guard.file_db.text(fid).to_string();
// v.push((fid, uri, text));
// }
// (v, guard.revision)
// };
//
// // Prepare accumulators
// let mut diagnostics_by_uri: std::collections::HashMap<String, Vec<LspDiagnostic>> = std::collections::HashMap::new();
// let mut symbols_flat: Vec<FlatSymbol> = Vec::new();
//
// // For each file: run a minimal frontend to collect diagnostics and top-level symbols
// for (fid, uri, text) in files.into_iter() {
// if new_token.is_cancelled() { return; }
// let text_index = TextIndex::new(&text);
//
// // Parser + basic pipeline
// let mut interner = NameInterner::new();
// let mut parser = ParserFacade::new(&text, fid, &mut interner);
// match parser.parse_and_collect() {
// Ok(parsed) => {
// // Diagnostics (from parse/collect are already inside parsed.diags)
// let mut file_diags = Vec::new();
// for d in parsed.diagnostics {
// let range = span_to_range(fid, &text_index, d.span.start, d.span.end);
// file_diags.push(LspDiagnostic {
// range,
// severity: Some(match d.severity { Severity::Error => lsp::DiagnosticSeverity::ERROR, Severity::Warning => lsp::DiagnosticSeverity::WARNING }),
// code: Some(lsp::NumberOrString::String(d.code)),
// message: d.message,
// });
// }
// diagnostics_by_uri.insert(uri.clone(), file_diags);
//
// // Symbols: flatten only top-level decls with their decl_span
// for sym in parsed.symbols {
// let lsp_loc = lsp::Location {
// uri: uri.parse().unwrap_or_else(|_| lsp::Url::parse("untitled:").unwrap()),
// range: span_to_range(fid, &text_index, sym.decl_span.start, sym.decl_span.end),
// };
// let kind = match sym.kind {
// SymbolKind::Function => lsp::SymbolKind::FUNCTION,
// SymbolKind::Service => lsp::SymbolKind::INTERFACE,
// SymbolKind::Struct => lsp::SymbolKind::STRUCT,
// SymbolKind::Contract => lsp::SymbolKind::CLASS,
// SymbolKind::ErrorType => lsp::SymbolKind::ENUM,
// _ => lsp::SymbolKind::VARIABLE,
// };
// symbols_flat.push(FlatSymbol { name: sym.name, kind, location: lsp_loc });
// }
// }
// Err(diags) => {
// // Parser returned errors only; publish them
// let mut file_diags = Vec::new();
// for d in diags {
// let range = span_to_range(fid, &text_index, d.span.start, d.span.end);
// file_diags.push(LspDiagnostic {
// range,
// severity: Some(match d.severity { Severity::Error => lsp::DiagnosticSeverity::ERROR, Severity::Warning => lsp::DiagnosticSeverity::WARNING }),
// code: Some(lsp::NumberOrString::String(d.code)),
// message: d.message,
// });
// }
// diagnostics_by_uri.insert(uri.clone(), file_diags);
// }
// }
// }
//
// if new_token.is_cancelled() { return; }
//
// // 3) short lock: swap state + revision++ if not cancelled; then publish diagnostics
// let snapshot = AnalysisSnapshot { diagnostics_by_uri: diagnostics_by_uri.clone(), symbols_flat };
// {
// let mut guard = db.write().await;
// if new_token.is_cancelled() { return; }
// // if no new changes since we started, accept this snapshot
// guard.last_good = Some(snapshot);
// guard.revision = revision.saturating_add(1);
// }
//
// // Publish diagnostics per file
// for (uri, diags) in diagnostics_by_uri.into_iter() {
// let lsp_diags: Vec<lsp::Diagnostic> = diags.into_iter().map(|d| lsp::Diagnostic {
// range: d.range,
// severity: d.severity,
// code: d.code,
// message: d.message,
// ..Default::default()
// }).collect();
// let _ = client.publish_diagnostics(uri.parse().unwrap_or_else(|_| lsp::Url::parse("untitled:").unwrap()), lsp_diags, None).await;
// }
// });
// }
//
// fn span_to_range(file: FileId, idx: &TextIndex, start: u32, end: u32) -> lsp::Range {
// // Ignore `file` here since idx is built from that file's text
// let (s_line, s_col) = idx.byte_to_lsp(start);
// let (e_line, e_col) = idx.byte_to_lsp(end);
// lsp::Range {
// start: lsp::Position { line: s_line, character: s_col },
// end: lsp::Position { line: e_line, character: e_col },
// }
// }

View File

@ -1,4 +1,4 @@
< [Back](chapter-14.md) | [Summary](table-of-contents.md) > < [Back](chapter-14.md) | [Summary](table-of-contents.md) | [Next](chapter-16.md) >
# Asset Management # Asset Management
@ -328,4 +328,4 @@ Where:
> **Asset Types describe content.** > **Asset Types describe content.**
> **The SDK orchestrates; the hardware executes.** > **The SDK orchestrates; the hardware executes.**
< [Back](chapter-14.md) | [Summary](table-of-contents.md) > < [Back](chapter-14.md) | [Summary](table-of-contents.md) | [Next](chapter-16.md) >

View File

@ -0,0 +1,294 @@
< [Back](chapter-15.md) | [Summary](table-of-contents.md) >
# **Host ABI and Syscalls**
This chapter defines the Application Binary Interface (ABI) between the Prometeu Virtual Machine (PVM) and the host environment. It specifies how syscalls are encoded, invoked, verified, and accounted for.
Syscalls provide controlled access to host-managed subsystems such as graphics, audio, input, asset banks, and persistent storage.
This chapter defines the **contract**. Individual subsystems (GFX, AUDIO, MEMCARD, ASSETS, etc.) define their own syscall tables that conform to this ABI.
---
## 1 Design Principles
The syscall system follows these rules:
1. **Deterministic**: Syscalls must behave deterministically for the same inputs and frame state.
2. **Synchronous**: Syscalls execute to completion within the current VM slice.
3. **Non-blocking**: Long operations must be modeled as request + status polling.
4. **Capability-gated**: Each syscall requires a declared capability.
5. **Stack-based ABI**: Arguments and return values are passed via VM slots.
6. **Not first-class**: Syscalls are callable but cannot be stored as values.
---
## 2 Syscall Instruction Semantics
The VM provides a single instruction:
```
SYSCALL <id>
```
Where:
* `<id>` is a 32-bit integer identifying the syscall.
Execution steps:
1. The VM looks up the syscall metadata using `<id>`.
2. The VM verifies that enough arguments exist on the stack.
3. The VM checks capability requirements.
4. The syscall executes in the host environment.
5. The syscall leaves exactly `ret_slots` values on the stack.
If any contract rule is violated, the VM traps.
---
## 3 Syscall Metadata Table
Each syscall is defined by a metadata entry.
### SyscallMeta structure
```
SyscallMeta {
id: u32
name: string
arg_slots: u8
ret_slots: u8
capability: CapabilityId
may_allocate: bool
cost_hint: u32
}
```
Fields:
| Field | Description |
| -------------- | ------------------------------------------------ |
| `id` | Unique syscall identifier |
| `name` | Human-readable name |
| `arg_slots` | Number of input stack slots |
| `ret_slots` | Number of return stack slots |
| `capability` | Required capability |
| `may_allocate` | Whether the syscall may allocate VM heap objects |
| `cost_hint` | Expected cycle cost (for analysis/profiling) |
The verifier uses this table to validate stack effects.
---
## 4 Arguments and Return Values
Syscalls use the same slot-based ABI as functions.
### Argument passing
Arguments are pushed onto the stack before the syscall.
Example:
```
push a
push b
SYSCALL X // expects 2 arguments
```
### Return values
After execution, the syscall leaves exactly `ret_slots` values on the stack.
Example:
```
// before: []
SYSCALL input_state
// after: [held, pressed, released]
```
### Slot types
Each slot contains one of the VM value types:
* int
* bool
* float
* handle
* null
Composite return values are represented as multiple slots (stack tuples).
---
## 5 Syscalls as Callable Entities (Not First-Class)
Syscalls behave like functions in terms of arguments and return values, but they are **not first-class values**.
This means:
* Syscalls can be invoked.
* Syscalls cannot be stored in variables.
* Syscalls cannot be passed as arguments.
* Syscalls cannot be returned from functions.
Only user-defined functions and closures are first-class.
### Example declaration (conceptual)
```
host fn input_state() -> (int, int, int)
```
This represents a syscall with three return values, but it cannot be treated as a function value.
---
## 6 Error Model: Traps vs Status Codes
Syscalls use a hybrid error model.
### Trap conditions (contract violations)
The VM traps when:
* The syscall id is invalid.
* The required capability is missing.
* The stack does not contain enough arguments.
* A handle is invalid or dead.
These are considered fatal contract violations.
### Status returns (domain conditions)
Normal operational states are returned as values.
Examples:
* asset not yet loaded
* audio voice unavailable
* memcard full
These are represented by status codes in return slots.
---
## 7 Capability System
Each syscall requires a capability.
Capabilities are declared by the cartridge manifest.
Example capability groups:
* `gfx`
* `audio`
* `input`
* `asset`
* `memcard`
If a syscall is invoked without the required capability:
* The VM traps.
---
## 8 Interaction with the Garbage Collector
The VM heap is managed by the GC. Host-managed memory is separate.
### Heap vs host memory
| Memory | Managed by | GC scanned |
| --------------- | ---------- | ---------- |
| VM heap objects | VM GC | Yes |
| Asset banks | Host | No |
| Audio buffers | Host | No |
| Framebuffers | Host | No |
Assets are addressed by identifiers, not VM heap handles.
### Host root rule
If a syscall stores a handle to a VM heap object beyond the duration of the call, it must register that handle as a **host root**.
This prevents the GC from collecting objects still in use by the host.
This rule applies only to VM heap objects (such as closures or user objects), not to asset identifiers or primitive values.
---
## 9 Determinism Rules
Syscalls must obey deterministic execution rules.
Forbidden behaviors:
* reading real-time clocks
* accessing non-deterministic OS APIs
* performing blocking I/O
Allowed patterns:
* frame-based timers
* request + poll status models
* event delivery at frame boundaries
---
## 10 Cost Model and Budgeting
Each syscall contributes to frame cost.
The VM tracks:
* cycles spent in syscalls
* syscall counts
* allocation cost (if any)
Example telemetry:
```
Frame 10231:
Syscalls: 12
Cycles (syscalls): 380
Allocations via syscalls: 2
```
Nothing is free.
---
## 11 Blocking and Long Operations
Syscalls must not block.
Long operations must use a two-phase model:
1. Request
2. Status polling or event notification
Example pattern:
```
asset.load(id)
...
status, progress = asset.status(id)
```
---
## 12 Summary
* Syscalls are deterministic, synchronous, and non-blocking.
* They use the same slot-based ABI as functions.
* They are callable but not first-class.
* Capabilities control access to host subsystems.
* GC only manages VM heap objects.
* Host-held heap objects must be registered as roots.
* All syscall costs are tracked per frame.
< [Back](chapter-15.md) | [Summary](table-of-contents.md) >

View File

@ -1,457 +1,427 @@
< [Back](chapter-1.md) | [Summary](table-of-contents.md) | [Next](chapter-3.md) > < [Back](chapter-1.md) | [Summary](table-of-contents.md) | [Next](chapter-3.md) >
# ⚙️ **PVM (PROMETEU VM) — Instruction Set** # **Prometeu Virtual Machine (PVM)**
## 1. Overview This chapter defines the execution model, value system, calling convention, memory model, and host interface of the Prometeu Virtual Machine.
The **PROMETEU VM** is a mandatory virtual machine always present in the logical hardware: The PVM is a **deterministic, stack-based VM** designed for a 2D fantasy console environment. Its primary goal is to provide predictable performance, safe memory access, and a stable execution contract suitable for real-time games running at a fixed frame rate.
* **stack-based**
* deterministic
* cycle-oriented
* designed for teaching and inspection
It exists to:
* map high-level language concepts
* make computational cost visible
* allow execution analysis
* serve as the foundation of the PROMETEU cartridge
> The PROMETEU VM is simple by choice.
> Simplicity is a pedagogical tool.
--- ---
## 2. Execution Model ## 1 Core Design Principles
### 2.1 Main Components The PVM is designed around the following constraints:
The VM has: 1. **Deterministic execution**: no hidden threads or asynchronous callbacks.
2. **Frame-based timing**: execution is bounded by frame time.
* **PC (Program Counter)** — next instruction 3. **Safe memory model**: all heap objects are accessed through handles.
* **Operand Stack** — value stack 4. **Simple compilation target**: stack-based bytecode with verified control flow.
* **Call Stack** — stores execution frames for function calls 5. **Stable ABI**: multi-value returns with fixed slot semantics.
* **Scope Stack** — stores frames for blocks within a function 6. **First-class functions**: functions can be passed, stored, and returned.
* **Heap** — dynamic memory
* **Globals** — global variables
* **Constant Pool** — literals and references
* **ROM** — cartridge bytecode
* **RAM** — mutable data
--- ---
### 2.2 Execution Cycle ## 2 Execution Model
Each instruction executes: The PVM executes bytecode in a **frame loop**. Each frame:
``` 1. The firmware enters the VM.
FETCH → DECODE → EXECUTE → ADVANCE PC 2. The VM runs until:
```
Properties: * the frame budget is consumed, or
* a `FRAME_SYNC` instruction is reached.
3. At `FRAME_SYNC`:
* every instruction has a fixed cost in cycles * events are delivered
* there is no invisible implicit work * input is sampled
* execution is fully deterministic * optional GC may run
4. Control returns to the firmware.
`FRAME_SYNC` is the **primary safepoint** in the system.
--- ---
## 3. Fundamental Types ## 3 Value Types
All runtime values are stored in VM slots as a `Value`.
### Primitive value types (stack values)
| Type | Description | | Type | Description |
| --------- | ------------------------- | | ------- | --------------------- |
| `int32` | 32-bit signed integer | | `int` | 64-bit signed integer |
| `int64` | 64-bit signed integer | | `bool` | Boolean value |
| `float` | 64-bit floating point | | `float` | 64-bit floating point |
| `boolean` | true/false |
| `string` | immutable UTF-8 |
| `null` | absence of value |
| `ref` | heap reference |
### 3.1 Numeric Promotion ### Built-in vector and graphics types (stack values)
The VM promotes types automatically during operations:
* `int32` + `int32``int32`
* `int32` + `int64``int64`
* `int` + `float``float`
* Bitwise operations promote `int32` to `int64` if any operand is `int64`.
Do not exist: These are treated as VM primitives with dedicated opcodes:
* magic coercions | Type | Description |
* implicit casts | ------- | --------------------------------- |
* silent overflows | `vec2` | 2D vector (x, y) |
| `color` | Packed color value |
| `pixel` | Combination of position and color |
These types:
* live entirely on the stack
* are copied by value
* never allocate on the heap
### Heap values
All user-defined objects live on the heap and are accessed via **handles**.
| Type | Description |
| -------- | -------------------------- |
| `handle` | Reference to a heap object |
| `null` | Null handle |
Handles may refer to:
* user objects
* arrays
* strings
* closures
--- ---
## 4. Stack Conventions & Calling ABI ## 4 Handles and Gate Table
* Operations use the top of the stack. Heap objects are accessed through **handles**. A handle is a pair:
* Results always return to the stack.
* **LIFO Order:** Last pushed = first consumed.
* **Mandatory Return:** Every function (`Call`) and `Syscall` MUST leave exactly **one** value on the stack upon completion. If there is no meaningful value to return, `Null` must be pushed.
### 4.1 Calling Convention (Call / Ret)
1. **Arguments:** The caller pushes arguments in order (arg0, arg1, ..., argN-1).
2. **Execution:** The `Call` instruction specifies `args_count`. These `N` values become the **locals** of the new frame (local 0 = arg0, local 1 = arg1, etc.).
3. **Return Value:** Before executing `Ret`, the callee MUST push its return value.
4. **Cleanup:** The `Ret` instruction is responsible for:
- Popping the return value.
- Removing all locals (the arguments) from the operand stack.
- Re-pushing the return value.
- Restoring the previous frame and PC.
### 4.2 Syscall Convention
1. **Arguments:** The caller pushes arguments in order.
2. **Execution:** The native implementation pops arguments as needed. Since it's a stack, it will pop them in reverse order (argN-1 first, then argN-2, ..., arg0).
3. **Return Value:** The native implementation MUST push exactly one value onto the stack before returning to the VM.
4. **Cleanup:** The native implementation is responsible for popping all arguments it expects.
Example:
``` ```
PUSH_CONST 3 handle = { index, generation }
PUSH_CONST 4
ADD
``` ```
State: The VM maintains a **gate table**:
``` ```
[3] GateEntry {
[3, 4] alive: bool
[7] generation: u32
base: usize
slots: u32
type_id: u32
}
``` ```
--- When an object is freed:
## 5. Instruction Categories * its gate entry is marked dead
* its generation is incremented
1. Flow control If a handle's generation does not match the gate entry, the VM traps.
2. Stack
3. Arithmetic and logic This prevents use-after-free bugs.
4. Variables
5. Functions
6. Heap and structures
7. Peripherals (syscalls)
8. System
--- ---
## 6. Instructions — VM Set 1 ## 5 Heap Model
### 6.1 Execution Control * All user objects live in the heap.
* Objects are fixed-layout blocks of slots.
* No inheritance at the memory level.
* Traits/interfaces are resolved by the compiler or via vtables.
| Instruction | Cycles | Description | Built-in types remain stack-only.
| ------------------- | ------ | ------------------------- |
| `NOP` | 1 | Does nothing | Heap objects include:
| `HALT` | 1 | Terminates execution |
| `JMP addr` | 2 | Unconditional jump | * user structs/classes
| `JMP_IF_FALSE addr` | 3 | Jumps if top is false | * strings
| `JMP_IF_TRUE addr` | 3 | Jumps if top is true | * arrays
* closures
--- ---
### 6.2 Stack ## 6 Tuples and Multi-Return ABI
| Instruction | Cycles | Description | The PVM supports **multi-value returns**.
| -------------- | ------ | ------------------- |
| `PUSH_CONST k` | 2 | Pushes constant | ### Tuple rules
| `POP` | 1 | Removes top |
| `DUP` | 1 | Duplicates top | * Tuples are **stack-only**.
| `SWAP` | 1 | Swaps two tops | * Maximum tuple arity is **N = 6**.
| `PUSH_I32 v` | 2 | Pushes 32-bit int | * Tuples are not heap objects by default.
| `PUSH_I64 v` | 2 | Pushes 64-bit int | * To persist a tuple, it must be explicitly boxed.
| `PUSH_F64 v` | 2 | Pushes 64-bit float |
| `PUSH_BOOL v` | 2 | Pushes boolean | ### Call convention
Each function declares a fixed `ret_slots` value.
At call time:
1. Caller prepares arguments.
2. `CALL` transfers control.
3. Callee executes.
4. `RET` leaves exactly `ret_slots` values on the stack.
The verifier ensures that:
* all control paths produce the same `ret_slots`
* stack depth is consistent.
--- ---
### 6.3 Arithmetic ## 7 Call Stack and Frames
| Instruction | Cycles | The VM uses a **call stack**.
| ----------- | ------ |
| `ADD` | 2 |
| `SUB` | 2 |
| `MUL` | 4 |
| `DIV` | 6 |
--- Each frame contains:
### 6.4 Comparison and Logic
| Instruction | Cycles |
| ----------- | ------ |
| `EQ` | 2 |
| `NEQ` | 2 |
| `LT` | 2 |
| `GT` | 2 |
| `LTE` | 2 |
| `GTE` | 2 |
| `AND` | 2 |
| `OR` | 2 |
| `NOT` | 1 |
| `BIT_AND` | 2 |
| `BIT_OR` | 2 |
| `BIT_XOR` | 2 |
| `SHL` | 2 |
| `SHR` | 2 |
| `NEG` | 1 |
---
### 6.5 Variables
| Instruction | Cycles | Description |
| -------------- | ------ | ---------------- |
| `GET_GLOBAL i` | 3 | Reads global |
| `SET_GLOBAL i` | 3 | Writes global |
| `GET_LOCAL i` | 2 | Reads local |
| `SET_LOCAL i` | 2 | Writes local |
---
### 6.6 Functions
| Instruction | Cycles | Description |
|----------------------| ------ |-----------------------------------------------|
| `CALL <u32 func_id>` | 5 | Saves PC and creates a new call frame |
| `RET` | 4 | Returns from function, restoring PC |
| `PUSH_SCOPE` | 3 | Creates a scope within the current function |
| `POP_SCOPE` | 3 | Removes current scope and its local variables |
**ABI Rules for Functions:**
* **`func_id`:** A 32-bit index into the **final FunctionTable**, assigned by the compiler linker at build time.
* **Mandatory Return Value:** Every function MUST leave exactly one value on the stack before `RET`. If the function logic doesn't return a value, it must push `null`.
* **Stack Cleanup:** `RET` automatically clears all local variables (based on `stack_base`) and re-pushes the return value.
---
### 6.7 Heap
| Instruction | Cycles | Description |
| --------------- | ------ | --------------- |
| `ALLOC size` | 10 | Allocates on heap |
| `LOAD_REF off` | 3 | Reads field |
| `STORE_REF off` | 3 | Writes field |
Heap is:
* finite
* monitored
* accounted for in the CAP
---
### 6.8 Peripherals (Syscalls)
| Instruction | Cycles | Description |
|--------------| -------- | --------------------- |
| `SYSCALL id` | variable | Call to hardware |
**ABI Rules for Syscalls:**
* **Argument Order:** Arguments must be pushed in the order they appear in the call (LIFO stack behavior).
* Example: `gfx.draw_rect(x, y, w, h, color)` means:
1. `PUSH x`
2. `PUSH y`
3. `PUSH w`
4. `PUSH h`
5. `PUSH color`
6. `SYSCALL 0x1002`
* **Consumption:** The native function MUST pop all its arguments from the stack.
* **Return Value:** If the syscall returns a value, it will be pushed onto the stack by the native implementation. If not, the stack state for the caller remains as it was before pushing arguments.
#### Implemented Syscalls (v0.1)
| ID | Name | Arguments (Stack) | Return |
| ---------- | ----------------- | ---------------------------- | ------- |
| `0x0001` | `system.has_cart` | - | `bool` |
| `0x0002` | `system.run_cart` | - | - |
| `0x1001` | `gfx.clear` | `color_idx` | - |
| `0x1002` | `gfx.draw_rect` | `x, y, w, h, color_idx` | - |
| `0x1003` | `gfx.draw_line` | `x1, y1, x2, y2, color_idx` | - |
| `0x1004` | `gfx.draw_circle` | `xc, yc, r, color_idx` | - |
| `0x1005` | `gfx.draw_disc` | `xc, yc, r, b_col, f_col` | - |
| `0x1006` | `gfx.draw_square` | `x, y, w, h, b_col, f_col` | - |
| `0x2001` | `input.get_pad` | `button_id` | `bool` |
| `0x3001` | `audio.play` | `s_id, v_id, vol, pan, pitch`| - |
**Button IDs:**
- `0`: Up, `1`: Down, `2`: Left, `3`: Right
- `4`: A, `5`: B, `6`: X, `7`: Y
- `8`: L, `9`: R
- `10`: Start, `11`: Select
---
## 7. Execution Errors
Errors are:
* explicit
* fatal
* never silent
Types:
* stack underflow
* invalid type
* invalid heap
* invalid frame
Generate:
* clear message
* state dump
* stack trace
---
## 8. Determinism
Guarantees:
* same input → same result
* same sequence → same cycles
* no speculative execution
* no invisible optimizations
> If you see the instruction, you pay for it.
---
## 9. Relationship with Languages
Java, TypeScript, Lua etc:
* are source languages
* compiled to this bytecode
* never executed directly
All run on the **same VM**.
---
## 10. Example
Source:
```java
x = 3 + 4;
```
Bytecode:
``` ```
PUSH_CONST 3 Frame {
PUSH_CONST 4 return_pc
ADD base_pointer
SET_GLOBAL 0 ret_slots
}
``` ```
Cost: Execution uses only the following call instructions:
``` | Opcode | Description |
2 + 2 + 2 + 3 = 9 cycles | ------ | ---------------------- |
``` | `CALL` | Calls a function by id |
| `RET` | Returns from function |
There is no separate `PUSH_FRAME` or `POP_FRAME` instruction in the public ISA.
--- ---
## 11. Execution per Tick ## 8 Closures and First-Class Functions
The VM does not run infinitely. Closures are heap objects and represent **function values**.
It executes: The PVM treats functions as **first-class values**. This means:
* until consuming the **logical frame** budget * Functions can be stored in variables.
* or until `HALT` * Functions can be passed as arguments.
* Functions can be returned from other functions.
* All function values are represented as closures.
The budget is defined by the PROMETEU logical hardware (e.g., `CYCLES_PER_FRAME`). Even functions without captures are represented as closures with an empty capture set.
Example: ### Closure layout
``` ```
vm.step_budget(10_000) Closure {
func_id
captures[]
}
``` ```
This feeds: Captures are stored as handles or value copies.
* CAP All closure environments are part of the GC root set.
* profiling
* certification ### Direct and indirect calls
The PVM supports two forms of function invocation:
| Opcode | Description |
| -------------- | -------------------------------------- |
| `CALL` | Direct call by function id |
| `CALL_CLOSURE` | Indirect call through a closure handle |
For `CALL_CLOSURE`:
1. The closure handle is read from the stack.
2. The VM extracts the `func_id` from the closure.
3. The function is invoked using the closure's captures as its environment.
The verifier ensures that:
* The closure handle is valid.
* The target function's arity matches the call site.
* The `ret_slots` contract is respected.
--- ---
## 12. Logical Frame and `FRAME_SYNC` ## 9 Coroutines (Deterministic)
PROMETEU defines **logical frame** as the minimum unit of consistent game update. The PVM supports **cooperative coroutines**.
* **Input is latched per logical frame** (does not change until the logical frame is completed). Characteristics:
* The **cycle budget** is applied to the logical frame.
* A new logical frame only starts when the current frame ends.
### 12.1 System Instruction: `FRAME_SYNC` * Coroutines are scheduled deterministically.
* No preemption.
* No parallel execution.
* All scheduling happens at safepoints.
The `FRAME_SYNC` instruction marks the **end of the logical frame**. Each coroutine contains:
| Instruction | Cycles | Description | ```
| ------------ | ------ | ---------------------------------- | Coroutine {
| `FRAME_SYNC` | 1 | Finalizes the current logical frame | call_stack
operand_stack
state
}
```
Properties: ### Coroutine instructions
* `FRAME_SYNC` is a **system instruction**. | Opcode | Description |
* It should not be exposed as a "manual" API to the user. | ------- | -------------------------- |
* Tooling/compiler can **inject** `FRAME_SYNC` automatically at the end of the main loop. | `SPAWN` | Creates a coroutine |
| `YIELD` | Suspends current coroutine |
| `SLEEP` | Suspends for N frames |
### 12.2 Semantics (what happens when it executes) Scheduling is:
When executing `FRAME_SYNC`, the core: * round-robin
* deterministic
* bounded by frame budget
1. **Finalizes** the current logical frame. Coroutine stacks are part of the GC root set.
2. **Presents** the frame (`gfx.present()` or `gfx.compose_and_present()` depending on the GFX model).
3. **Releases** the input latch.
4. **Resets** the budget for the next logical frame.
### 12.3 Overbudget (when the frame doesn't finish on time)
If the logical frame budget runs out **before** the VM reaches `FRAME_SYNC`:
* the VM **pauses** (PC and stacks remain at the exact point)
* there is **no present**
* the input latch is **maintained**
* on the next host tick, the VM **continues** from where it left off, still in the same logical frame
Practical effect:
* if the code needs 2 budgets to reach `FRAME_SYNC`, the game updates at ~30 FPS (logical frame takes 2 ticks)
* this is deterministic and reportable in the CAP
--- ---
## 15. Extensibility ## 10 Garbage Collection
The Instruction Set is versioned. The PVM uses a **mark-sweep collector**.
Future: ### GC rules
* DMA * GC runs only at **safepoints**.
* streaming * The primary safepoint is `FRAME_SYNC`.
* vectors * GC is triggered by:
* fictitious coprocessors
No existing instruction changes its meaning. * heap thresholds, or
* allocation pressure.
### Root set
The GC marks from:
* operand stack
* call stack frames
* global variables
* coroutine stacks
* closure environments
* host-held handles
The collector:
* does not compact memory (v1)
* uses free lists for reuse
--- ---
## 16. Summary ## 11 Event and Interrupt Model
The PVM does not allow asynchronous callbacks.
All events are:
* queued by the firmware
* delivered at `FRAME_SYNC`
This ensures:
* stack-based VM
* explicit cost
* deterministic execution * deterministic execution
* integrated with CAP * predictable frame timing
* foundation of every PROMETEU cartridge
Coroutines are the only supported concurrency mechanism.
---
## 12 Host Interface (Syscalls)
All hardware access occurs through syscalls.
Syscalls are:
* synchronous
* deterministic
* capability-checked
They operate on the following subsystems:
### Graphics
* tilebanks
* layers
* sprites
* palette control
* fade registers
* frame present
### Audio
* voice allocation
* play/stop
* volume/pan/pitch
* steal policy
### Input
* sampled once per frame
* exposed as frame state
### Assets
Asset banks are **host-owned memory**.
The VM interacts through handles:
| Syscall | Description |
| -------------- | ---------------------------- |
| `asset.load` | Request asset load into slot |
| `asset.status` | Query load state |
| `asset.commit` | Activate loaded asset |
Asset memory:
* is not part of the VM heap
* is not scanned by GC
### Save Memory (MEMCARD)
| Syscall | Description |
| --------------- | --------------- |
| `mem.read_all` | Read save data |
| `mem.write_all` | Write save data |
| `mem.commit` | Persist save |
| `mem.size` | Query capacity |
---
## 13 Verifier Requirements
Before execution, bytecode must pass the verifier.
The verifier ensures:
1. Valid jump targets
2. Stack depth consistency
3. Correct `ret_slots` across all paths
4. Handle safety rules
5. Closure call safety
6. No invalid opcode sequences
Invalid bytecode is rejected.
---
## 14 Summary
The Prometeu VM is:
* stack-based
* deterministic
* frame-synchronized
* handle-based for heap access
* multi-return capable
* first-class function capable
* coroutine-driven for concurrency
This design balances:
* ease of compilation
* predictable performance
* safety and debuggability
* suitability for real-time 2D games.
< [Back](chapter-1.md) | [Summary](table-of-contents.md) | [Next](chapter-3.md) > < [Back](chapter-1.md) | [Summary](table-of-contents.md) | [Next](chapter-3.md) >

View File

@ -1,309 +1,353 @@
< [Back](chapter-2.md) | [Summary](table-of-contents.md) | [Next](chapter-4.md) > < [Back](chapter-2.md) | [Summary](table-of-contents.md) | [Next](chapter-4.md) >
# 🧠 **Memory: Stack, Heap, and Allocation** # 🧠 **Memory Model**
## 1. Overview This chapter defines the memory architecture of the Prometeu Virtual Machine (PVM). It describes the stack, heap, handles, object layout, garbage collection, and interaction with host-owned memory such as asset banks.
PROMETEU treats **memory as an explicit resource**. The memory model is designed to be:
Nothing is allocated "for convenience". * deterministic
* safe
Nothing grows "automatically". * simple to verify
* suitable for real-time 2D games
Nothing is invisible.
This chapter defines:
- the **memory spaces** of the PROMETEU VM
- how **Stack** and **Heap** work
- the cost and consequences of **dynamic allocation**
- how memory relates to **CAP and certification**
--- ---
## 2. PROMETEU VM Memory Spaces ## 1 Overview
The PROMETEU VM memory is divided into regions with clear responsibilities: The PVM uses a **split memory model**:
``` 1. **Stack memory**
+---------------------------+
|Constant Pool |
+---------------------------+
|Global Space |
+---------------------------+
|Call Stack |
| (Frames + Locals) |
+---------------------------+
| Operand Stack |
+---------------------------+
| Heap |
+---------------------------+
``` * used for temporary values
* function arguments
* multi-return tuples
2. **Heap memory**
Each region: * used for all user-defined objects
* accessed only through handles
3. **Host-owned memory**
- has its own semantics * asset banks
- has defined limits * audio buffers
- has a direct impact on execution cost * framebuffers
* not part of the VM heap
--- ---
## 3. Operand Stack ## 2 Stack Memory
### 3.1 Definition The stack is used for:
The **Operand Stack** is used for: * primitive values
* built-in value types
* temporary results
* function arguments
* tuple returns
- passing operands between instructions ### Stack value types
- intermediate results
- expression evaluation
It is: | Type | Description |
| -------- | ------------------------ |
| `int` | 64-bit integer |
| `bool` | Boolean |
| `float` | 64-bit float |
| `vec2` | 2D vector |
| `color` | Packed color |
| `pixel` | Position + color |
| `handle` | Reference to heap object |
- **LIFO** All stack values are:
- automatically growing within the frame
- **reset** between frames (except explicitly persisted values) * fixed-size
* copied by value
* never directly reference raw memory
### Stack properties
* Stack is bounded and verified.
* Stack depth must be consistent across all control paths.
* Stack never stores raw pointers.
--- ---
### 3.2 Characteristics ## 3 Tuples (Stack-Only Aggregates)
- does not store complex structures Tuples are used for multi-value returns.
- stores primitive values or references
- overflow or underflow are **fatal errors**
> The operand stack is cheap, fast, and temporary. ### Tuple rules
>
--- * Tuples exist only on the stack.
* Maximum tuple arity: **6 slots**.
* Tuples are not heap objects by default.
* To persist a tuple, it must be explicitly boxed into a heap object.
## 4. Call Stack ### Example
### 4.1 Execution Frames Function returning two values:
Each function call creates a **Call Frame**, containing:
- local variables
- parameters
- return address
- execution context
Frames are created with:
``` ```
fn position(): (int, int)
``` ```
And destroyed with: At runtime:
``` ```
stack top → [x, y]
``` ```
--- ---
### 4.2 Costs and Limits ## 4 Heap Memory
- frame creation has an explicit cost All user-defined objects live in the heap.
- maximum stack depth is limited
- deep recursion is discouraged
PROMETEU favors: ### Heap characteristics
- iteration * Linear slot-based storage.
- explicit state * Objects are fixed-layout blocks.
- conscious depth control * No raw pointer access.
* No inheritance at memory level.
Heap objects include:
* user structs/classes
* arrays
* strings
* closures
* boxed tuples (optional)
--- ---
## 5. Global Space ## 5 Handles and Gate Table
### 5.1 Definition All heap objects are accessed via **handles**.
The **Global Space** stores: A handle is defined as:
- global variables
- persistent game state
- long-term references
Globals:
- survive between frames
- occupy memory permanently
- count towards total memory usage
---
### 5.2 Conscious usage
PROMETEU encourages:
- few globals
- compact structures
- explicit initialization
Globals are equivalent to **static RAM** in microcontrollers.
---
## 6. Heap
### 6.1 Definition
The **Heap** is the dynamic memory region of the PROMETEU VM.
It is used for:
- objects
- arrays
- tables
- composite structures
Every allocation on the heap is done explicitly with:
``` ```
ALLOC size handle = { index, generation }
``` ```
--- The VM maintains a **gate table**:
### 6.2 Allocation Costs
Allocating memory:
- consumes **cycles**
- consumes **available heap**
- increases pressure on the system
PROMETEU treats allocation as an **expensive operation by definition**.
> Allocation is an architectural decision, not a detail.
>
---
## 7. Heap Limits
### 7.1 Finite Heap
The heap:
- has a defined maximum size
- can vary according to the active CAP
- never grows dynamically
Example:
``` ```
GateEntry {
    alive: bool
    generation: u32
    base: usize
    slots: u32
    type_id: u32
}
``` ```
Exceeding the limit: ### Handle safety
- does not crash the game When an object is freed:
- generates an execution error
- appears in the certification report * `alive` becomes false
* `generation` is incremented
When a handle is used:
* index must exist
* generation must match
Otherwise, the VM traps.
This prevents:
* use-after-free
* stale references
--- ---
### 7.2 Heap and CAP ## 6 Object Layout
During a JAM or evaluation: Heap objects have a simple, fixed layout:
- heap peak is measured ```
- the value is compared to the CAP limit Object {
- non-compliances are recorded type_id
field_0
field_1
...
}
```
The game **continues running**, but the evidence is recorded. Properties:
* Fields are stored in slot order.
* No hidden base classes.
* No pointer arithmetic.
Traits and method dispatch are resolved:
* statically by the compiler, or
* via vtable handles (if dynamic dispatch is used).
--- ---
## 8. Garbage Collection (GC) ## 7 Closures
### 8.1 Existence of GC Closures are heap objects.
PROMETEU may use **simple Garbage Collection**, with the following properties: Layout:
- non-incremental (v0.1) ```
- explicit cost Closure {
- observable pauses func_id
- documented behavior capture_count
captures[]
}
```
GC **is not invisible**. Captures may be:
* copied values
* handles to heap objects
Closure environments are part of the GC root set.
--- ---
### 8.2 GC Cost ## 8 Coroutine Memory
When GC occurs: Each coroutine owns its own stacks:
- cycles are consumed ```
- the frame may be impacted Coroutine {
- the event is recorded call_stack
operand_stack
state
}
```
PROMETEU teaches: All coroutine stacks are included in the GC root set.
> "Creating garbage has a cost." Coroutines do not share stacks or frames.
>
--- ---
## 9. Memory Best Practices ## 9 Garbage Collection
PROMETEU explicitly encourages: The PVM uses a **mark-sweep collector**.
- structure reuse ### GC properties
- allocation outside the main loop
- persistent buffers
- pooling manual
And discourages: * Non-moving (no compaction in v1).
* Runs only at **safepoints**.
* Primary safepoint: `FRAME_SYNC`.
- per-frame allocation ### GC triggers
- disposable temporary structures
- unplanned growth GC may run when:
* heap usage exceeds threshold
* allocation pressure is high
### Root set
The collector marks from:
* operand stack
* call stack frames
* global variables
* coroutine stacks
* closure environments
* host-held handles
--- ---
## 10. Relationship with Microcontrollers ## 10 Allocation and Deallocation
The PROMETEU memory model is intentionally similar to real MCUs: ### Allocation
| MCU | PROMETEU | Heap allocation:
| --- | --- |
| Static RAM | Global Space |
| Stack | Call Stack |
| Heap | Heap |
| Allocation failure | Explicit error |
| Fragmentation | Dev's responsibility |
This creates a direct transfer of learning. 1. VM reserves a slot block.
2. A gate entry is created.
3. A handle is returned.
If allocation fails:
* VM may trigger GC.
* If still failing, a trap occurs.
### Deallocation
Objects are freed only by the GC.
When freed:
* gate is marked dead
* generation is incremented
* memory becomes available via free list
--- ---
## 11. Pedagogical Implications ## 11 Host-Owned Memory (Asset Banks)
This model allows teaching: Asset memory is **not part of the VM heap**.
- the difference between stack and heap It is managed by the firmware.
- allocation cost
- data lifetime
- architectural impact of simple decisions
- the relationship between memory and time
Everything with **immediate and visible feedback**. Examples:
* tilebanks
* audio sample banks
* sprite sheets
### Properties
* VM cannot access asset memory directly.
* Access occurs only through syscalls.
* Asset memory is not scanned by GC.
--- ---
## 12. Summary ## 12 Save Memory (MEMCARD)
- PROMETEU has well-defined memory spaces Save memory is a host-managed persistent storage area.
- stack is temporary and cheap
- heap is finite and expensive Properties:
- allocation has an explicit cost
- GC is visible and measurable * fixed size
- memory participates in certification * accessed only via syscalls
* not part of the VM heap
* not scanned by GC
---
## 13 Memory Safety Rules
The VM enforces:
1. All heap access via handles.
2. Generation checks on every handle use.
3. Bounds checking on object fields.
4. No raw pointer arithmetic.
5. Verified stack discipline.
Any violation results in a trap.
---
## 14 Summary
The PVM memory model is based on:
* stack-only primitive and tuple values
* heap-only user objects
* generation-based handles
* deterministic GC at frame safepoints
* strict separation between VM heap and host memory
This design ensures:
* predictable performance
* memory safety
* simple verification
* suitability for real-time game workloads.
< [Back](chapter-2.md) | [Summary](table-of-contents.md) | [Next](chapter-4.md) > < [Back](chapter-2.md) | [Summary](table-of-contents.md) | [Next](chapter-4.md) >

View File

@ -1,289 +1,308 @@
< [Back](chapter-8.md) | [Summary](table-of-contents.md) | [Next](chapter-10.md) > < [Back](chapter-8.md) | [Summary](table-of-contents.md) | [Next](chapter-10.md) >
# ⚡ **Events and Interrupts** # ⚡ **Events and Scheduling**
## 1. Overview This chapter defines how the Prometeu Virtual Machine (PVM) handles events, frame synchronization, and cooperative concurrency. It replaces the older interrupt-oriented terminology with a simpler and more accurate model based on **frame boundaries**, **event queues**, and **coroutines**.
PROMETEU clearly distinguishes between **normal execution**, **events**, and **interrupts**. The goal is to preserve determinism while still allowing responsive and structured game logic.
Nothing occurs "out of time".
Nothing interrupts the system without cost.
Nothing happens without a well-defined point in the execution cycle.
> Events are signals.
> Interrupts are machine decisions.
>
This chapter defines:
- what PROMETEU considers an event
- how interrupts are modeled
- when they can occur
- how they relate to cycles, CAP, and determinism
--- ---
## 2. Event Philosophy in PROMETEU ## 1 Core Philosophy
PROMETEU **does not use invisible asynchronous callbacks**. Prometeu does **not use asynchronous callbacks** or preemptive interrupts for user code.
Every event: All external signals are:
- is registered * queued by the firmware
- is delivered at a predictable moment * delivered at deterministic points
- is handled within the main loop * processed inside the main execution loop
This model avoids: Nothing executes “out of time”.
Nothing interrupts the program in the middle of an instruction.
Nothing occurs without a known cost.
- implicit concurrency This guarantees:
- hidden race conditions
- non-deterministic side effects
PROMETEU favors: * deterministic behavior
* reproducible runs
> explicit control over reactivity. * stable frame timing
>
--- ---
## 3. Events ## 2 Events
### 3.1 Definition ### 2.1 Definition
An **event** in PROMETEU is a **logical signal** generated by the system or the program, indicating that something has occurred. An **event** is a logical signal generated by the system or by internal runtime mechanisms.
Examples of events:
- end of frame
- timer expired
- system state change
- execution error
Events **do not execute code automatically**.
They only **inform**.
---
### 3.2 Event Queue
PROMETEU maintains an **event queue**:
- events are queued
- the queue is processed in order
- processing occurs at defined points in the frame
Events: Events:
- do not interrupt execution arbitrarily * represent something that has occurred
- do not execute outside the loop * do not execute code automatically
* are processed explicitly during the frame
Examples:
* end of frame
* timer expiration
* asset load completion
* system state change
* execution error
--- ---
## 4. Interrupts ### 2.2 Event Queue
### 4.1 Definition The firmware maintains an **event queue**.
An **interrupt** is a special event, treated by the system as **priority**, which can: Properties:
- change the normal flow of execution * events are queued in order
- execute specific system code * events are processed at frame boundaries
- impact cycles and budget * event processing is deterministic
Interrupts are **rare and explicit**. Events never:
* execute user code automatically
* interrupt instructions
* run outside the main loop
--- ---
### 4.2 What is NOT an interrupt ## 3 Frame Boundary (Sync Phase)
In PROMETEU, the following are **not interrupts**: The primary synchronization point in Prometeu is the **frame boundary**, reached by the `FRAME_SYNC` instruction.
- button input At this point:
- collisions
- common timers
- game logic
These are treated as **state or normal events**. 1. Input state is sampled.
2. Events are delivered.
3. Coroutine scheduling occurs.
4. Optional garbage collection may run.
5. Control returns to the firmware.
This replaces the older notion of a "VBlank interrupt".
The frame boundary:
* has a fixed, measurable cost
* does not execute arbitrary user code
* is fully deterministic
--- ---
## 5. Types of Interrupts in PROMETEU ## 4 System Events vs System Faults
PROMETEU defines a small and well-controlled set of interrupts. Prometeu distinguishes between normal events and system faults.
### 5.1 Frame Interrupt (Conceptual VBlank) ### 4.1 Normal events
The end of each frame generates a **logical synchronization interrupt**, responsible for: Examples:
- framebuffer swap * timer expired
- audio commit * asset loaded
- state synchronization * frame boundary
This interrupt: These are delivered through the event queue.
- occurs at SYNC ### 4.2 System faults
- has a fixed cost
- does not execute user code
---
### 5.2 System Interrupt
Generated by exceptional conditions: Generated by exceptional conditions:
- fatal VM error * invalid instruction
- memory violation * memory violation
- invalid instruction * handle misuse
* verifier failure
Result: Result:
- execution is halted * VM execution stops
- VM state is preserved * state is preserved
- detailed report is generated * a diagnostic report is generated
System faults are not recoverable events.
--- ---
### 5.3 Timed Interrupts (Timers) ## 5 Timers
PROMETEU can offer **system timers**, modeled as: Timers are modeled as **frame-based counters**.
- counters based on frames Properties:
- signals generated upon reaching zero
* measured in frames, not real time
* deterministic across runs
* generate events when they expire
Timers: Timers:
- do not trigger code automatically * do not execute code automatically
- generate queryable events * produce queryable or queued events
Conceptual example: Example:
``` ```
if timer.expired(t1) {
    // handle event
}
``` ```
--- ---
## 6. Relationship between Events, Interrupts, and the Loop ## 6 Coroutines and Cooperative Scheduling
The complete flow can be represented as follows: Prometeu provides **coroutines** as the only form of concurrency.
``` Coroutines are:
FRAME N
──────────────
SAMPLEINPUT
PROCESS EVENTS
UPDATE
DRAW
AUDIO
INTERRUPT: VBLANK
SYNC
──────────────
```
Important: * cooperative
* deterministic
* scheduled only at safe points
- events are processed before main logic There is:
- interrupts occur only at safe points
- no interrupt occurs "in the middle" of an instruction * no preemption
* no parallel execution
* no hidden threads
### 6.1 Coroutine lifecycle
Each coroutine can be in one of the following states:
* `Ready`
* `Running`
* `Sleeping`
* `Finished`
### 6.2 Scheduling
At each frame boundary:
1. The scheduler selects the next coroutine.
2. Coroutines run in a deterministic order.
3. Each coroutine executes within the frame budget.
The default scheduling policy is:
* round-robin
* deterministic
--- ---
## 7. Costs and Budget ### 6.3 Coroutine operations
Events and interrupts: Typical coroutine instructions:
- consume cycles | Operation | Description |
- participate in the CAP | --------- | ----------------------------- |
- appear in certification | `spawn` | Create a coroutine |
| `yield` | Voluntarily suspend execution |
| `sleep` | Suspend for N frames |
Example report: `yield` and `sleep` only take effect at safe points.
---
## 7 Relationship Between Events, Coroutines, and the Frame Loop
The high-level frame structure is:
```
FRAME N
------------------------
Sample Input
Deliver Events
Schedule Coroutines
Run VM until:
- budget exhausted, or
- FRAME_SYNC reached
Sync Phase
------------------------
```
Important properties:
* events are processed at known points
* coroutine scheduling is deterministic
* no execution occurs outside the frame loop
---
## 8 Costs and Budget
All event processing and scheduling:
* consumes cycles
* contributes to the CAP (certification and analysis profile)
* appears in profiling reports
Example:
``` ```
Frame 18231: Frame 18231:
Event processing: 120 cycles Event processing: 120 cycles
Coroutine scheduling: 40 cycles
Frame sync: 80 cycles
``` ```
Nothing is free. Nothing is free.
--- ---
## 8. Determinism and Reproducibility ## 9 Determinism and Reproducibility
PROMETEU guarantees: Prometeu guarantees:
- same sequence of events → same behavior * same sequence of inputs and events → same behavior
- interrupts always at the same point in the frame * frame-based timers
- timers based on frame count, not real time * deterministic coroutine scheduling
This allows: This allows:
- reliable replays * reliable replays
- precise debugging * precise debugging
- fair certification * fair certification
--- ---
## 9. Best Practices ## 10 Best Practices
PROMETEU encourages: Prometeu encourages:
- treating events as data * treating events as data
- querying events explicitly * querying events explicitly
- avoiding heavy logic in handlers * structuring logic around frame steps
- using timers instead of excessive polling * using coroutines for asynchronous flows
PROMETEU discourages: Prometeu discourages:
- simulating asynchronous callbacks * simulating asynchronous callbacks
- depending on implicit order * relying on hidden timing
- using events as a "shortcut" for complex logic * using events as implicit control flow
--- ---
## 10. Relationship with Microcontrollers ## 11 Conceptual Comparison
The model reflects real MCUs: | Traditional System | Prometeu |
| ------------------ | ----------------------- |
| Hardware interrupt | Frame boundary event |
| ISR | System sync phase |
| Main loop | VM frame loop |
| Timer interrupt | Frame-based timer event |
| Threads | Coroutines |
| MCU | PROMETEU | Prometeu teaches reactive system concepts without the unpredictability of real interrupts.
| --- | --- |
| ISR | Explicit interrupt |
| Main Loop | PROMETEU Loop |
| Flags | Events |
| Timers | Per-frame timers |
But without:
- real concurrency
- unpredictable interrupts
PROMETEU **teaches the concept**, not the chaos.
--- ---
## 11. Pedagogical Implications ## 12 Summary
This model allows teaching: * Events inform; they do not execute code.
* The frame boundary is the only global synchronization point.
- the difference between event and interrupt * System faults stop execution.
- safe synchronization * Coroutines provide cooperative concurrency.
- flow control in reactive systems * All behavior is deterministic and measurable.
- the impact of temporal decisions
Everything with **order, clarity, and measurement**.
---
## 12. Summary
- events inform, they do not execute
- interrupts are rare and controlled
- no execution occurs outside the loop
- costs are explicit
- behavior is deterministic
< [Back](chapter-8.md) | [Summary](table-of-contents.md) | [Next](chapter-10.md) > < [Back](chapter-8.md) | [Summary](table-of-contents.md) | [Next](chapter-10.md) >

View File

@ -15,6 +15,7 @@
- [Chapter 13: Cartridge](chapter-13.md) - [Chapter 13: Cartridge](chapter-13.md)
- [Chapter 14: Boot Profiles](chapter-14.md) - [Chapter 14: Boot Profiles](chapter-14.md)
- [Chapter 15: Asset Management](chapter-15.md) - [Chapter 15: Asset Management](chapter-15.md)
- [Chapter 16: Host ABI and Syscalls](chapter-16.md)
--- ---
[Back to README](../README.md) [Back to README](../README.md)

View File

@ -1,19 +0,0 @@
{
"comments": {
"lineComment": "//",
"blockComment": ["/*", "*/"]
},
"brackets": [
["{", "}"],
["[", "]"],
["[[", "]]"],
["(", ")"]
],
"autoClosingPairs": [
{ "open": "{", "close": "}" },
{ "open": "[", "close": "]" },
{ "open": "[[", "close": "]]" },
{ "open": "(", "close": ")" },
{ "open": "\"", "close": "\"" }
]
}

View File

@ -1,34 +0,0 @@
"use strict";
// Compiled output of src/extension.ts (see sourceMappingURL below) — do not
// edit by hand; regenerate with `npm run compile`.
// VS Code extension entry point: wires a LanguageClient for the "pbs"
// language to an external `prometeu-lsp` server binary configured by the
// user via the `prometeuPbs.serverPath` setting.
Object.defineProperty(exports, "__esModule", { value: true });
exports.activate = activate;
exports.deactivate = deactivate;
const vscode = require("vscode");
const node_1 = require("vscode-languageclient/node");
// Module-level client so deactivate() can stop the same instance activate() started.
let client;
// Called by VS Code when the extension is activated. Reads the server binary
// path from configuration, aborts with an error message if it is unset, and
// otherwise starts a language client attached to file-scheme "pbs" documents.
function activate(context) {
    const cfg = vscode.workspace.getConfiguration("prometeuPbs");
    const serverPath = cfg.get("serverPath");
    if (!serverPath) {
        // NOTE(review): user-facing message is in Portuguese — kept byte-identical here.
        vscode.window.showErrorMessage("Prometeu PBS: configure 'prometeuPbs.serverPath' com o caminho do bin prometeu-lsp.");
        return;
    }
    // serverPath is assumed to point at the prometeu-lsp executable — no args passed.
    const serverOptions = {
        command: serverPath,
        args: []
    };
    const clientOptions = {
        documentSelector: [{ scheme: "file", language: "pbs" }]
    };
    client = new node_1.LanguageClient("prometeuPbsLsp", "Prometeu PBS LSP", serverOptions, clientOptions);
    // The client itself is what gets stopped on deactivation — not the
    // Promise returned by start() — so register a disposable that stops it.
    context.subscriptions.push({
        dispose: () => {
            void client?.stop();
        }
    });
    // start() returns a Promise; `void` marks it intentionally un-awaited.
    void client.start();
}
// Called by VS Code on shutdown; returns the stop Promise (or undefined if
// the client was never started) so VS Code can await cleanup.
function deactivate() {
    return client?.stop();
}
//# sourceMappingURL=extension.js.map

View File

@ -1 +0,0 @@
{"version":3,"file":"extension.js","sourceRoot":"","sources":["../src/extension.ts"],"names":[],"mappings":";;AAKA,4BA8BC;AAED,gCAEC;AAvCD,iCAAiC;AACjC,qDAAkG;AAElG,IAAI,MAAkC,CAAC;AAEvC,SAAgB,QAAQ,CAAC,OAAgC;IACrD,MAAM,GAAG,GAAG,MAAM,CAAC,SAAS,CAAC,gBAAgB,CAAC,aAAa,CAAC,CAAC;IAC7D,MAAM,UAAU,GAAG,GAAG,CAAC,GAAG,CAAS,YAAY,CAAC,CAAC;IAEjD,IAAI,CAAC,UAAU,EAAE,CAAC;QACd,MAAM,CAAC,MAAM,CAAC,gBAAgB,CAC1B,qFAAqF,CACxF,CAAC;QACF,OAAO;IACX,CAAC;IAED,MAAM,aAAa,GAAkB;QACjC,OAAO,EAAE,UAAU;QACnB,IAAI,EAAE,EAAE;KACX,CAAC;IAEF,MAAM,aAAa,GAA0B;QACzC,gBAAgB,EAAE,CAAC,EAAE,MAAM,EAAE,MAAM,EAAE,QAAQ,EAAE,KAAK,EAAE,CAAC;KAC1D,CAAC;IAEF,MAAM,GAAG,IAAI,qBAAc,CAAC,gBAAgB,EAAE,kBAAkB,EAAE,aAAa,EAAE,aAAa,CAAC,CAAC;IAEhG,8DAA8D;IAC9D,OAAO,CAAC,aAAa,CAAC,IAAI,CAAC;QACvB,OAAO,EAAE,GAAG,EAAE;YACV,KAAK,MAAM,EAAE,IAAI,EAAE,CAAC;QACxB,CAAC;KACJ,CAAC,CAAC;IAEH,KAAK,MAAM,CAAC,KAAK,EAAE,CAAC;AACxB,CAAC;AAED,SAAgB,UAAU;IACtB,OAAO,MAAM,EAAE,IAAI,EAAE,CAAC;AAC1B,CAAC"}

File diff suppressed because it is too large Load Diff

View File

@ -1,50 +0,0 @@
{
"name": "prometeu-pbs",
"displayName": "Prometeu PBS",
"version": "0.0.1",
"publisher": "local",
"engines": {
"vscode": "^1.85.0"
},
"main": "./out/extension.js",
"contributes": {
"languages": [
{
"id": "pbs",
"aliases": [
"PBS",
"Prometeu Base Script"
],
"extensions": [
".pbs"
],
"configuration": "./language-configuration.json"
}
],
"configuration": {
"type": "object",
"title": "Prometeu PBS",
"properties": {
"prometeuPbs.serverPath": {
"type": "string",
"default": "",
                    "description": "Absolute path to the prometeu-lsp server binary (e.g. the `prometeu-lsp` executable built under runtime/target)."
}
}
}
},
"dependencies": {
"vscode-languageclient": "^9.0.1"
},
"devDependencies": {
"@types/node": "^20.0.0",
"@types/vscode": "^1.85.0",
"generator-code": "^1.11.17",
"typescript": "^5.0.0",
"yo": "^6.0.0"
},
"scripts": {
"compile": "tsc -p .",
"watch": "tsc -watch -p ."
}
}

View File

@ -1,3 +0,0 @@
{
"prometeuPbs.serverPath": "/Users/niltonconstantino/personal/workspace.personal/intrepid/prometeu/runtime/target/debug/prometeu-lsp"
}

View File

@ -1,40 +0,0 @@
import * as vscode from "vscode";
import { LanguageClient, LanguageClientOptions, ServerOptions } from "vscode-languageclient/node";

// Single client instance shared between activate() and deactivate().
let client: LanguageClient | undefined;

/**
 * Extension entry point. Resolves the prometeu-lsp binary path from the
 * `prometeuPbs.serverPath` setting and, if present, launches a language
 * client bound to on-disk "pbs" documents. Shows an error and bails out
 * when the path is not configured.
 */
export function activate(context: vscode.ExtensionContext) {
    const configuredPath = vscode.workspace
        .getConfiguration("prometeuPbs")
        .get<string>("serverPath");

    if (!configuredPath) {
        vscode.window.showErrorMessage(
            "Prometeu PBS: configure 'prometeuPbs.serverPath' com o caminho do bin prometeu-lsp."
        );
        return;
    }

    const serverOptions: ServerOptions = { command: configuredPath, args: [] };
    const clientOptions: LanguageClientOptions = {
        documentSelector: [{ scheme: "file", language: "pbs" }]
    };

    client = new LanguageClient(
        "prometeuPbsLsp",
        "Prometeu PBS LSP",
        serverOptions,
        clientOptions
    );

    // It is the client that must be stopped on disposal — not the Promise
    // returned by start() — so wrap the stop call in an explicit disposable.
    const stopOnDispose: vscode.Disposable = {
        dispose: () => {
            void client?.stop();
        }
    };
    context.subscriptions.push(stopOnDispose);

    // Fire-and-forget: VS Code does not require awaiting startup here.
    void client.start();
}

/** Shutdown hook: stops the client if one was started. */
export function deactivate(): Thenable<void> | undefined {
    return client?.stop();
}

View File

@ -1,12 +0,0 @@
{
"compilerOptions": {
"module": "commonjs",
"target": "ES2020",
"outDir": "out",
"rootDir": "src",
"lib": ["ES2020"],
"sourceMap": true,
"strict": true
},
"exclude": ["node_modules", ".vscode-test"]
}