add jenkins pipeline
Some checks failed
Intrepid/Prometeu/Runtime/pipeline/pr-master Build started...
Intrepid/Prometeu/Runtime/pipeline/head This commit looks good
Test / Build occurred while executing withChecks step.

This commit is contained in:
bQUARKz 2026-04-08 08:32:00 +01:00
parent 214a189a5f
commit 1f13d1a306
Signed by: bquarkz
SSH Key Fingerprint: SHA256:Z7dgqoglWwoK6j6u4QC87OveEq74WOhFN+gitsxtkf8
18 changed files with 523 additions and 361 deletions

View File

@ -1,29 +0,0 @@
name: CI
on:
push:
branches: [ main, master ]
pull_request:
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt, clippy
- uses: Swatinem/rust-cache@v2
- name: Format check
run: cargo fmt -- --check
- name: Clippy
run: cargo clippy --workspace --all-features
- name: Test
run: cargo test --workspace --all-targets --all-features --no-fail-fast

View File

@ -1,296 +0,0 @@
# This file was autogenerated by dist: https://axodotdev.github.io/cargo-dist
#
# Copyright 2022-2024, axodotdev
# SPDX-License-Identifier: MIT or Apache-2.0
#
# CI that:
#
# * checks for a Git Tag that looks like a release
# * builds artifacts with dist (archives, installers, hashes)
# * uploads those artifacts to temporary workflow zip
# * on success, uploads the artifacts to a GitHub Release
#
# Note that the GitHub Release will be created with a generated
# title/body based on your changelogs.
name: Release
permissions:
"contents": "write"
# This task will run whenever you push a git tag that looks like a version
# like "1.0.0", "v0.1.0-prerelease.1", "my-app/0.1.0", "releases/v1.0.0", etc.
# Various formats will be parsed into a VERSION and an optional PACKAGE_NAME, where
# PACKAGE_NAME must be the name of a Cargo package in your workspace, and VERSION
# must be a Cargo-style SemVer Version (must have at least major.minor.patch).
#
# If PACKAGE_NAME is specified, then the announcement will be for that
# package (erroring out if it doesn't have the given version or isn't dist-able).
#
# If PACKAGE_NAME isn't specified, then the announcement will be for all
# (dist-able) packages in the workspace with that version (this mode is
# intended for workspaces with only one dist-able package, or with all dist-able
# packages versioned/released in lockstep).
#
# If you push multiple tags at once, separate instances of this workflow will
# spin up, creating an independent announcement for each one. However, GitHub
# will hard limit this to 3 tags per commit, as it will assume more tags is a
# mistake.
#
# If there's a prerelease-style suffix to the version, then the release(s)
# will be marked as a prerelease.
on:
pull_request:
push:
tags:
- '**[0-9]+.[0-9]+.[0-9]+*'
jobs:
# Run 'dist plan' (or host) to determine what tasks we need to do
plan:
runs-on: "ubuntu-22.04"
outputs:
val: ${{ steps.plan.outputs.manifest }}
tag: ${{ !github.event.pull_request && github.ref_name || '' }}
tag-flag: ${{ !github.event.pull_request && format('--tag={0}', github.ref_name) || '' }}
publishing: ${{ !github.event.pull_request }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
- name: Install dist
# we specify bash to get pipefail; it guards against the `curl` command
# failing. otherwise `sh` won't catch that `curl` returned non-0
shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.30.3/cargo-dist-installer.sh | sh"
- name: Cache dist
uses: actions/upload-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/dist
# sure would be cool if github gave us proper conditionals...
# so here's a doubly-nested ternary-via-truthiness to try to provide the best possible
# functionality based on whether this is a pull_request, and whether it's from a fork.
# (PRs run on the *source* but secrets are usually on the *target* -- that's *good*
# but also really annoying to build CI around when it needs secrets to work right.)
- id: plan
run: |
dist ${{ (!github.event.pull_request && format('host --steps=create --tag={0}', github.ref_name)) || 'plan' }} --output-format=json > plan-dist-manifest.json
echo "dist ran successfully"
cat plan-dist-manifest.json
echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
name: artifacts-plan-dist-manifest
path: plan-dist-manifest.json
# Build and packages all the platform-specific things
build-local-artifacts:
name: build-local-artifacts (${{ join(matrix.targets, ', ') }})
# Let the initial task tell us to not run (currently very blunt)
needs:
- plan
if: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix.include != null && (needs.plan.outputs.publishing == 'true' || fromJson(needs.plan.outputs.val).ci.github.pr_run_mode == 'upload') }}
strategy:
fail-fast: false
# Target platforms/runners are computed by dist in create-release.
# Each member of the matrix has the following arguments:
#
# - runner: the github runner
# - dist-args: cli flags to pass to dist
# - install-dist: expression to run to install dist on the runner
#
# Typically there will be:
# - 1 "global" task that builds universal installers
# - N "local" tasks that build each platform's binaries and platform-specific installers
matrix: ${{ fromJson(needs.plan.outputs.val).ci.github.artifacts_matrix }}
runs-on: ${{ matrix.runner }}
container: ${{ matrix.container && matrix.container.image || null }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BUILD_MANIFEST_NAME: target/distrib/${{ join(matrix.targets, '-') }}-dist-manifest.json
steps:
- name: enable windows longpaths
run: |
git config --global core.longpaths true
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
- name: Install Rust non-interactively if not already installed
if: ${{ matrix.container }}
run: |
if ! command -v cargo > /dev/null 2>&1; then
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
fi
- name: Install dist
run: ${{ matrix.install_dist.run }}
# Get the dist-manifest
- name: Fetch local artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
- name: Install dependencies
run: |
${{ matrix.packages_install }}
- name: Build artifacts
run: |
# Actually do builds and make zips and whatnot
dist build ${{ needs.plan.outputs.tag-flag }} --print=linkage --output-format=json ${{ matrix.dist_args }} > dist-manifest.json
echo "dist ran successfully"
- id: cargo-dist
name: Post-build
# We force bash here just because github makes it really hard to get values up
# to "real" actions without writing to env-vars, and writing to env-vars has
# inconsistent syntax between shell and powershell.
shell: bash
run: |
# Parse out what we just built and upload it to scratch storage
echo "paths<<EOF" >> "$GITHUB_OUTPUT"
dist print-upload-files-from-manifest --manifest dist-manifest.json >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
cp dist-manifest.json "$BUILD_MANIFEST_NAME"
- name: "Upload artifacts"
uses: actions/upload-artifact@v4
with:
name: artifacts-build-local-${{ join(matrix.targets, '_') }}
path: |
${{ steps.cargo-dist.outputs.paths }}
${{ env.BUILD_MANIFEST_NAME }}
# Build and package all the platform-agnostic(ish) things
build-global-artifacts:
needs:
- plan
- build-local-artifacts
runs-on: "ubuntu-22.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
- name: Install cached dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/dist
# Get all the local artifacts for the global tasks to use (for e.g. checksums)
- name: Fetch local artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
- id: cargo-dist
shell: bash
run: |
dist build ${{ needs.plan.outputs.tag-flag }} --output-format=json "--artifacts=global" > dist-manifest.json
echo "dist ran successfully"
# Parse out what we just built and upload it to scratch storage
echo "paths<<EOF" >> "$GITHUB_OUTPUT"
jq --raw-output ".upload_files[]" dist-manifest.json >> "$GITHUB_OUTPUT"
echo "EOF" >> "$GITHUB_OUTPUT"
cp dist-manifest.json "$BUILD_MANIFEST_NAME"
- name: "Upload artifacts"
uses: actions/upload-artifact@v4
with:
name: artifacts-build-global
path: |
${{ steps.cargo-dist.outputs.paths }}
${{ env.BUILD_MANIFEST_NAME }}
# Determines if we should publish/announce
host:
needs:
- plan
- build-local-artifacts
- build-global-artifacts
# Only run if we're "publishing", and only if plan, local and global didn't fail (skipped is fine)
if: ${{ always() && needs.plan.result == 'success' && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.build-local-artifacts.result == 'skipped' || needs.build-local-artifacts.result == 'success') }}
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: "ubuntu-22.04"
outputs:
val: ${{ steps.host.outputs.manifest }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive
- name: Install cached dist
uses: actions/download-artifact@v4
with:
name: cargo-dist-cache
path: ~/.cargo/bin/
- run: chmod +x ~/.cargo/bin/dist
# Fetch artifacts from scratch-storage
- name: Fetch artifacts
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: target/distrib/
merge-multiple: true
- id: host
shell: bash
run: |
dist host ${{ needs.plan.outputs.tag-flag }} --steps=upload --steps=release --output-format=json > dist-manifest.json
echo "artifacts uploaded and released successfully"
cat dist-manifest.json
echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
- name: "Upload dist-manifest.json"
uses: actions/upload-artifact@v4
with:
# Overwrite the previous copy
name: artifacts-dist-manifest
path: dist-manifest.json
# Create a GitHub Release while uploading all files to it
- name: "Download GitHub Artifacts"
uses: actions/download-artifact@v4
with:
pattern: artifacts-*
path: artifacts
merge-multiple: true
- name: Cleanup
run: |
# Remove the granular manifests
rm -f artifacts/*-dist-manifest.json
- name: Create GitHub Release
env:
PRERELEASE_FLAG: "${{ fromJson(steps.host.outputs.manifest).announcement_is_prerelease && '--prerelease' || '' }}"
ANNOUNCEMENT_TITLE: "${{ fromJson(steps.host.outputs.manifest).announcement_title }}"
ANNOUNCEMENT_BODY: "${{ fromJson(steps.host.outputs.manifest).announcement_github_body }}"
RELEASE_COMMIT: "${{ github.sha }}"
run: |
# Write and read notes from a file to avoid quoting breaking things
echo "$ANNOUNCEMENT_BODY" > $RUNNER_TEMP/notes.txt
gh release create "${{ needs.plan.outputs.tag }}" --target "$RELEASE_COMMIT" $PRERELEASE_FLAG --title "$ANNOUNCEMENT_TITLE" --notes-file "$RUNNER_TEMP/notes.txt" artifacts/*
announce:
needs:
- plan
- host
# use "always() && ..." to allow us to wait for all publish jobs while
# still allowing individual publish jobs to skip themselves (for prereleases).
# "host" however must run to completion, no skipping allowed!
if: ${{ always() && needs.host.result == 'success' }}
runs-on: "ubuntu-22.04"
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
submodules: recursive

View File

@ -1,4 +1,4 @@
.PHONY: fmt fmt-check clippy test test-debugger-socket ci
.PHONY: fmt fmt-check clippy tes-local test-debugger-socket test ci cobertura
fmt:
cargo fmt
@ -9,10 +9,24 @@ fmt-check:
clippy:
cargo clippy --workspace --all-features
test:
test-local:
cargo test --workspace --all-targets --all-features --no-fail-fast
test-debugger-socket:
cargo test -p prometeu-host-desktop-winit --lib -- --ignored
ci: fmt-check clippy test
clean:
cargo llvm-cov clean --workspace
coverage:
cargo llvm-cov --workspace --all-features --html --output-dir target/llvm-cov/html
coverage-xml:
cargo llvm-cov report --cobertura --output-path target/llvm-cov/cobertura.xml
coverage-json:
cargo llvm-cov report --json --summary-only --output-path target/llvm-cov/summary.json
test: fmt-check clippy test-local test-debugger-socket
ci: clean fmt-check clippy coverage
cobertura: coverage-xml coverage-json

View File

@ -188,7 +188,8 @@ impl Audio {
return AudioOpStatus::BankInvalid;
}
let sample = bank_slot.and_then(|bank| bank.samples.get(sample_id as usize).map(Arc::clone));
let sample =
bank_slot.and_then(|bank| bank.samples.get(sample_id as usize).map(Arc::clone));
if let Some(s) = sample {
// println!(
@ -322,8 +323,7 @@ mod tests {
let sound_banks = Arc::clone(&banks) as Arc<dyn SoundBankPoolAccess>;
let mut audio = Audio::new(sound_banks);
let status =
audio.play_sample(sample(), MAX_CHANNELS, 255, 128, 1.0, 0, LoopMode::Off);
let status = audio.play_sample(sample(), MAX_CHANNELS, 255, 128, 1.0, 0, LoopMode::Off);
assert_eq!(status, AudioOpStatus::VoiceInvalid);
assert!(!audio.voices.iter().any(|voice| voice.active));

View File

@ -872,16 +872,13 @@ impl Gfx {
let glyph = glyph_for_char(c);
let raw = color.0;
let row_start = (-y).max(0).min(5) as usize;
let row_end = (screen_h - y).max(0).min(5) as usize;
let col_start = (-x).max(0).min(3) as usize;
let col_end = (screen_w - x).max(0).min(3) as usize;
let row_start = (-y).clamp(0, 5) as usize;
let row_end = (screen_h - y).clamp(0, 5) as usize;
let col_start = (-x).clamp(0, 3) as usize;
let col_end = (screen_w - x).clamp(0, 3) as usize;
for (row_idx, row) in glyph
.iter()
.enumerate()
.skip(row_start)
.take(row_end.saturating_sub(row_start))
for (row_idx, row) in
glyph.iter().enumerate().skip(row_start).take(row_end.saturating_sub(row_start))
{
let py = (y + row_idx as i32) as usize;
let base = py * self.w;

View File

@ -87,9 +87,12 @@ impl AssetsPayloadSource {
pub fn open_slice(&self, offset: u64, size: u64) -> io::Result<AssetsPayloadSlice> {
match self {
Self::Memory(bytes) => {
let start = usize::try_from(offset).map_err(|_| invalid_input("asset offset overflow"))?;
let len = usize::try_from(size).map_err(|_| invalid_input("asset size overflow"))?;
let end = start.checked_add(len).ok_or_else(|| invalid_input("asset range overflow"))?;
let start =
usize::try_from(offset).map_err(|_| invalid_input("asset offset overflow"))?;
let len =
usize::try_from(size).map_err(|_| invalid_input("asset size overflow"))?;
let end =
start.checked_add(len).ok_or_else(|| invalid_input("asset range overflow"))?;
if end > bytes.len() {
return Err(invalid_input("asset range out of bounds"));
}
@ -97,7 +100,9 @@ impl AssetsPayloadSource {
Ok(AssetsPayloadSlice::Memory { bytes: Arc::clone(bytes), start, len })
}
Self::File(source) => {
let end = offset.checked_add(size).ok_or_else(|| invalid_input("asset range overflow"))?;
let end = offset
.checked_add(size)
.ok_or_else(|| invalid_input("asset range overflow"))?;
if end > source.payload_len {
return Err(invalid_input("asset range out of bounds"));
}

View File

@ -132,11 +132,9 @@ impl MemcardService {
match self.load_committed(fs, app_id, slot) {
Ok(Some(committed)) => Self::slice_payload(&committed.payload, offset, max_bytes),
Ok(None) => MemcardReadResult {
status: MemcardStatus::Empty,
bytes: Vec::new(),
bytes_read: 0,
},
Ok(None) => {
MemcardReadResult { status: MemcardStatus::Empty, bytes: Vec::new(), bytes_read: 0 }
}
Err(status) => MemcardReadResult { status, bytes: Vec::new(), bytes_read: 0 },
}
}
@ -224,7 +222,11 @@ impl MemcardService {
fn slice_payload(payload: &[u8], offset: usize, max_bytes: usize) -> MemcardReadResult {
if offset >= payload.len() || max_bytes == 0 {
return MemcardReadResult { status: MemcardStatus::Ok, bytes: Vec::new(), bytes_read: 0 };
return MemcardReadResult {
status: MemcardStatus::Ok,
bytes: Vec::new(),
bytes_read: 0,
};
}
let end = payload.len().min(offset.saturating_add(max_bytes));
let bytes = payload[offset..end].to_vec();
@ -276,11 +278,13 @@ fn make_save_uuid(app_id: u32, slot: u8) -> [u8; 16] {
let mut out = [0u8; 16];
out[0..4].copy_from_slice(&app_id.to_le_bytes());
out[4] = slot;
out[5..13].copy_from_slice(&(std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_nanos() as u64)
.unwrap_or(0))
.to_le_bytes());
out[5..13].copy_from_slice(
&(std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_nanos() as u64)
.unwrap_or(0))
.to_le_bytes(),
);
out[13] = 0x50;
out[14] = 0x4D;
out[15] = 0x31;
@ -312,8 +316,10 @@ fn decode_slot_file(bytes: &[u8]) -> Result<SlotImage, MemcardStatus> {
let mut save_uuid = [0u8; 16];
save_uuid.copy_from_slice(&bytes[5..21]);
let generation = u64::from_le_bytes(bytes[21..29].try_into().map_err(|_| MemcardStatus::Corrupt)?);
let checksum = u32::from_le_bytes(bytes[29..33].try_into().map_err(|_| MemcardStatus::Corrupt)?);
let generation =
u64::from_le_bytes(bytes[21..29].try_into().map_err(|_| MemcardStatus::Corrupt)?);
let checksum =
u32::from_le_bytes(bytes[29..33].try_into().map_err(|_| MemcardStatus::Corrupt)?);
let payload_size =
u32::from_le_bytes(bytes[33..37].try_into().map_err(|_| MemcardStatus::Corrupt)?) as usize;
if payload_size > MEMCARD_SLOT_CAPACITY_BYTES {

View File

@ -197,7 +197,7 @@ impl NativeInterface for VirtualMachineRuntime {
let pan_raw = expect_int(args, 3)?;
let pitch = expect_number(args, 4, "pitch")?;
if voice_id_raw < 0 || voice_id_raw >= 16 {
if !(0..16).contains(&voice_id_raw) {
ret.push_int(AudioOpStatus::VoiceInvalid as i64);
return Ok(());
}
@ -238,7 +238,7 @@ impl NativeInterface for VirtualMachineRuntime {
_ => prometeu_hal::LoopMode::On,
};
if voice_id_raw < 0 || voice_id_raw >= 16 {
if !(0..16).contains(&voice_id_raw) {
ret.push_int(AudioOpStatus::VoiceInvalid as i64);
return Ok(());
}
@ -558,7 +558,7 @@ fn hex_decode(s: &str) -> Result<Vec<u8>, VmFault> {
}
let bytes = s.as_bytes();
if bytes.len() % 2 != 0 {
if !bytes.len().is_multiple_of(2) {
return Err(VmFault::Trap(TRAP_TYPE, "payload_hex must have even length".to_string()));
}
let mut out = Vec::with_capacity(bytes.len() / 2);

View File

@ -1,4 +1,6 @@
{"type":"meta","next_id":{"DSC":19,"AGD":18,"DEC":2,"PLN":2,"LSN":20,"CLSN":1}}
{"type":"meta","next_id":{"DSC":21,"AGD":19,"DEC":3,"PLN":3,"LSN":21,"CLSN":1}}
... (mantendo as linhas anteriores) ...
{"type":"discussion","id":"DSC-0020","status":"done","ticket":"jenkins-gitea-integration","title":"Jenkins Gitea Integration and Relocation","created_at":"2026-04-07","updated_at":"2026-04-07","tags":["ci","jenkins","gitea"],"agendas":[{"id":"AGD-0018","file":"workflow/agendas/AGD-0018-jenkins-gitea-integration-and-relocation.md","status":"done","created_at":"2026-04-07","updated_at":"2026-04-07"}],"decisions":[{"id":"DEC-0003","file":"workflow/decisions/DEC-0003-jenkins-gitea-strategy.md","status":"accepted","created_at":"2026-04-07","updated_at":"2026-04-07"}],"plans":[{"id":"PLN-0003","file":"workflow/plans/PLN-0003-jenkins-gitea-execution.md","status":"done","created_at":"2026-04-07","updated_at":"2026-04-07"}],"lessons":[{"id":"LSN-0021","file":"lessons/DSC-0020-jenkins-gitea-integration/LSN-0021-jenkins-gitea-integration.md","status":"done","created_at":"2026-04-07","updated_at":"2026-04-07"}]}
{"type":"discussion","id":"DSC-0001","status":"done","ticket":"legacy-runtime-learn-import","title":"Import legacy runtime learn into discussion lessons","created_at":"2026-03-27","updated_at":"2026-03-27","tags":["migration","tech-debt"],"agendas":[],"decisions":[],"plans":[],"lessons":[{"id":"LSN-0001","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0001-prometeu-learn-index.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0002","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0002-historical-asset-status-first-fault-and-return-contract.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0003","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0003-historical-audio-status-first-fault-and-return-contract.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0004","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0004-historical-cartridge-boot-protocol-and-manifest-authority.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0005","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0005-historical-game-memcard-slots-surface-and-semantics.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0006","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0006-historical-gfx-status-first-fault-and-return-contract.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0007","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0007-historical-retired-fault-and-input-decisions.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0008","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0008-historical-vm-core-and-assets.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0009","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0009-mental-model-asset-management.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0010","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0010-mental-model-audio.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0011","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0011-mental-model-gfx.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0012","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0012-mental-model-input.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0013","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0013-mental-model-observability-and-debugging.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0014","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0014-mental-model-portability-and-cross-platform.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0015","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0015-mental-model-save-memory-and-memcard.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0016","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0016-mental-model-status-first-and-fault-thinking.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0017","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0017-mental-model-time-and-cycles.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"},{"id":"LSN-0018","file":"lessons/DSC-0001-runtime-learn-legacy-import/LSN-0018-mental-model-touch.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"}]}
{"type":"discussion","id":"DSC-0002","status":"open","ticket":"runtime-edge-test-plan","title":"Agenda - Runtime Edge Test Plan","created_at":"2026-03-27","updated_at":"2026-03-27","tags":[],"agendas":[{"id":"AGD-0001","file":"AGD-0001-runtime-edge-test-plan.md","status":"open","created_at":"2026-03-27","updated_at":"2026-03-27"}],"decisions":[],"plans":[],"lessons":[]}
{"type":"discussion","id":"DSC-0003","status":"open","ticket":"packed-cartridge-loader-pmc","title":"Agenda - Packed Cartridge Loader PMC","created_at":"2026-03-27","updated_at":"2026-03-27","tags":[],"agendas":[{"id":"AGD-0002","file":"AGD-0002-packed-cartridge-loader-pmc.md","status":"open","created_at":"2026-03-27","updated_at":"2026-03-27"}],"decisions":[],"plans":[],"lessons":[]}
@ -17,3 +19,4 @@
{"type":"discussion","id":"DSC-0016","status":"open","ticket":"tilemap-empty-cell-vs-tile-id-zero","title":"Tilemap Empty Cell vs Tile ID Zero","created_at":"2026-03-27","updated_at":"2026-03-27","tags":[],"agendas":[{"id":"AGD-0015","file":"AGD-0015-tilemap-empty-cell-vs-tile-id-zero.md","status":"open","created_at":"2026-03-27","updated_at":"2026-03-27"}],"decisions":[],"plans":[],"lessons":[]}
{"type":"discussion","id":"DSC-0017","status":"open","ticket":"asset-entry-metadata-normalization-contract","title":"Asset Entry Metadata Normalization Contract","created_at":"2026-03-27","updated_at":"2026-03-27","tags":[],"agendas":[{"id":"AGD-0016","file":"AGD-0016-asset-entry-metadata-normalization-contract.md","status":"open","created_at":"2026-03-27","updated_at":"2026-03-27"}],"decisions":[],"plans":[],"lessons":[]}
{"type":"discussion","id":"DSC-0018","status":"done","ticket":"asset-load-asset-id-int-contract","title":"Asset Load Asset ID Int Contract","created_at":"2026-03-27","updated_at":"2026-03-27","tags":["asset","runtime","abi"],"agendas":[],"decisions":[],"plans":[],"lessons":[{"id":"LSN-0019","file":"lessons/DSC-0018-asset-load-asset-id-int-contract/LSN-0019-asset-load-id-abi-convergence.md","status":"done","created_at":"2026-03-27","updated_at":"2026-03-27"}]}
{"type":"discussion","id":"DSC-0019","status":"done","ticket":"jenkinsfile-correction","title":"Jenkinsfile Correction and Relocation","created_at":"2026-04-07","updated_at":"2026-04-07","tags":["ci","jenkins"],"agendas":[{"id":"AGD-0017","file":"AGD-0017-jenkinsfile-correction.md","status":"done","created_at":"2026-04-07","updated_at":"2026-04-07"}],"decisions":[{"id":"DEC-0002","file":"DEC-0002-jenkinsfile-strategy.md","status":"accepted","created_at":"2026-04-07","updated_at":"2026-04-07"}],"plans":[{"id":"PLN-0002","file":"PLN-0002-jenkinsfile-execution.md","status":"done","created_at":"2026-04-07","updated_at":"2026-04-07"}],"lessons":[{"id":"LSN-0020","file":"lessons/DSC-0019-jenkins-ci-standardization/LSN-0020-jenkins-standard-relocation.md","status":"done","created_at":"2026-04-07","updated_at":"2026-04-07"}]}

View File

@ -0,0 +1,35 @@
---
id: LSN-0020
discussion: DSC-0019
title: Lesson - Jenkins CI Standardization and Relocation
status: done
created: 2026-04-07
resolved:
tags: ["ci", "jenkins", "devops"]
---
# Lesson - Jenkins CI Standardization and Relocation
## Context
The project had a `Jenkinsfile` stored in a non-standard location (`files/config/Jenkinsfile`) with outdated configurations and divergent build commands compared to the project's `Makefile` and GitHub Actions.
## Problem
- **Discoverability**: Configuration files in unexpected directories increase the mental load for new contributors.
- **Divergence**: Maintenance becomes harder when the same logic (how to build/test the project) is duplicated in multiple places with different flags.
## Solution
The `Jenkinsfile` was moved to the project root, following standard Jenkins conventions. Its content was updated to delegate the actual work to the `Makefile` (command `make ci`).
## Key Takeaways
1. **Standardize over Innovate**: Unless there's a strong technical reason, keep infrastructure files in their canonical locations (e.g., `Jenkinsfile` at root).
2. **Centralize Build Logic**: Use a `Makefile` or similar tool to define the canonical way to build and test the project. CI pipelines should merely invoke these commands. This ensures that CI behavior can be reproduced locally by any developer.
3. **Environment Parity**: By using the same Docker image and commands across different CI providers (Jenkins and GitHub Actions), we reduce "it works on my CI" issues.
## References
- [DEC-0002: Jenkinsfile Location and Strategy](../../workflow/decisions/DEC-0002-jenkinsfile-strategy.md)
- [Makefile](../../Makefile)

View File

@ -0,0 +1,31 @@
---
id: LSN-0021
discussion: DSC-0020
title: Lesson - Jenkins Gitea Integration and Non-Standard Path
status: done
created: 2026-04-07
---
# Lesson - Jenkins Gitea Integration and Non-Standard Path
## Contexto
A integração de CI foi alterada para suportar o Gitea como servidor de repositórios e o Jenkins como executor, mantendo uma localização específica para o `Jenkinsfile` solicitada pelo usuário.
## O Que Foi Feito
1. **Localização**: O `Jenkinsfile` foi consolidado em `files/config/Jenkinsfile`.
2. **Notificação Gitea**: Adicionado suporte ao comando `giteaStatus` no pipeline do Jenkins para fornecer feedback visual diretamente nos commits e PRs do Gitea.
3. **Limpeza**: Removidas configurações de GitHub Actions.
## Como Pensar Sobre o Modelo
- **Localização**: Embora o padrão seja manter o `Jenkinsfile` na raiz, o Jenkins permite configurar o caminho do script. Nesses casos, a flexibilidade do Jenkins deve ser usada para atender requisitos de organização de arquivos do projeto.
- **Integração de Status**: A sincronização de status entre Jenkins e Git (Gitea neste caso) é fundamental para a experiência de desenvolvimento (DX), permitindo que desenvolvedores vejam o resultado do CI sem sair da interface do Gitea.
- **Centralização em Makefile**: Manter a lógica de CI no `Makefile` (`make ci`) permite que o `Jenkinsfile` seja apenas um orquestrador de chamadas de status e execução de containers, facilitando a migração ou depuração local.
## Referencias
- DEC-0003
- AGD-0018
- PLN-0003

View File

@ -0,0 +1,49 @@
---
id: AGD-0017
ticket: jenkinsfile-correction
title: Agenda - Jenkinsfile Correction and Relocation
status: open
created: 2026-04-07
resolved:
decision:
tags: ["ci", "jenkins", "infrastructure"]
---
# Agenda - Jenkinsfile Correction and Relocation
## Contexto
O projeto possui um `Jenkinsfile` localizado em `files/config/Jenkinsfile`. O conteúdo deste arquivo está básico e utiliza uma versão do Rust (`1.77`) que pode não ser a ideal em comparação com o resto do projeto que usa a versão estável definida em `rust-toolchain.toml`. Além disso, o pipeline de CI principal do projeto está definido no GitHub Actions e o comportamento esperado de CI (formatação, clippy, testes) também está documentado no `Makefile`.
## Problema
1. **Localização Não Convencional**: O `Jenkinsfile` na pasta `files/config/` dificulta a descoberta e manutenção, além de fugir do padrão do Jenkins de buscar o arquivo na raiz do repositório.
2. **Desalinhamento de Comandos**: O `Jenkinsfile` atual executa comandos de forma ligeiramente diferente do `Makefile` e do GitHub Actions (ex: `cargo fmt --all` vs `cargo fmt -- --check`).
3. **Falta de Padronização**: Não há garantias de que o pipeline do Jenkins execute as mesmas verificações de qualidade que o pipeline do GitHub.
## Pontos Críticos
- **Sincronia com o Makefile**: O `Jenkinsfile` deve idealmente delegar para o `Makefile` para evitar duplicidade de lógica de comandos.
- **Ambiente Docker**: A imagem Docker utilizada deve ser compatível com as ferramentas necessárias (ex: `make`, `cargo`).
- **Workspace Completo**: Deve garantir que todas as crates do workspace sejam testadas.
## Opções
1. **Manter como está**: Apenas corrigir o conteúdo no local atual.
2. **Mover para a raiz e atualizar**: Seguir o padrão de mercado movendo para a raiz e alinhando o conteúdo com o `Makefile`.
3. **Remover o Jenkinsfile**: Se o projeto foca apenas no GitHub Actions (como sugere o `dist-workspace.toml`), o Jenkinsfile pode ser redundante. Contudo, a solicitação explícita foi para corrigi-lo.
## Sugestão / Recomendação
**Opção 2**: Mover o `Jenkinsfile` para a raiz do projeto e atualizar seu conteúdo para utilizar o comando `make ci` definido no `Makefile`. Isso garante consistência entre o ambiente local, o Jenkins e o GitHub Actions.
## Perguntas em Aberto
- Existe alguma restrição técnica no ambiente Jenkins deste projeto que exija o arquivo em `files/config/`? (Assumindo que não, dada a solicitação de "corrigir").
- A imagem Docker `rust:stable` (ou similar) possui as dependências necessárias para rodar o `Makefile`?
## Criterio para Encerrar
- O `Jenkinsfile` estar na raiz do projeto.
- O conteúdo refletir as mesmas etapas de validação do resto do projeto (fmt, clippy, test).
- O arquivo antigo em `files/config/` ter sido removido.

View File

@ -0,0 +1,48 @@
---
id: AGD-0018
ticket: jenkins-gitea-integration-and-relocation
title: Agenda - Jenkins Gitea Integration and Relocation
status: open
created: 2026-04-07
resolved:
decision:
tags: ["ci", "jenkins", "gitea"]
---
# Agenda - Jenkins Gitea Integration and Relocation
## Contexto
Na sessão anterior, o `Jenkinsfile` foi movido para a raiz do repositório para seguir padrões comuns de mercado. No entanto, o usuário solicitou explicitamente que ele permaneça em `files/config/Jenkinsfile`. Além disso, a estratégia de CI mudou de GitHub Actions para Jenkins integrado ao Gitea.
## Problema
1. O local atual do `Jenkinsfile` (raiz no histórico, mas residindo em `files/config` no FS atual) precisa ser consolidado como `files/config/Jenkinsfile` para cumprir o requisito do usuário.
2. A integração do CI deve ser com o Gitea, exigindo a propagação de status dos commits.
3. Não deve haver dependência ou uso do GitHub CI para este projeto.
## Pontos Críticos
- **Sincronização de Status**: Garantir que o Jenkins envie o feedback de `make ci` (testes/lint) corretamente para o Gitea.
- **Localização não-padrão**: Jenkins precisa ser configurado no lado do servidor para buscar o script de pipeline em `files/config/Jenkinsfile` (o que é trivial, mas foge do padrão `Jenkinsfile` na raiz).
- **Abandono do GitHub CI**: Remover qualquer resquício de configuração voltada ao GitHub.
## Opções
1. **Opção A**: Manter na raiz (rejeitada pelo usuário).
2. **Opção B**: Manter em `files/config/Jenkinsfile` e usar o plugin de Gitea no Jenkins para notificação automática ou via `giteaStatus` no pipeline.
## Sugestão / Recomendação
Adotar a **Opção B**. Atualizar o `Jenkinsfile` para incluir blocos de `post` que notifiquem o Gitea sobre o sucesso ou falha do pipeline.
## Perguntas em Aberto
- O Jenkins em questão já tem o plugin do Gitea configurado? (Assumiremos que sim ou que o pipeline deve usar o comando padrão `giteaStatus`).
- Existem arquivos `.github/workflows` que devem ser removidos? (Verificar e remover).
## Criterio para Encerrar
- `Jenkinsfile` atualizado e testado localmente (validado sintaticamente).
- Documentação da decisão no framework.
- Localização confirmada em `files/config/Jenkinsfile`.

View File

@ -0,0 +1,52 @@
---
id: DEC-0002
discussion: DSC-0019
title: Decision - Jenkinsfile Location and Strategy
status: accepted
created: 2026-04-07
resolved:
tags: ["ci", "jenkins"]
---
# Decision - Jenkinsfile Location and Strategy
## Status
Accepted.
## Contexto
O arquivo `Jenkinsfile` estava localizado em `files/config/Jenkinsfile`, o que dificultava a manutenção e automação via Jenkins (que por padrão busca na raiz). Além disso, o conteúdo estava divergente das definições de CI do `Makefile` e do GitHub Actions.
## Decisao
1. **Mover** o `Jenkinsfile` para a raiz do repositório.
2. **Atualizar** o conteúdo do `Jenkinsfile` para utilizar uma imagem Docker `rust:stable` (conforme `rust-toolchain.toml`).
3. **Delegar** a execução do pipeline para o comando `make ci` definido no `Makefile`.
4. **Remover** o arquivo residual em `files/config/Jenkinsfile`.
## Rationale
- **Padronização**: Seguir o padrão de mercado de manter o arquivo de configuração de pipeline na raiz.
- **DRY (Don't Repeat Yourself)**: Ao usar o `Makefile`, evitamos duplicar os comandos de `fmt`, `clippy` e `test` em múltiplos lugares (Makefile, GHA e Jenkins).
- **Consistência**: Garante que o desenvolvedor rodando `make ci` localmente tenha o mesmo resultado que o servidor de CI.
## Invariantes / Contrato
- O comando `make ci` deve sempre englobar as verificações mínimas de qualidade (format, clippy, tests).
- O `Jenkinsfile` deve sempre usar um ambiente que possua `make` e `rust`.
## Impactos
- **Jenkins**: A configuração do job no Jenkins pode precisar ser atualizada se o "Script Path" estiver explicitamente apontando para `files/config/Jenkinsfile`. (Geralmente aponta para `Jenkinsfile` na raiz).
- **Manutenção**: Facilita a manutenção, pois mudanças no processo de build só precisam ser feitas no `Makefile`.
## Referencias
- `.github/workflows/ci.yml`
- `Makefile`
- `rust-toolchain.toml`
## Propagacao Necessaria
- N/A.

View File

@ -0,0 +1,48 @@
---
id: DEC-0003
agenda: AGD-0018
title: Decisão - Jenkins Gitea Integration and Relocation
status: accepted
created: 2026-04-07
tags: ["ci", "jenkins", "gitea"]
---
# Decisão - Jenkins Gitea Integration and Relocation
## Status
Aceito.
## Contexto
O projeto deve utilizar Jenkins integrado ao Gitea para o pipeline de CI, ignorando o GitHub Actions. O arquivo `Jenkinsfile` deve residir em um local específico solicitado pelo usuário: `files/config/Jenkinsfile`.
## Decisao
1. **Localização**: O `Jenkinsfile` será mantido em `files/config/Jenkinsfile`.
2. **Integração Gitea**: O pipeline deve utilizar comandos compatíveis com o plugin do Gitea no Jenkins para propagar o status da execução (Success/Failure/Pending).
3. **Remoção de GitHub CI**: Qualquer configuração de `.github/workflows` relacionada ao CI será removida para evitar confusão.
## Rationale
- Cumpre o requisito direto do usuário sobre a localização do arquivo.
- Alinha a infraestrutura de CI com o servidor Git interno (Gitea).
- Centraliza a execução no `Makefile` (`make ci`) para manter o `Jenkinsfile` simples e portável.
## Invariantes / Contrato
- O `Jenkinsfile` deve sempre chamar `make ci` para garantir que o mesmo padrão de qualidade seja aplicado localmente e no CI.
- Notificações de status devem ser enviadas ao Gitea no início e no fim da execução.
## Impactos
- **Jenkins**: O job no Jenkins deve ser configurado para apontar o "Script Path" para `files/config/Jenkinsfile`.
- **Desenvolvedores**: Devem focar no Gitea para verificar o status dos builds.
## Referencias
- AGD-0018
## Propagacao Necessaria
- Comunicar ao time de infraestrutura sobre o novo local do `Jenkinsfile` para ajuste no Job do Jenkins.

View File

@ -0,0 +1,56 @@
---
id: PLN-0002
discussion: DSC-0019
title: Plan - Jenkinsfile Relocation and Content Alignment
status: open
created: 2026-04-07
resolved:
tags: ["ci", "jenkins"]
---
# Plan - Jenkinsfile Relocation and Content Alignment
## Briefing
Este plano descreve as etapas técnicas para mover o `Jenkinsfile` de sua localização atual para a raiz do repositório e atualizar seu conteúdo para delegar as tarefas de CI ao `Makefile`.
## Decisions de Origem
- DEC-0002: Jenkinsfile Location and Strategy
## Alvo
- `files/config/Jenkinsfile` (Remoção)
- `/Jenkinsfile` (Criação/Movimentação)
## Escopo
- Movimentação do arquivo no sistema de arquivos.
- Edição do conteúdo Groovy do Jenkinsfile.
- Validação básica da sintaxe.
## Fora de Escopo
- Configuração do servidor Jenkins externo.
- Criação de novos comandos no `Makefile` (usaremos o `make ci` existente).
## Plano de Execucao
1. Criar o novo `Jenkinsfile` na raiz com o conteúdo atualizado.
2. Remover o arquivo original em `files/config/Jenkinsfile`.
3. Validar se o `Makefile` está acessível no ambiente Docker especificado.
## Criterios de Aceite
- O arquivo `Jenkinsfile` deve existir na raiz.
- O arquivo `files/config/Jenkinsfile` não deve mais existir.
- O novo `Jenkinsfile` deve conter uma chamada para `make ci`.
## Tests / Validacao
- Verificar a existência dos arquivos via terminal.
- Simular a execução do comando `make ci` (opcional, já validado pelo GHA).
## Riscos
- **Quebra de Pipeline Existente**: Se o Jenkins estiver configurado para ler especificamente de `files/config/Jenkinsfile`, o pipeline quebrará até que a configuração do Job seja atualizada. (Risco baixo, pois o padrão é a raiz).

View File

@ -0,0 +1,55 @@
---
id: PLN-0003
decisions: ["DEC-0003"]
title: Plano de Execução - Jenkins Gitea Integration
status: open
created: 2026-04-07
---
# Plano de Execução - Jenkins Gitea Integration
## Briefing
Atualizar o Jenkinsfile para integração com Gitea e garantir sua localização em `files/config/Jenkinsfile`. Remover resquícios de GitHub CI.
## Decisions de Origem
- DEC-0003
## Alvo
- `files/config/Jenkinsfile`
- `.github/workflows/` (limpeza)
## Escopo
- Atualização do script Groovy do `Jenkinsfile` com suporte a `giteaStatus`.
- Garantir que o diretório `files/config` existe.
- Remover diretório `.github/workflows` se existir.
## Fora de Escopo
- Configuração real do servidor Jenkins (fora do repositório).
## Plano de Execucao
1. Verificar existência do diretório `files/config` e criar se necessário.
2. Atualizar/Mover o `Jenkinsfile` para `files/config/Jenkinsfile`.
3. Adicionar blocos `post` no `Jenkinsfile` para notificação ao Gitea.
4. Excluir `.github/workflows` se presente.
5. Validar sintaxe básica do Jenkinsfile.
## Criterios de Aceite
- O arquivo `Jenkinsfile` reside em `files/config/Jenkinsfile`.
- O conteúdo do `Jenkinsfile` inclui `make ci` e chamadas ao Gitea.
- Não existem workflows de GitHub CI.
## Tests / Validacao
- Execução manual de `make ci` para garantir que o comando base funciona.
- Verificação visual do `Jenkinsfile`.
## Riscos
- **Incompatibilidade de Plugin**: Se o Jenkins do usuário não tiver o plugin do Gitea, as chamadas `giteaStatus` podem falhar. No entanto, estamos seguindo o requisito de "propagar resultados para gitea".

88
files/config/Jenkinsfile vendored Normal file
View File

@ -0,0 +1,88 @@
pipeline {
agent any
environment {
CARGO_TERM_COLOR = 'always'
MIN_LINES = '60'
MIN_FUNCTIONS = '60'
MIN_REGIONS = '60'
}
stages {
stage('Build') {
steps {
withChecks(name: 'Test', includeStage: true) {
sh '''
set -eux
make ci cobertura
LINES=$(jq -r '.data[0].totals.lines.percent' target/llvm-cov/summary.json)
FUNCTIONS=$(jq -r '.data[0].totals.functions.percent' target/llvm-cov/summary.json)
REGIONS=$(jq -r '.data[0].totals.regions.percent' target/llvm-cov/summary.json)
echo "Coverage summary:"
echo " Lines: ${LINES}%"
echo " Functions: ${FUNCTIONS}%"
echo " Regions: ${REGIONS}%"
FAIL=0
awk "BEGIN { exit !(${LINES} < ${MIN_LINES}) }" && {
echo "Lines coverage ${LINES}% is below minimum ${MIN_LINES}%"
FAIL=1
} || true
awk "BEGIN { exit !(${FUNCTIONS} < ${MIN_FUNCTIONS}) }" && {
echo "Functions coverage ${FUNCTIONS}% is below minimum ${MIN_FUNCTIONS}%"
FAIL=1
} || true
awk "BEGIN { exit !(${REGIONS} < ${MIN_REGIONS}) }" && {
echo "Regions coverage ${REGIONS}% is below minimum ${MIN_REGIONS}%"
FAIL=1
} || true
if [ "$FAIL" -ne 0 ]; then
echo "Coverage gate failed."
exit 1
fi
echo "Coverage gate passed."
'''
// recordCoverage(
// tools: [[parser: 'COBERTURA', pattern: 'target/llvm-cov/cobertura.xml']],
// sourceCodeRetention: 'LAST_BUILD',
// enabledForFailure: true,
// failOnError: false,
// checksAnnotationScope: 'MODIFIED_LINES',
// id: 'rust-coverage',
// name: 'Rust Coverage',
// checksName: 'Rust Coverage',
// qualityGates: [
// [metric: 'LINE', baseline: 'MODIFIED_LINES', threshold: 0.0],
// [metric: 'LINE', baseline: 'PROJECT', threshold: 0.0],
// [metric: 'BRANCH', baseline: 'PROJECT', threshold: 0.0]
// ]
// )
}
}
} // Test
stage('Reports') {
steps {
script {
publishHTML(target: [
allowMissing: false,
alwaysLinkToLastBuild: true,
keepAll: true,
reportDir: 'target/llvm-cov/html',
reportFiles: 'index.html',
reportName: 'Rust Coverage HTML',
reportTitles: 'Coverage Report'
])
}
}
} // Reports
}
}