From 94407eb9ebf3ed077ea4924b4d8ca30c7ea96c76 Mon Sep 17 00:00:00 2001 From: danbugs Date: Sat, 7 Mar 2026 03:42:42 +0000 Subject: [PATCH 1/8] feat: add GuestSemaphore behind nanvix-unstable feature flag Add a GuestSemaphore type that wraps a u64 counter at a known guest physical address, enabling the host to signal the guest via volatile stores. Gated behind nanvix-unstable. - GuestSemaphore: struct with increment(), decrement(), value() - UninitializedSandbox::guest_semaphore(gpa): unsafe constructor that validates GPA alignment, bounds (including u64 size), and reads the initial value from memory via volatile read - Unit tests for valid/invalid GPA, alignment, bounds, increment, decrement, and underflow Signed-off-by: danbugs --- src/hyperlight_host/src/lib.rs | 3 + .../src/sandbox/uninitialized.rs | 204 ++++++++++++++++++ 2 files changed, 207 insertions(+) diff --git a/src/hyperlight_host/src/lib.rs b/src/hyperlight_host/src/lib.rs index 2b3e7168f..43d767b8b 100644 --- a/src/hyperlight_host/src/lib.rs +++ b/src/hyperlight_host/src/lib.rs @@ -94,6 +94,9 @@ pub use sandbox::MultiUseSandbox; pub use sandbox::UninitializedSandbox; /// The re-export for the `GuestBinary` type pub use sandbox::uninitialized::GuestBinary; +/// The re-export for the `GuestSemaphore` type +#[cfg(feature = "nanvix-unstable")] +pub use sandbox::uninitialized::GuestSemaphore; /// The universal `Result` type used throughout the Hyperlight codebase. 
pub type Result = core::result::Result; diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index 2167dca43..c6e99d68f 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -29,9 +29,13 @@ use crate::func::host_functions::{HostFunction, register_host_function}; use crate::func::{ParameterTuple, SupportedReturnType}; #[cfg(feature = "build-metadata")] use crate::log_build_details; +#[cfg(feature = "nanvix-unstable")] +use crate::mem::layout::SandboxMemoryLayout; use crate::mem::memory_region::{DEFAULT_GUEST_BLOB_MEM_FLAGS, MemoryRegionFlags}; use crate::mem::mgr::SandboxMemoryManager; use crate::mem::shared_mem::ExclusiveSharedMemory; +#[cfg(feature = "nanvix-unstable")] +use crate::mem::shared_mem::SharedMemory; use crate::sandbox::SandboxConfiguration; use crate::{MultiUseSandbox, Result, new_error}; @@ -54,6 +58,56 @@ pub(crate) struct SandboxRuntimeConfig { pub(crate) entry_point: Option, } +/// A counting semaphore backed by a u64 in guest shared memory. +/// +/// Created via [`UninitializedSandbox::guest_semaphore()`]. The host +/// manipulates the counter with [`increment()`](Self::increment) and +/// [`decrement()`](Self::decrement); the guest reads the same location +/// with a volatile load at the agreed-upon GPA. +#[cfg(feature = "nanvix-unstable")] +#[derive(Debug)] +pub struct GuestSemaphore { + ptr: *mut u64, + value: u64, +} + +// SAFETY: The pointer targets mmap'd shared memory owned by the sandbox. +// Access must be externally synchronised (e.g. via Mutex). +#[cfg(feature = "nanvix-unstable")] +unsafe impl Send for GuestSemaphore {} + +#[cfg(feature = "nanvix-unstable")] +impl GuestSemaphore { + /// Increments the counter by one and writes it to guest memory with + /// a volatile store. 
+ pub fn increment(&mut self) -> Result<()> { + self.value = self + .value + .checked_add(1) + .ok_or_else(|| new_error!("GuestSemaphore overflow"))?; + // SAFETY: `ptr` was validated as aligned and in-bounds at construction time. + unsafe { core::ptr::write_volatile(self.ptr, self.value) }; + Ok(()) + } + + /// Decrements the counter by one and writes it to guest memory with + /// a volatile store. + pub fn decrement(&mut self) -> Result<()> { + self.value = self + .value + .checked_sub(1) + .ok_or_else(|| new_error!("GuestSemaphore underflow"))?; + // SAFETY: `ptr` was validated as aligned and in-bounds at construction time. + unsafe { core::ptr::write_volatile(self.ptr, self.value) }; + Ok(()) + } + + /// Returns the current host-side value of the counter. + pub fn value(&self) -> u64 { + self.value + } +} + /// A preliminary sandbox that represents allocated memory and registered host functions, /// but has not yet created the underlying virtual machine. /// @@ -169,6 +223,57 @@ impl<'a> From> for GuestEnvironment<'a, '_> { } impl UninitializedSandbox { + /// Creates a [`GuestSemaphore`] backed by a u64 counter at the given + /// guest physical address (GPA). + /// + /// The GPA must fall within the sandbox's allocated memory region and + /// be 8-byte aligned. + /// + /// # Safety + /// + /// The caller must ensure the returned `GuestSemaphore` does not + /// outlive the sandbox's underlying shared memory mapping (which + /// stays alive through `evolve()` into `MultiUseSandbox`). 
+ #[cfg(feature = "nanvix-unstable")] + pub unsafe fn guest_semaphore(&mut self, gpa: usize) -> Result { + let base = SandboxMemoryLayout::BASE_ADDRESS; + let mem_size = self.mgr.shared_mem.mem_size(); + + if gpa < base { + return Err(new_error!( + "GPA {:#x} is below the sandbox base address ({:#x})", + gpa, + base + )); + } + + let offset = gpa - base; + + if gpa % core::mem::align_of::() != 0 { + return Err(new_error!( + "GPA {:#x} is not 8-byte aligned", + gpa, + )); + } + + let end = offset.checked_add(core::mem::size_of::()).ok_or_else(|| { + new_error!("GPA {:#x} causes offset overflow", gpa) + })?; + if end > mem_size { + return Err(new_error!( + "GPA {:#x} (offset {:#x}, end {:#x}) is outside the sandbox memory region (size {:#x})", + gpa, + offset, + end, + mem_size + )); + } + + let ptr = unsafe { self.mgr.shared_mem.base_ptr().add(offset) as *mut u64 }; + let value = unsafe { core::ptr::read_volatile(ptr) }; + Ok(GuestSemaphore { ptr, value }) + } + // Creates a new uninitialized sandbox from a pre-built snapshot. 
// Note that since memory configuration is part of the snapshot the only configuration // that can be changed (from the original snapshot) is the configuration defines the behaviour of @@ -1314,4 +1419,103 @@ mod tests { let _evolved = new_sandbox.evolve().expect("Failed to evolve new_sandbox"); } } + + #[cfg(feature = "nanvix-unstable")] + mod guest_semaphore_tests { + use hyperlight_testing::simple_guest_as_string; + + use crate::mem::shared_mem::SharedMemory; + use crate::sandbox::uninitialized::GuestBinary; + use crate::UninitializedSandbox; + + fn make_sandbox() -> UninitializedSandbox { + UninitializedSandbox::new( + GuestBinary::FilePath(simple_guest_as_string().expect("Guest Binary Missing")), + None, + ) + .expect("Failed to create sandbox") + } + + #[test] + fn valid_gpa() { + use crate::mem::layout::SandboxMemoryLayout; + + let mut sandbox = make_sandbox(); + let mem_size = sandbox.mgr.shared_mem.mem_size(); + // Pick an aligned GPA well within bounds + let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1000; + assert!(gpa % 8 == 0); + assert!(gpa - SandboxMemoryLayout::BASE_ADDRESS + 8 <= mem_size); + + let sem = unsafe { sandbox.guest_semaphore(gpa) }; + assert!(sem.is_ok()); + } + + #[test] + fn out_of_range_gpa() { + let mut sandbox = make_sandbox(); + // GPA far beyond any reasonable allocation + let sem = unsafe { sandbox.guest_semaphore(0xFFFF_FFFF_FFFF_FFF0) }; + assert!(sem.is_err()); + } + + #[test] + fn below_base_or_misaligned_gpa() { + let mut sandbox = make_sandbox(); + // GPA 1 is either below BASE_ADDRESS (default) or misaligned (nanvix-unstable) + let sem = unsafe { sandbox.guest_semaphore(1) }; + assert!(sem.is_err()); + } + + #[test] + fn misaligned_gpa() { + use crate::mem::layout::SandboxMemoryLayout; + + let mut sandbox = make_sandbox(); + // Pick a GPA that's within range but not 8-byte aligned + let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1001; + let sem = unsafe { sandbox.guest_semaphore(gpa) }; + assert!(sem.is_err()); + let 
err = sem.unwrap_err().to_string(); + assert!(err.contains("not 8-byte aligned"), "got: {err}"); + } + + #[test] + fn increment_decrement() { + use crate::mem::layout::SandboxMemoryLayout; + + let mut sandbox = make_sandbox(); + let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1000; + let mut sem = unsafe { sandbox.guest_semaphore(gpa) }.unwrap(); + + let initial = sem.value(); + sem.increment().unwrap(); + assert_eq!(sem.value(), initial + 1); + sem.increment().unwrap(); + assert_eq!(sem.value(), initial + 2); + sem.decrement().unwrap(); + assert_eq!(sem.value(), initial + 1); + } + + #[test] + fn underflow_returns_error() { + use crate::mem::layout::SandboxMemoryLayout; + + let mut sandbox = make_sandbox(); + let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1000; + + // Zero the memory at the GPA first + let base = SandboxMemoryLayout::BASE_ADDRESS; + let offset = gpa - base; + unsafe { + let ptr = sandbox.mgr.shared_mem.base_ptr().add(offset) as *mut u64; + core::ptr::write_volatile(ptr, 0); + } + + let mut sem = unsafe { sandbox.guest_semaphore(gpa) }.unwrap(); + assert_eq!(sem.value(), 0); + let result = sem.decrement(); + assert!(result.is_err()); + } + } } From fc6215714b0048926f454da7be83472072cf9472 Mon Sep 17 00:00:00 2001 From: danbugs Date: Tue, 10 Mar 2026 15:13:53 +0000 Subject: [PATCH 2/8] Fix formatting (rustfmt nightly) Signed-off-by: danbugs --- src/hyperlight_host/src/sandbox/uninitialized.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index c6e99d68f..8182b9250 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -250,15 +250,12 @@ impl UninitializedSandbox { let offset = gpa - base; if gpa % core::mem::align_of::() != 0 { - return Err(new_error!( - "GPA {:#x} is not 8-byte aligned", - gpa, - )); + return Err(new_error!("GPA {:#x} is not 8-byte 
aligned", gpa,)); } - let end = offset.checked_add(core::mem::size_of::()).ok_or_else(|| { - new_error!("GPA {:#x} causes offset overflow", gpa) - })?; + let end = offset + .checked_add(core::mem::size_of::()) + .ok_or_else(|| new_error!("GPA {:#x} causes offset overflow", gpa))?; if end > mem_size { return Err(new_error!( "GPA {:#x} (offset {:#x}, end {:#x}) is outside the sandbox memory region (size {:#x})", @@ -1424,9 +1421,9 @@ mod tests { mod guest_semaphore_tests { use hyperlight_testing::simple_guest_as_string; + use crate::UninitializedSandbox; use crate::mem::shared_mem::SharedMemory; use crate::sandbox::uninitialized::GuestBinary; - use crate::UninitializedSandbox; fn make_sandbox() -> UninitializedSandbox { UninitializedSandbox::new( From 9151259f2d441090427cc435f34c5dd05fcc79ce Mon Sep 17 00:00:00 2001 From: danbugs Date: Tue, 10 Mar 2026 21:22:20 +0000 Subject: [PATCH 3/8] refactor: rename GuestSemaphore to GuestCounter with internal Mutex and fixed offset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename GuestSemaphore → GuestCounter - Add internal Mutex so increment()/decrement() take &self, not &mut self - Replace GPA parameter with fixed SCRATCH_TOP_GUEST_COUNTER_OFFSET (0x1008) in scratch memory, so both host and guest locate the counter independently - Add guest_counter_gva() helper in hyperlight-guest - Add nanvix-unstable feature to hyperlight-guest (forwards to hyperlight-common) - Update tests to match new API Signed-off-by: danbugs --- src/hyperlight_common/src/layout.rs | 9 + src/hyperlight_guest/Cargo.toml | 1 + src/hyperlight_guest/src/layout.rs | 7 + src/hyperlight_host/src/lib.rs | 4 +- .../src/sandbox/uninitialized.rs | 196 +++++++----------- 5 files changed, 93 insertions(+), 124 deletions(-) diff --git a/src/hyperlight_common/src/layout.rs b/src/hyperlight_common/src/layout.rs index b53897bb1..64b79d982 100644 --- a/src/hyperlight_common/src/layout.rs +++ 
b/src/hyperlight_common/src/layout.rs @@ -39,6 +39,15 @@ pub const SCRATCH_TOP_ALLOCATOR_OFFSET: u64 = 0x10; pub const SCRATCH_TOP_SNAPSHOT_PT_GPA_BASE_OFFSET: u64 = 0x18; pub const SCRATCH_TOP_EXN_STACK_OFFSET: u64 = 0x20; +/// Offset from the top of scratch memory for a shared host-guest u64 counter. +/// +/// This is placed at 0x1008 (rather than the next sequential 0x28) so that the +/// counter falls in scratch page 0xffffe000 instead of the very last page +/// 0xfffff000, which on i686 guests would require frame 0xfffff — exceeding the +/// maximum representable frame number. +#[cfg(feature = "nanvix-unstable")] +pub const SCRATCH_TOP_GUEST_COUNTER_OFFSET: u64 = 0x1008; + pub fn scratch_base_gpa(size: usize) -> u64 { (MAX_GPA - size + 1) as u64 } diff --git a/src/hyperlight_guest/Cargo.toml b/src/hyperlight_guest/Cargo.toml index 08becd29a..3c985d1dc 100644 --- a/src/hyperlight_guest/Cargo.toml +++ b/src/hyperlight_guest/Cargo.toml @@ -24,3 +24,4 @@ hyperlight-guest-tracing = { workspace = true, default-features = false, optiona [features] default = [] trace_guest = ["dep:hyperlight-guest-tracing", "hyperlight-guest-tracing?/trace"] +nanvix-unstable = ["hyperlight-common/nanvix-unstable"] diff --git a/src/hyperlight_guest/src/layout.rs b/src/hyperlight_guest/src/layout.rs index 431a17c67..fd9d04a4d 100644 --- a/src/hyperlight_guest/src/layout.rs +++ b/src/hyperlight_guest/src/layout.rs @@ -32,3 +32,10 @@ pub fn snapshot_pt_gpa_base_gva() -> *mut u64 { (MAX_GVA as u64 - SCRATCH_TOP_SNAPSHOT_PT_GPA_BASE_OFFSET + 1) as *mut u64 } pub use arch::{scratch_base_gpa, scratch_base_gva}; + +/// Returns a pointer to the guest counter u64 in scratch memory. 
+#[cfg(feature = "nanvix-unstable")] +pub fn guest_counter_gva() -> *const u64 { + use hyperlight_common::layout::{MAX_GVA, SCRATCH_TOP_GUEST_COUNTER_OFFSET}; + (MAX_GVA as u64 - SCRATCH_TOP_GUEST_COUNTER_OFFSET + 1) as *const u64 +} diff --git a/src/hyperlight_host/src/lib.rs b/src/hyperlight_host/src/lib.rs index 43d767b8b..928f82cd2 100644 --- a/src/hyperlight_host/src/lib.rs +++ b/src/hyperlight_host/src/lib.rs @@ -94,9 +94,9 @@ pub use sandbox::MultiUseSandbox; pub use sandbox::UninitializedSandbox; /// The re-export for the `GuestBinary` type pub use sandbox::uninitialized::GuestBinary; -/// The re-export for the `GuestSemaphore` type +/// The re-export for the `GuestCounter` type #[cfg(feature = "nanvix-unstable")] -pub use sandbox::uninitialized::GuestSemaphore; +pub use sandbox::uninitialized::GuestCounter; /// The universal `Result` type used throughout the Hyperlight codebase. pub type Result = core::result::Result; diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index 8182b9250..a32908f7a 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -58,53 +58,72 @@ pub(crate) struct SandboxRuntimeConfig { pub(crate) entry_point: Option, } -/// A counting semaphore backed by a u64 in guest shared memory. +/// A host-authoritative shared counter backed by a u64 in guest scratch memory. /// -/// Created via [`UninitializedSandbox::guest_semaphore()`]. The host -/// manipulates the counter with [`increment()`](Self::increment) and -/// [`decrement()`](Self::decrement); the guest reads the same location -/// with a volatile load at the agreed-upon GPA. +/// Created via [`UninitializedSandbox::guest_counter()`]. The host owns +/// the counter value and is the only writer: [`increment()`](Self::increment) +/// and [`decrement()`](Self::decrement) update the cached value and issue a +/// single `write_volatile` to shared memory. 
[`value()`](Self::value) returns +/// the cached value — the host never reads back from guest memory, preventing +/// a confused-deputy attack where a malicious guest could manipulate the counter. +/// +/// Thread safety is provided by an internal `Mutex`, so `increment()` and +/// `decrement()` take `&self` rather than `&mut self`. #[cfg(feature = "nanvix-unstable")] -#[derive(Debug)] -pub struct GuestSemaphore { +pub struct GuestCounter { + inner: Mutex, +} + +#[cfg(feature = "nanvix-unstable")] +struct GuestCounterInner { ptr: *mut u64, value: u64, } +#[cfg(feature = "nanvix-unstable")] +impl core::fmt::Debug for GuestCounter { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("GuestCounter").finish_non_exhaustive() + } +} + // SAFETY: The pointer targets mmap'd shared memory owned by the sandbox. -// Access must be externally synchronised (e.g. via Mutex). +// The internal Mutex serialises all host-side access. #[cfg(feature = "nanvix-unstable")] -unsafe impl Send for GuestSemaphore {} +unsafe impl Send for GuestCounterInner {} #[cfg(feature = "nanvix-unstable")] -impl GuestSemaphore { +impl GuestCounter { /// Increments the counter by one and writes it to guest memory with /// a volatile store. - pub fn increment(&mut self) -> Result<()> { - self.value = self + pub fn increment(&self) -> Result<()> { + let mut inner = self.inner.lock().map_err(|e| new_error!("{e}"))?; + inner.value = inner .value .checked_add(1) - .ok_or_else(|| new_error!("GuestSemaphore overflow"))?; + .ok_or_else(|| new_error!("GuestCounter overflow"))?; // SAFETY: `ptr` was validated as aligned and in-bounds at construction time. - unsafe { core::ptr::write_volatile(self.ptr, self.value) }; + unsafe { core::ptr::write_volatile(inner.ptr, inner.value) }; Ok(()) } /// Decrements the counter by one and writes it to guest memory with /// a volatile store. 
- pub fn decrement(&mut self) -> Result<()> { - self.value = self + pub fn decrement(&self) -> Result<()> { + let mut inner = self.inner.lock().map_err(|e| new_error!("{e}"))?; + inner.value = inner .value .checked_sub(1) - .ok_or_else(|| new_error!("GuestSemaphore underflow"))?; + .ok_or_else(|| new_error!("GuestCounter underflow"))?; // SAFETY: `ptr` was validated as aligned and in-bounds at construction time. - unsafe { core::ptr::write_volatile(self.ptr, self.value) }; + unsafe { core::ptr::write_volatile(inner.ptr, inner.value) }; Ok(()) } /// Returns the current host-side value of the counter. - pub fn value(&self) -> u64 { - self.value + pub fn value(&self) -> Result { + let inner = self.inner.lock().map_err(|e| new_error!("{e}"))?; + Ok(inner.value) } } @@ -223,52 +242,38 @@ impl<'a> From> for GuestEnvironment<'a, '_> { } impl UninitializedSandbox { - /// Creates a [`GuestSemaphore`] backed by a u64 counter at the given - /// guest physical address (GPA). + /// Creates a [`GuestCounter`] backed by a u64 at a fixed offset in + /// scratch memory. /// - /// The GPA must fall within the sandbox's allocated memory region and - /// be 8-byte aligned. + /// The counter lives at `SCRATCH_TOP_GUEST_COUNTER_OFFSET` from + /// the top of scratch, so both host and guest can locate it without + /// an explicit GPA parameter. /// /// # Safety /// - /// The caller must ensure the returned `GuestSemaphore` does not + /// The caller must ensure the returned `GuestCounter` does not /// outlive the sandbox's underlying shared memory mapping (which /// stays alive through `evolve()` into `MultiUseSandbox`). 
#[cfg(feature = "nanvix-unstable")] - pub unsafe fn guest_semaphore(&mut self, gpa: usize) -> Result { - let base = SandboxMemoryLayout::BASE_ADDRESS; - let mem_size = self.mgr.shared_mem.mem_size(); + pub unsafe fn guest_counter(&mut self) -> Result { + use hyperlight_common::layout::SCRATCH_TOP_GUEST_COUNTER_OFFSET; - if gpa < base { - return Err(new_error!( - "GPA {:#x} is below the sandbox base address ({:#x})", - gpa, - base - )); - } - - let offset = gpa - base; - - if gpa % core::mem::align_of::() != 0 { - return Err(new_error!("GPA {:#x} is not 8-byte aligned", gpa,)); - } - - let end = offset - .checked_add(core::mem::size_of::()) - .ok_or_else(|| new_error!("GPA {:#x} causes offset overflow", gpa))?; - if end > mem_size { + let scratch_size = self.mgr.scratch_mem.mem_size(); + if (SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize) + core::mem::size_of::() > scratch_size + { return Err(new_error!( - "GPA {:#x} (offset {:#x}, end {:#x}) is outside the sandbox memory region (size {:#x})", - gpa, - offset, - end, - mem_size + "scratch memory too small for guest counter (size {:#x}, need offset {:#x})", + scratch_size, + SCRATCH_TOP_GUEST_COUNTER_OFFSET, )); } - let ptr = unsafe { self.mgr.shared_mem.base_ptr().add(offset) as *mut u64 }; + let offset = scratch_size - SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize; + let ptr = unsafe { self.mgr.scratch_mem.base_ptr().add(offset) as *mut u64 }; let value = unsafe { core::ptr::read_volatile(ptr) }; - Ok(GuestSemaphore { ptr, value }) + Ok(GuestCounter { + inner: Mutex::new(GuestCounterInner { ptr, value }), + }) } // Creates a new uninitialized sandbox from a pre-built snapshot. 
@@ -1418,11 +1423,10 @@ mod tests { } #[cfg(feature = "nanvix-unstable")] - mod guest_semaphore_tests { + mod guest_counter_tests { use hyperlight_testing::simple_guest_as_string; use crate::UninitializedSandbox; - use crate::mem::shared_mem::SharedMemory; use crate::sandbox::uninitialized::GuestBinary; fn make_sandbox() -> UninitializedSandbox { @@ -1434,84 +1438,32 @@ mod tests { } #[test] - fn valid_gpa() { - use crate::mem::layout::SandboxMemoryLayout; - + fn create_guest_counter() { let mut sandbox = make_sandbox(); - let mem_size = sandbox.mgr.shared_mem.mem_size(); - // Pick an aligned GPA well within bounds - let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1000; - assert!(gpa % 8 == 0); - assert!(gpa - SandboxMemoryLayout::BASE_ADDRESS + 8 <= mem_size); - - let sem = unsafe { sandbox.guest_semaphore(gpa) }; - assert!(sem.is_ok()); - } - - #[test] - fn out_of_range_gpa() { - let mut sandbox = make_sandbox(); - // GPA far beyond any reasonable allocation - let sem = unsafe { sandbox.guest_semaphore(0xFFFF_FFFF_FFFF_FFF0) }; - assert!(sem.is_err()); - } - - #[test] - fn below_base_or_misaligned_gpa() { - let mut sandbox = make_sandbox(); - // GPA 1 is either below BASE_ADDRESS (default) or misaligned (nanvix-unstable) - let sem = unsafe { sandbox.guest_semaphore(1) }; - assert!(sem.is_err()); - } - - #[test] - fn misaligned_gpa() { - use crate::mem::layout::SandboxMemoryLayout; - - let mut sandbox = make_sandbox(); - // Pick a GPA that's within range but not 8-byte aligned - let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1001; - let sem = unsafe { sandbox.guest_semaphore(gpa) }; - assert!(sem.is_err()); - let err = sem.unwrap_err().to_string(); - assert!(err.contains("not 8-byte aligned"), "got: {err}"); + let counter = unsafe { sandbox.guest_counter() }; + assert!(counter.is_ok()); } #[test] fn increment_decrement() { - use crate::mem::layout::SandboxMemoryLayout; - let mut sandbox = make_sandbox(); - let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1000; - 
let mut sem = unsafe { sandbox.guest_semaphore(gpa) }.unwrap(); - - let initial = sem.value(); - sem.increment().unwrap(); - assert_eq!(sem.value(), initial + 1); - sem.increment().unwrap(); - assert_eq!(sem.value(), initial + 2); - sem.decrement().unwrap(); - assert_eq!(sem.value(), initial + 1); + let counter = unsafe { sandbox.guest_counter() }.unwrap(); + + let initial = counter.value().unwrap(); + counter.increment().unwrap(); + assert_eq!(counter.value().unwrap(), initial + 1); + counter.increment().unwrap(); + assert_eq!(counter.value().unwrap(), initial + 2); + counter.decrement().unwrap(); + assert_eq!(counter.value().unwrap(), initial + 1); } #[test] fn underflow_returns_error() { - use crate::mem::layout::SandboxMemoryLayout; - let mut sandbox = make_sandbox(); - let gpa = SandboxMemoryLayout::BASE_ADDRESS + 0x1000; - - // Zero the memory at the GPA first - let base = SandboxMemoryLayout::BASE_ADDRESS; - let offset = gpa - base; - unsafe { - let ptr = sandbox.mgr.shared_mem.base_ptr().add(offset) as *mut u64; - core::ptr::write_volatile(ptr, 0); - } - - let mut sem = unsafe { sandbox.guest_semaphore(gpa) }.unwrap(); - assert_eq!(sem.value(), 0); - let result = sem.decrement(); + let counter = unsafe { sandbox.guest_counter() }.unwrap(); + assert_eq!(counter.value().unwrap(), 0); + let result = counter.decrement(); assert!(result.is_err()); } } From c9c56f6a50aa30cbf283832a89186271ad0f8ff0 Mon Sep 17 00:00:00 2001 From: danbugs Date: Tue, 10 Mar 2026 21:43:56 +0000 Subject: [PATCH 4/8] fix: remove unused SandboxMemoryLayout import The GuestCounter constructor now uses scratch_mem directly instead of computing an offset from SandboxMemoryLayout::BASE_ADDRESS. 
Signed-off-by: danbugs --- src/hyperlight_host/src/sandbox/uninitialized.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index a32908f7a..3d09d74d4 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -29,8 +29,6 @@ use crate::func::host_functions::{HostFunction, register_host_function}; use crate::func::{ParameterTuple, SupportedReturnType}; #[cfg(feature = "build-metadata")] use crate::log_build_details; -#[cfg(feature = "nanvix-unstable")] -use crate::mem::layout::SandboxMemoryLayout; use crate::mem::memory_region::{DEFAULT_GUEST_BLOB_MEM_FLAGS, MemoryRegionFlags}; use crate::mem::mgr::SandboxMemoryManager; use crate::mem::shared_mem::ExclusiveSharedMemory; From c896a2a58dc6c58506a4e643f9d45dcfe93f2516 Mon Sep 17 00:00:00 2001 From: danbugs Date: Thu, 12 Mar 2026 05:23:49 +0000 Subject: [PATCH 5/8] refactor: address PR 1270 review feedback for GuestCounter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Address review comments from syntactically and jsturtevant: - Doc: replace "backed by" with "exposed to the guest via" (syntactically) - Doc: remove "confused-deputy" framing; clarify that the host never reads back from guest memory so a malicious guest cannot influence the host's view (syntactically) - Doc: add Implementation Note explaining why raw write_volatile is used instead of SharedMemory methods — GuestCounter is created on UninitializedSandbox (ExclusiveSharedMemory) but must survive across evolve() into MultiUseSandbox (HostSharedMemory); the pointer stays valid because evolve() internally clones the Arc (syntactically) - Doc: document single-instance requirement — multiple GuestCounter instances would cause cached values to diverge (jsturtevant) - Safety: move "must not outlive sandbox" and single-instance requirements into a structured # 
Safety doc on the constructor (jsturtevant) - Bounds check: remove sizeof(u64) addend since the offset constant (0x1008) already statically accounts for it (syntactically) - Remove unused SandboxMemoryLayout import Signed-off-by: danbugs --- .../src/sandbox/uninitialized.rs | 44 +++++++++++++------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index 3d09d74d4..c7a41236a 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -56,17 +56,33 @@ pub(crate) struct SandboxRuntimeConfig { pub(crate) entry_point: Option, } -/// A host-authoritative shared counter backed by a u64 in guest scratch memory. +/// A host-authoritative shared counter exposed to the guest via a `u64` +/// in guest scratch memory. /// /// Created via [`UninitializedSandbox::guest_counter()`]. The host owns /// the counter value and is the only writer: [`increment()`](Self::increment) /// and [`decrement()`](Self::decrement) update the cached value and issue a /// single `write_volatile` to shared memory. [`value()`](Self::value) returns -/// the cached value — the host never reads back from guest memory, preventing -/// a confused-deputy attack where a malicious guest could manipulate the counter. +/// the cached value — the host never reads back from guest memory, so a +/// malicious guest cannot influence the host's view of the counter. /// /// Thread safety is provided by an internal `Mutex`, so `increment()` and /// `decrement()` take `&self` rather than `&mut self`. 
+/// +/// # Implementation note +/// +/// `GuestCounter` uses raw `write_volatile` instead of going through +/// [`SharedMemory`] methods because it is created on an +/// [`UninitializedSandbox`] (which holds `ExclusiveSharedMemory`), but +/// must survive across [`evolve()`](UninitializedSandbox::evolve) into +/// `MultiUseSandbox` (which holds `HostSharedMemory`). The pointer +/// remains valid because `evolve()` internally clones the backing +/// `Arc` into both `HostSharedMemory` and +/// `GuestSharedMemory`, keeping the mmap alive. +/// +/// Only **one** `GuestCounter` should be created per sandbox. Creating +/// multiple instances is safe from a memory perspective, but the +/// internal cached values will diverge, leading to incorrect writes. #[cfg(feature = "nanvix-unstable")] pub struct GuestCounter { inner: Mutex, @@ -240,25 +256,27 @@ impl<'a> From> for GuestEnvironment<'a, '_> { } impl UninitializedSandbox { - /// Creates a [`GuestCounter`] backed by a u64 at a fixed offset in - /// scratch memory. + /// Creates a [`GuestCounter`] at a fixed offset in scratch memory. /// - /// The counter lives at `SCRATCH_TOP_GUEST_COUNTER_OFFSET` from - /// the top of scratch, so both host and guest can locate it without - /// an explicit GPA parameter. + /// The counter lives at `SCRATCH_TOP_GUEST_COUNTER_OFFSET` bytes from + /// the top of scratch memory, so both host and guest can locate it + /// without an explicit GPA parameter. /// /// # Safety /// - /// The caller must ensure the returned `GuestCounter` does not - /// outlive the sandbox's underlying shared memory mapping (which - /// stays alive through `evolve()` into `MultiUseSandbox`). + /// The caller must ensure: + /// - The returned `GuestCounter` does not outlive the sandbox's + /// underlying shared memory mapping (which stays alive through + /// `evolve()` into `MultiUseSandbox`). + /// - Only **one** `GuestCounter` is created per sandbox. 
Multiple + /// instances are memory-safe but their cached values will diverge, + /// leading to incorrect writes. #[cfg(feature = "nanvix-unstable")] pub unsafe fn guest_counter(&mut self) -> Result { use hyperlight_common::layout::SCRATCH_TOP_GUEST_COUNTER_OFFSET; let scratch_size = self.mgr.scratch_mem.mem_size(); - if (SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize) + core::mem::size_of::() > scratch_size - { + if (SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize) > scratch_size { return Err(new_error!( "scratch memory too small for guest counter (size {:#x}, need offset {:#x})", scratch_size, From 4d829b269903b2f0612bb15b02626c6954dfb4cf Mon Sep 17 00:00:00 2001 From: danbugs Date: Thu, 12 Mar 2026 18:25:54 +0000 Subject: [PATCH 6/8] refactor: use deferred HostSharedMemory for GuestCounter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the raw-pointer GuestCounter with an approach that uses Arc>> shared between ExclusiveSharedMemory and GuestCounter. 
The Option is None until build() populates it, so: - ExclusiveSharedMemory retains its lock-free exclusive phase - GuestCounter uses HostSharedMemory::write() (volatile, RwLock-guarded) - No unsafe code in the counter — constructor is safe (&mut self) - Counter operations fail with a clear error before build() Signed-off-by: danbugs --- src/hyperlight_host/src/mem/shared_mem.rs | 31 +++-- .../src/sandbox/uninitialized.rs | 126 +++++++++++------- 2 files changed, 102 insertions(+), 55 deletions(-) diff --git a/src/hyperlight_host/src/mem/shared_mem.rs b/src/hyperlight_host/src/mem/shared_mem.rs index c31ec5525..ea13cc78e 100644 --- a/src/hyperlight_host/src/mem/shared_mem.rs +++ b/src/hyperlight_host/src/mem/shared_mem.rs @@ -20,7 +20,7 @@ use std::io::Error; use std::mem::{align_of, size_of}; #[cfg(target_os = "linux")] use std::ptr::null_mut; -use std::sync::{Arc, RwLock}; +use std::sync::{Arc, Mutex, RwLock}; use hyperlight_common::mem::PAGE_SIZE_USIZE; use tracing::{Span, instrument}; @@ -136,7 +136,12 @@ impl Drop for HostMapping { /// and taking snapshots. #[derive(Debug)] pub struct ExclusiveSharedMemory { - region: Arc, + pub(crate) region: Arc, + /// Populated by [`build()`](Self::build) with a [`HostSharedMemory`] + /// view of this region. Code that needs host-style volatile access + /// before `build()` (e.g. `GuestCounter`) can clone this `Arc` and + /// will see `Some` once `build()` completes. + pub(crate) deferred_hshm: Arc>>, } unsafe impl Send for ExclusiveSharedMemory {} @@ -321,8 +326,8 @@ unsafe impl Send for GuestSharedMemory {} /// becoming completely divorced from the view of the VM. 
#[derive(Clone, Debug)] pub struct HostSharedMemory { - region: Arc, - lock: Arc>, + pub(crate) region: Arc, + pub(crate) lock: Arc>, } unsafe impl Send for HostSharedMemory {} @@ -424,6 +429,7 @@ impl ExclusiveSharedMemory { ptr: addr as *mut u8, size: total_size, }), + deferred_hshm: Arc::new(Mutex::new(None)), }) } @@ -542,6 +548,7 @@ impl ExclusiveSharedMemory { size: total_size, handle, }), + deferred_hshm: Arc::new(Mutex::new(None)), }) } @@ -648,14 +655,18 @@ impl ExclusiveSharedMemory { /// the GuestSharedMemory. pub fn build(self) -> (HostSharedMemory, GuestSharedMemory) { let lock = Arc::new(RwLock::new(())); + let hshm = HostSharedMemory { + region: self.region.clone(), + lock: lock.clone(), + }; + // Publish the HostSharedMemory so any pre-existing GuestCounter + // can begin issuing volatile writes via the proper protocol. + *self.deferred_hshm.lock().unwrap() = Some(hshm.clone()); ( - HostSharedMemory { - region: self.region.clone(), - lock: lock.clone(), - }, + hshm, GuestSharedMemory { region: self.region.clone(), - lock: lock.clone(), + lock, }, ) } @@ -847,6 +858,7 @@ impl SharedMemory for GuestSharedMemory { .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?; let mut excl = ExclusiveSharedMemory { region: self.region.clone(), + deferred_hshm: Arc::new(Mutex::new(None)), }; let ret = f(&mut excl); drop(excl); @@ -1212,6 +1224,7 @@ impl SharedMemory for HostSharedMemory { .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?; let mut excl = ExclusiveSharedMemory { region: self.region.clone(), + deferred_hshm: Arc::new(Mutex::new(None)), }; let ret = f(&mut excl); drop(excl); diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index c7a41236a..29b71c471 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -61,28 +61,23 @@ pub(crate) struct SandboxRuntimeConfig { /// /// 
Created via [`UninitializedSandbox::guest_counter()`]. The host owns /// the counter value and is the only writer: [`increment()`](Self::increment) -/// and [`decrement()`](Self::decrement) update the cached value and issue a -/// single `write_volatile` to shared memory. [`value()`](Self::value) returns -/// the cached value — the host never reads back from guest memory, so a -/// malicious guest cannot influence the host's view of the counter. +/// and [`decrement()`](Self::decrement) update the cached value and write +/// to shared memory via [`HostSharedMemory::write()`]. [`value()`](Self::value) +/// returns the cached value — the host never reads back from guest memory, +/// so a malicious guest cannot influence the host's view of the counter. /// /// Thread safety is provided by an internal `Mutex`, so `increment()` and /// `decrement()` take `&self` rather than `&mut self`. /// -/// # Implementation note +/// The counter holds an `Arc>>` that is +/// shared with [`ExclusiveSharedMemory`]. The `Option` is `None` until +/// [`build()`](ExclusiveSharedMemory::build) populates it, at which point +/// the counter can issue volatile writes via the proper protocol. This +/// avoids adding lock overhead to `ExclusiveSharedMemory` during the +/// exclusive setup phase. /// -/// `GuestCounter` uses raw `write_volatile` instead of going through -/// [`SharedMemory`] methods because it is created on an -/// [`UninitializedSandbox`] (which holds `ExclusiveSharedMemory`), but -/// must survive across [`evolve()`](UninitializedSandbox::evolve) into -/// `MultiUseSandbox` (which holds `HostSharedMemory`). The pointer -/// remains valid because `evolve()` internally clones the backing -/// `Arc` into both `HostSharedMemory` and -/// `GuestSharedMemory`, keeping the mmap alive. -/// -/// Only **one** `GuestCounter` should be created per sandbox. 
Creating -/// multiple instances is safe from a memory perspective, but the -/// internal cached values will diverge, leading to incorrect writes. +/// The constructor takes `&mut self` on `UninitializedSandbox`, which +/// prevents creating multiple instances simultaneously. #[cfg(feature = "nanvix-unstable")] pub struct GuestCounter { inner: Mutex, @@ -90,7 +85,8 @@ pub struct GuestCounter { #[cfg(feature = "nanvix-unstable")] struct GuestCounterInner { - ptr: *mut u64, + deferred_hshm: Arc>>, + offset: usize, value: u64, } @@ -101,36 +97,45 @@ impl core::fmt::Debug for GuestCounter { } } -// SAFETY: The pointer targets mmap'd shared memory owned by the sandbox. -// The internal Mutex serialises all host-side access. -#[cfg(feature = "nanvix-unstable")] -unsafe impl Send for GuestCounterInner {} - #[cfg(feature = "nanvix-unstable")] impl GuestCounter { - /// Increments the counter by one and writes it to guest memory with - /// a volatile store. + /// Increments the counter by one and writes it to guest memory. pub fn increment(&self) -> Result<()> { let mut inner = self.inner.lock().map_err(|e| new_error!("{e}"))?; + let shm = { + let guard = inner.deferred_hshm.lock().map_err(|e| new_error!("{e}"))?; + guard + .as_ref() + .ok_or_else(|| { + new_error!("GuestCounter cannot be used before shared memory is built") + })? + .clone() + }; inner.value = inner .value .checked_add(1) .ok_or_else(|| new_error!("GuestCounter overflow"))?; - // SAFETY: `ptr` was validated as aligned and in-bounds at construction time. - unsafe { core::ptr::write_volatile(inner.ptr, inner.value) }; + shm.write::(inner.offset, inner.value)?; Ok(()) } - /// Decrements the counter by one and writes it to guest memory with - /// a volatile store. + /// Decrements the counter by one and writes it to guest memory. 
pub fn decrement(&self) -> Result<()> { let mut inner = self.inner.lock().map_err(|e| new_error!("{e}"))?; + let shm = { + let guard = inner.deferred_hshm.lock().map_err(|e| new_error!("{e}"))?; + guard + .as_ref() + .ok_or_else(|| { + new_error!("GuestCounter cannot be used before shared memory is built") + })? + .clone() + }; inner.value = inner .value .checked_sub(1) .ok_or_else(|| new_error!("GuestCounter underflow"))?; - // SAFETY: `ptr` was validated as aligned and in-bounds at construction time. - unsafe { core::ptr::write_volatile(inner.ptr, inner.value) }; + shm.write::(inner.offset, inner.value)?; Ok(()) } @@ -262,17 +267,16 @@ impl UninitializedSandbox { /// the top of scratch memory, so both host and guest can locate it /// without an explicit GPA parameter. /// - /// # Safety + /// The returned counter holds an `Arc` clone of the scratch memory's + /// `deferred_hshm`, so it will automatically gain access to the + /// [`HostSharedMemory`] once [`build()`](ExclusiveSharedMemory::build) + /// is called during [`evolve()`](Self::evolve). /// - /// The caller must ensure: - /// - The returned `GuestCounter` does not outlive the sandbox's - /// underlying shared memory mapping (which stays alive through - /// `evolve()` into `MultiUseSandbox`). - /// - Only **one** `GuestCounter` is created per sandbox. Multiple - /// instances are memory-safe but their cached values will diverge, - /// leading to incorrect writes. + /// Takes `&mut self` to prevent creating multiple instances + /// simultaneously (multiple instances would have divergent cached + /// values). 
#[cfg(feature = "nanvix-unstable")] - pub unsafe fn guest_counter(&mut self) -> Result { + pub fn guest_counter(&mut self) -> Result { use hyperlight_common::layout::SCRATCH_TOP_GUEST_COUNTER_OFFSET; let scratch_size = self.mgr.scratch_mem.mem_size(); @@ -285,10 +289,15 @@ impl UninitializedSandbox { } let offset = scratch_size - SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize; - let ptr = unsafe { self.mgr.scratch_mem.base_ptr().add(offset) as *mut u64 }; - let value = unsafe { core::ptr::read_volatile(ptr) }; + let deferred_hshm = self.mgr.scratch_mem.deferred_hshm.clone(); + let value = self.mgr.scratch_mem.read_u64(offset)?; + Ok(GuestCounter { - inner: Mutex::new(GuestCounterInner { ptr, value }), + inner: Mutex::new(GuestCounterInner { + deferred_hshm, + offset, + value, + }), }) } @@ -1440,9 +1449,12 @@ mod tests { #[cfg(feature = "nanvix-unstable")] mod guest_counter_tests { + use std::sync::{Arc, RwLock}; + use hyperlight_testing::simple_guest_as_string; use crate::UninitializedSandbox; + use crate::mem::shared_mem::HostSharedMemory; use crate::sandbox::uninitialized::GuestBinary; fn make_sandbox() -> UninitializedSandbox { @@ -1453,17 +1465,37 @@ mod tests { .expect("Failed to create sandbox") } + /// Simulate `build()` for the scratch region by storing a + /// `HostSharedMemory` into the deferred slot. 
+ fn simulate_build(sandbox: &UninitializedSandbox) { + let lock = Arc::new(RwLock::new(())); + let hshm = HostSharedMemory { + region: sandbox.mgr.scratch_mem.region.clone(), + lock, + }; + *sandbox.mgr.scratch_mem.deferred_hshm.lock().unwrap() = Some(hshm); + } + #[test] fn create_guest_counter() { let mut sandbox = make_sandbox(); - let counter = unsafe { sandbox.guest_counter() }; + let counter = sandbox.guest_counter(); assert!(counter.is_ok()); } + #[test] + fn fails_before_build() { + let mut sandbox = make_sandbox(); + let counter = sandbox.guest_counter().unwrap(); + assert!(counter.increment().is_err()); + assert!(counter.decrement().is_err()); + } + #[test] fn increment_decrement() { let mut sandbox = make_sandbox(); - let counter = unsafe { sandbox.guest_counter() }.unwrap(); + let counter = sandbox.guest_counter().unwrap(); + simulate_build(&sandbox); let initial = counter.value().unwrap(); counter.increment().unwrap(); @@ -1477,7 +1509,9 @@ mod tests { #[test] fn underflow_returns_error() { let mut sandbox = make_sandbox(); - let counter = unsafe { sandbox.guest_counter() }.unwrap(); + let counter = sandbox.guest_counter().unwrap(); + simulate_build(&sandbox); + assert_eq!(counter.value().unwrap(), 0); let result = counter.decrement(); assert!(result.is_err()); From d4d5fac3a739b1d498bbaabe1e82e64bf921ab34 Mon Sep 17 00:00:00 2001 From: danbugs Date: Thu, 12 Mar 2026 18:52:06 +0000 Subject: [PATCH 7/8] refactor: enforce single GuestCounter, harden encapsulation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add counter_taken AtomicBool to ExclusiveSharedMemory; guest_counter() returns an error on a second call instead of silently creating a duplicate with divergent cached state. - Initialize value to 0 instead of reading from the offset — scratch memory is always zero at that point. - Revert HostSharedMemory fields to private and add a #[cfg(test)] simulate_build() method so tests don't leak shm internals. 
- Add only_one_counter_allowed test. Signed-off-by: danbugs --- src/hyperlight_host/src/mem/shared_mem.rs | 40 ++++++++++++- .../src/sandbox/uninitialized.rs | 58 ++++++++++--------- 2 files changed, 68 insertions(+), 30 deletions(-) diff --git a/src/hyperlight_host/src/mem/shared_mem.rs b/src/hyperlight_host/src/mem/shared_mem.rs index ea13cc78e..830590b31 100644 --- a/src/hyperlight_host/src/mem/shared_mem.rs +++ b/src/hyperlight_host/src/mem/shared_mem.rs @@ -20,6 +20,8 @@ use std::io::Error; use std::mem::{align_of, size_of}; #[cfg(target_os = "linux")] use std::ptr::null_mut; +#[cfg(feature = "nanvix-unstable")] +use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex, RwLock}; use hyperlight_common::mem::PAGE_SIZE_USIZE; @@ -142,6 +144,11 @@ pub struct ExclusiveSharedMemory { /// before `build()` (e.g. `GuestCounter`) can clone this `Arc` and /// will see `Some` once `build()` completes. pub(crate) deferred_hshm: Arc>>, + /// Set to `true` once a [`GuestCounter`] has been handed out via + /// [`UninitializedSandbox::guest_counter()`]. Prevents creating + /// multiple counters that would have divergent cached values. + #[cfg(feature = "nanvix-unstable")] + pub(crate) counter_taken: AtomicBool, } unsafe impl Send for ExclusiveSharedMemory {} @@ -326,8 +333,8 @@ unsafe impl Send for GuestSharedMemory {} /// becoming completely divorced from the view of the VM. 
#[derive(Clone, Debug)] pub struct HostSharedMemory { - pub(crate) region: Arc, - pub(crate) lock: Arc>, + region: Arc, + lock: Arc>, } unsafe impl Send for HostSharedMemory {} @@ -430,6 +437,8 @@ impl ExclusiveSharedMemory { size: total_size, }), deferred_hshm: Arc::new(Mutex::new(None)), + #[cfg(feature = "nanvix-unstable")] + counter_taken: AtomicBool::new(false), }) } @@ -549,6 +558,8 @@ impl ExclusiveSharedMemory { handle, }), deferred_hshm: Arc::new(Mutex::new(None)), + #[cfg(feature = "nanvix-unstable")] + counter_taken: AtomicBool::new(false), }) } @@ -661,7 +672,13 @@ impl ExclusiveSharedMemory { }; // Publish the HostSharedMemory so any pre-existing GuestCounter // can begin issuing volatile writes via the proper protocol. - *self.deferred_hshm.lock().unwrap() = Some(hshm.clone()); + // The mutex is only locked here and during GuestCounter + // operations; since build() consumes self, there is no + // concurrent access that could poison it. + #[allow(clippy::unwrap_used)] + { + *self.deferred_hshm.lock().unwrap() = Some(hshm.clone()); + } ( hshm, GuestSharedMemory { @@ -671,6 +688,19 @@ impl ExclusiveSharedMemory { ) } + /// Populate the deferred `HostSharedMemory` slot without consuming + /// `self`. Used in tests where `evolve()` / full `build()` is not + /// available. 
+ #[cfg(all(test, feature = "nanvix-unstable"))] + pub(crate) fn simulate_build(&self) { + let lock = Arc::new(RwLock::new(())); + let hshm = HostSharedMemory { + region: self.region.clone(), + lock, + }; + *self.deferred_hshm.lock().unwrap() = Some(hshm); + } + /// Gets the file handle of the shared memory region for this Sandbox #[cfg(target_os = "windows")] pub fn get_mmap_file_handle(&self) -> HANDLE { @@ -859,6 +889,8 @@ impl SharedMemory for GuestSharedMemory { let mut excl = ExclusiveSharedMemory { region: self.region.clone(), deferred_hshm: Arc::new(Mutex::new(None)), + #[cfg(feature = "nanvix-unstable")] + counter_taken: AtomicBool::new(false), }; let ret = f(&mut excl); drop(excl); @@ -1225,6 +1257,8 @@ impl SharedMemory for HostSharedMemory { let mut excl = ExclusiveSharedMemory { region: self.region.clone(), deferred_hshm: Arc::new(Mutex::new(None)), + #[cfg(feature = "nanvix-unstable")] + counter_taken: AtomicBool::new(false), }; let ret = f(&mut excl); drop(excl); diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index 29b71c471..f66453e93 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -76,8 +76,8 @@ pub(crate) struct SandboxRuntimeConfig { /// avoids adding lock overhead to `ExclusiveSharedMemory` during the /// exclusive setup phase. /// -/// The constructor takes `&mut self` on `UninitializedSandbox`, which -/// prevents creating multiple instances simultaneously. +/// Only one `GuestCounter` may be created per sandbox; a second call to +/// [`UninitializedSandbox::guest_counter()`] returns an error. #[cfg(feature = "nanvix-unstable")] pub struct GuestCounter { inner: Mutex, @@ -272,13 +272,25 @@ impl UninitializedSandbox { /// [`HostSharedMemory`] once [`build()`](ExclusiveSharedMemory::build) /// is called during [`evolve()`](Self::evolve). 
/// - /// Takes `&mut self` to prevent creating multiple instances - /// simultaneously (multiple instances would have divergent cached - /// values). + /// This method can only be called once; a second call returns an error + /// because multiple counters would have divergent cached values. #[cfg(feature = "nanvix-unstable")] pub fn guest_counter(&mut self) -> Result { + use std::sync::atomic::Ordering; + use hyperlight_common::layout::SCRATCH_TOP_GUEST_COUNTER_OFFSET; + if self + .mgr + .scratch_mem + .counter_taken + .swap(true, Ordering::Relaxed) + { + return Err(new_error!( + "GuestCounter has already been created for this sandbox" + )); + } + let scratch_size = self.mgr.scratch_mem.mem_size(); if (SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize) > scratch_size { return Err(new_error!( @@ -290,13 +302,12 @@ impl UninitializedSandbox { let offset = scratch_size - SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize; let deferred_hshm = self.mgr.scratch_mem.deferred_hshm.clone(); - let value = self.mgr.scratch_mem.read_u64(offset)?; Ok(GuestCounter { inner: Mutex::new(GuestCounterInner { deferred_hshm, offset, - value, + value: 0, }), }) } @@ -1449,12 +1460,9 @@ mod tests { #[cfg(feature = "nanvix-unstable")] mod guest_counter_tests { - use std::sync::{Arc, RwLock}; - use hyperlight_testing::simple_guest_as_string; use crate::UninitializedSandbox; - use crate::mem::shared_mem::HostSharedMemory; use crate::sandbox::uninitialized::GuestBinary; fn make_sandbox() -> UninitializedSandbox { @@ -1465,17 +1473,6 @@ mod tests { .expect("Failed to create sandbox") } - /// Simulate `build()` for the scratch region by storing a - /// `HostSharedMemory` into the deferred slot. 
- fn simulate_build(sandbox: &UninitializedSandbox) { - let lock = Arc::new(RwLock::new(())); - let hshm = HostSharedMemory { - region: sandbox.mgr.scratch_mem.region.clone(), - lock, - }; - *sandbox.mgr.scratch_mem.deferred_hshm.lock().unwrap() = Some(hshm); - } - #[test] fn create_guest_counter() { let mut sandbox = make_sandbox(); @@ -1483,6 +1480,14 @@ mod tests { assert!(counter.is_ok()); } + #[test] + fn only_one_counter_allowed() { + let mut sandbox = make_sandbox(); + let _c1 = sandbox.guest_counter().unwrap(); + let c2 = sandbox.guest_counter(); + assert!(c2.is_err()); + } + #[test] fn fails_before_build() { let mut sandbox = make_sandbox(); @@ -1495,22 +1500,21 @@ mod tests { fn increment_decrement() { let mut sandbox = make_sandbox(); let counter = sandbox.guest_counter().unwrap(); - simulate_build(&sandbox); + sandbox.mgr.scratch_mem.simulate_build(); - let initial = counter.value().unwrap(); counter.increment().unwrap(); - assert_eq!(counter.value().unwrap(), initial + 1); + assert_eq!(counter.value().unwrap(), 1); counter.increment().unwrap(); - assert_eq!(counter.value().unwrap(), initial + 2); + assert_eq!(counter.value().unwrap(), 2); counter.decrement().unwrap(); - assert_eq!(counter.value().unwrap(), initial + 1); + assert_eq!(counter.value().unwrap(), 1); } #[test] fn underflow_returns_error() { let mut sandbox = make_sandbox(); let counter = sandbox.guest_counter().unwrap(); - simulate_build(&sandbox); + sandbox.mgr.scratch_mem.simulate_build(); assert_eq!(counter.value().unwrap(), 0); let result = counter.decrement(); From df9e3c2890c0bfda2aa80cd81b607da7e8e12e6a Mon Sep 17 00:00:00 2001 From: danbugs Date: Thu, 12 Mar 2026 20:47:56 +0000 Subject: [PATCH 8/8] refactor: move deferred_hshm and counter_taken to UninitializedSandbox ExclusiveSharedMemory's purpose is lock-free bulk memory operations. 
Storing deferred_hshm (Arc<Mutex<Option<HostSharedMemory>>>) and counter_taken (AtomicBool) on it added state that conceptually belongs to the sandbox lifetime, not the memory region lifecycle. This commit moves both fields to UninitializedSandbox, populates deferred_hshm in evolve_impl_multi_use() after build() returns the HostSharedMemory, and adds as_host_shared_memory() on ExclusiveSharedMemory (test-only) so UninitializedSandbox::simulate_build() can construct the HostSharedMemory without accessing private fields across modules. Signed-off-by: danbugs --- src/hyperlight_host/src/mem/shared_mem.rs | 62 ++++------------- .../src/sandbox/uninitialized.rs | 67 +++++++++++++------ .../src/sandbox/uninitialized_evolve.rs | 15 +++++ 3 files changed, 74 insertions(+), 70 deletions(-) diff --git a/src/hyperlight_host/src/mem/shared_mem.rs b/src/hyperlight_host/src/mem/shared_mem.rs index 830590b31..5c4a1a63f 100644 --- a/src/hyperlight_host/src/mem/shared_mem.rs +++ b/src/hyperlight_host/src/mem/shared_mem.rs @@ -20,9 +20,7 @@ use std::io::Error; use std::mem::{align_of, size_of}; #[cfg(target_os = "linux")] use std::ptr::null_mut; -#[cfg(feature = "nanvix-unstable")] -use std::sync::atomic::AtomicBool; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, RwLock}; use hyperlight_common::mem::PAGE_SIZE_USIZE; use tracing::{Span, instrument}; @@ -138,17 +136,7 @@ impl Drop for HostMapping { /// and taking snapshots. #[derive(Debug)] pub struct ExclusiveSharedMemory { - pub(crate) region: Arc<HostMapping>, - /// Populated by [`build()`](Self::build) with a [`HostSharedMemory`] - /// view of this region. Code that needs host-style volatile access - /// before `build()` (e.g. `GuestCounter`) can clone this `Arc` and - /// will see `Some` once `build()` completes. - pub(crate) deferred_hshm: Arc<Mutex<Option<HostSharedMemory>>>, - /// Set to `true` once a [`GuestCounter`] has been handed out via - /// [`UninitializedSandbox::guest_counter()`]. Prevents creating - /// multiple counters that would have divergent cached values. 
- #[cfg(feature = "nanvix-unstable")] - pub(crate) counter_taken: AtomicBool, + region: Arc, } unsafe impl Send for ExclusiveSharedMemory {} @@ -436,9 +424,6 @@ impl ExclusiveSharedMemory { ptr: addr as *mut u8, size: total_size, }), - deferred_hshm: Arc::new(Mutex::new(None)), - #[cfg(feature = "nanvix-unstable")] - counter_taken: AtomicBool::new(false), }) } @@ -557,9 +542,6 @@ impl ExclusiveSharedMemory { size: total_size, handle, }), - deferred_hshm: Arc::new(Mutex::new(None)), - #[cfg(feature = "nanvix-unstable")] - counter_taken: AtomicBool::new(false), }) } @@ -670,15 +652,6 @@ impl ExclusiveSharedMemory { region: self.region.clone(), lock: lock.clone(), }; - // Publish the HostSharedMemory so any pre-existing GuestCounter - // can begin issuing volatile writes via the proper protocol. - // The mutex is only locked here and during GuestCounter - // operations; since build() consumes self, there is no - // concurrent access that could poison it. - #[allow(clippy::unwrap_used)] - { - *self.deferred_hshm.lock().unwrap() = Some(hshm.clone()); - } ( hshm, GuestSharedMemory { @@ -688,24 +661,23 @@ impl ExclusiveSharedMemory { ) } - /// Populate the deferred `HostSharedMemory` slot without consuming - /// `self`. Used in tests where `evolve()` / full `build()` is not - /// available. - #[cfg(all(test, feature = "nanvix-unstable"))] - pub(crate) fn simulate_build(&self) { - let lock = Arc::new(RwLock::new(())); - let hshm = HostSharedMemory { - region: self.region.clone(), - lock, - }; - *self.deferred_hshm.lock().unwrap() = Some(hshm); - } - /// Gets the file handle of the shared memory region for this Sandbox #[cfg(target_os = "windows")] pub fn get_mmap_file_handle(&self) -> HANDLE { self.region.handle } + + /// Create a [`HostSharedMemory`] view of this region without + /// consuming `self`. Used in tests where the full `build()` / + /// `evolve()` pipeline is not available. 
+ #[cfg(all(test, feature = "nanvix-unstable"))] + pub(crate) fn as_host_shared_memory(&self) -> HostSharedMemory { + let lock = Arc::new(RwLock::new(())); + HostSharedMemory { + region: self.region.clone(), + lock, + } + } } impl GuestSharedMemory { @@ -888,9 +860,6 @@ impl SharedMemory for GuestSharedMemory { .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?; let mut excl = ExclusiveSharedMemory { region: self.region.clone(), - deferred_hshm: Arc::new(Mutex::new(None)), - #[cfg(feature = "nanvix-unstable")] - counter_taken: AtomicBool::new(false), }; let ret = f(&mut excl); drop(excl); @@ -1256,9 +1225,6 @@ impl SharedMemory for HostSharedMemory { .map_err(|e| new_error!("Error locking at {}:{}: {}", file!(), line!(), e))?; let mut excl = ExclusiveSharedMemory { region: self.region.clone(), - deferred_hshm: Arc::new(Mutex::new(None)), - #[cfg(feature = "nanvix-unstable")] - counter_taken: AtomicBool::new(false), }; let ret = f(&mut excl); drop(excl); diff --git a/src/hyperlight_host/src/sandbox/uninitialized.rs b/src/hyperlight_host/src/sandbox/uninitialized.rs index f66453e93..66c2c875b 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized.rs @@ -33,6 +33,8 @@ use crate::mem::memory_region::{DEFAULT_GUEST_BLOB_MEM_FLAGS, MemoryRegionFlags} use crate::mem::mgr::SandboxMemoryManager; use crate::mem::shared_mem::ExclusiveSharedMemory; #[cfg(feature = "nanvix-unstable")] +use crate::mem::shared_mem::HostSharedMemory; +#[cfg(feature = "nanvix-unstable")] use crate::mem::shared_mem::SharedMemory; use crate::sandbox::SandboxConfiguration; use crate::{MultiUseSandbox, Result, new_error}; @@ -70,11 +72,9 @@ pub(crate) struct SandboxRuntimeConfig { /// `decrement()` take `&self` rather than `&mut self`. /// /// The counter holds an `Arc>>` that is -/// shared with [`ExclusiveSharedMemory`]. 
The `Option` is `None` until -/// [`build()`](ExclusiveSharedMemory::build) populates it, at which point -/// the counter can issue volatile writes via the proper protocol. This -/// avoids adding lock overhead to `ExclusiveSharedMemory` during the -/// exclusive setup phase. +/// shared with [`UninitializedSandbox`]. The `Option` is `None` until +/// [`evolve()`](UninitializedSandbox::evolve) populates it, at which point +/// the counter can issue volatile writes via the proper protocol. /// /// Only one `GuestCounter` may be created per sandbox; a second call to /// [`UninitializedSandbox::guest_counter()`] returns an error. @@ -85,7 +85,7 @@ pub struct GuestCounter { #[cfg(feature = "nanvix-unstable")] struct GuestCounterInner { - deferred_hshm: Arc>>, + deferred_hshm: Arc>>, offset: usize, value: u64, } @@ -111,11 +111,12 @@ impl GuestCounter { })? .clone() }; - inner.value = inner + let new_value = inner .value .checked_add(1) .ok_or_else(|| new_error!("GuestCounter overflow"))?; - shm.write::(inner.offset, inner.value)?; + shm.write::(inner.offset, new_value)?; + inner.value = new_value; Ok(()) } @@ -131,11 +132,12 @@ impl GuestCounter { })? .clone() }; - inner.value = inner + let new_value = inner .value .checked_sub(1) .ok_or_else(|| new_error!("GuestCounter underflow"))?; - shm.write::(inner.offset, inner.value)?; + shm.write::(inner.offset, new_value)?; + inner.value = new_value; Ok(()) } @@ -170,6 +172,17 @@ pub struct UninitializedSandbox { // This is needed to convey the stack pointer between the snapshot // and the HyperlightVm creation pub(crate) stack_top_gva: u64, + /// Populated by [`evolve()`](Self::evolve) with a [`HostSharedMemory`] + /// view of scratch memory. Code that needs host-style volatile access + /// before `evolve()` (e.g. `GuestCounter`) can clone this `Arc` and + /// will see `Some` once `evolve()` completes. 
+ #[cfg(feature = "nanvix-unstable")] + pub(crate) deferred_hshm: Arc>>, + /// Set to `true` once a [`GuestCounter`] has been handed out via + /// [`guest_counter()`](Self::guest_counter). Prevents creating + /// multiple counters that would have divergent cached values. + #[cfg(feature = "nanvix-unstable")] + counter_taken: std::sync::atomic::AtomicBool, } impl Debug for UninitializedSandbox { @@ -267,10 +280,9 @@ impl UninitializedSandbox { /// the top of scratch memory, so both host and guest can locate it /// without an explicit GPA parameter. /// - /// The returned counter holds an `Arc` clone of the scratch memory's + /// The returned counter holds an `Arc` clone of the sandbox's /// `deferred_hshm`, so it will automatically gain access to the - /// [`HostSharedMemory`] once [`build()`](ExclusiveSharedMemory::build) - /// is called during [`evolve()`](Self::evolve). + /// [`HostSharedMemory`] once [`evolve()`](Self::evolve) completes. /// /// This method can only be called once; a second call returns an error /// because multiple counters would have divergent cached values. 
@@ -280,12 +292,7 @@ impl UninitializedSandbox { use hyperlight_common::layout::SCRATCH_TOP_GUEST_COUNTER_OFFSET; - if self - .mgr - .scratch_mem - .counter_taken - .swap(true, Ordering::Relaxed) - { + if self.counter_taken.swap(true, Ordering::Relaxed) { return Err(new_error!( "GuestCounter has already been created for this sandbox" )); @@ -301,7 +308,7 @@ impl UninitializedSandbox { } let offset = scratch_size - SCRATCH_TOP_GUEST_COUNTER_OFFSET as usize; - let deferred_hshm = self.mgr.scratch_mem.deferred_hshm.clone(); + let deferred_hshm = self.deferred_hshm.clone(); Ok(GuestCounter { inner: Mutex::new(GuestCounterInner { @@ -370,6 +377,10 @@ impl UninitializedSandbox { rt_cfg, load_info: snapshot.load_info(), stack_top_gva: snapshot.stack_top_gva(), + #[cfg(feature = "nanvix-unstable")] + deferred_hshm: Arc::new(Mutex::new(None)), + #[cfg(feature = "nanvix-unstable")] + counter_taken: std::sync::atomic::AtomicBool::new(false), }; // If we were passed a writer for host print register it otherwise use the default. @@ -449,6 +460,18 @@ impl UninitializedSandbox { ) -> Result<()> { self.register("HostPrint", print_func) } + + /// Populate the deferred `HostSharedMemory` slot without running + /// the full `evolve()` pipeline. Used in tests where guest boot + /// is not available. 
+ #[cfg(all(test, feature = "nanvix-unstable"))] + fn simulate_build(&self) { + let hshm = self.mgr.scratch_mem.as_host_shared_memory(); + #[allow(clippy::unwrap_used)] + { + *self.deferred_hshm.lock().unwrap() = Some(hshm); + } + } } // Check to see if the current version of Windows is supported // Hyperlight is only supported on Windows 11 and Windows Server 2022 and later @@ -1500,7 +1523,7 @@ mod tests { fn increment_decrement() { let mut sandbox = make_sandbox(); let counter = sandbox.guest_counter().unwrap(); - sandbox.mgr.scratch_mem.simulate_build(); + sandbox.simulate_build(); counter.increment().unwrap(); assert_eq!(counter.value().unwrap(), 1); @@ -1514,7 +1537,7 @@ mod tests { fn underflow_returns_error() { let mut sandbox = make_sandbox(); let counter = sandbox.guest_counter().unwrap(); - sandbox.mgr.scratch_mem.simulate_build(); + sandbox.simulate_build(); assert_eq!(counter.value().unwrap(), 0); let result = counter.decrement(); diff --git a/src/hyperlight_host/src/sandbox/uninitialized_evolve.rs b/src/hyperlight_host/src/sandbox/uninitialized_evolve.rs index d1450cab1..b4c95013b 100644 --- a/src/hyperlight_host/src/sandbox/uninitialized_evolve.rs +++ b/src/hyperlight_host/src/sandbox/uninitialized_evolve.rs @@ -38,6 +38,21 @@ use crate::{MultiUseSandbox, Result, UninitializedSandbox}; #[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")] pub(super) fn evolve_impl_multi_use(u_sbox: UninitializedSandbox) -> Result { let (mut hshm, gshm) = u_sbox.mgr.build()?; + + // Publish the HostSharedMemory for scratch so any pre-existing + // GuestCounter can begin issuing volatile writes. + #[cfg(feature = "nanvix-unstable")] + { + #[allow(clippy::unwrap_used)] + // The mutex can only be poisoned if a previous lock holder + // panicked. Since we are the only writer at this point (the + // GuestCounter only reads inside `increment`/`decrement`), + // poisoning cannot happen. 
+ { + *u_sbox.deferred_hshm.lock().unwrap() = Some(hshm.scratch_mem.clone()); + } + } + let mut vm = set_up_hypervisor_partition( gshm, &u_sbox.config,