Remove kernel stack and boot stack memory regions #451

Merged: 2 commits, May 1, 2025
5 changes: 3 additions & 2 deletions src/hyperlight_host/src/hypervisor/kvm.rs
@@ -569,8 +569,9 @@ impl Hypervisor for KVMDriver {
}
},
Ok(other) => {
crate::debug!("KVM Other Exit {:?}", other);
HyperlightExit::Unknown(format!("Unexpected KVM Exit {:?}", other))
let err_msg = format!("Unexpected KVM Exit {:?}", other);
crate::debug!("KVM Other Exit Details: {:#?}", &self);
HyperlightExit::Unknown(err_msg)
}
};
Ok(result)
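A side note on the formatting used in the new debug line: `{:?}` prints a `Debug` value in its compact single-line form, while `{:#?}` pretty-prints it across indented lines, which is what makes dumping the whole driver state via `&self` readable in trace output. A tiny self-contained illustration (the `ExitInfo` type is invented for this example and is not part of Hyperlight):

#[derive(Debug)]
struct ExitInfo {
    reason: &'static str,
    code: u32,
}

fn main() {
    let info = ExitInfo { reason: "Unknown", code: 42 };
    // Compact, single-line form, as used for the error message string.
    println!("{:?}", info);
    // Pretty-printed, multi-line form, as used for the detailed trace dump.
    println!("{:#?}", info);
}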
208 changes: 5 additions & 203 deletions src/hyperlight_host/src/mem/layout.rs
@@ -22,8 +22,8 @@ use rand::{rng, RngCore};
use tracing::{instrument, Span};

use super::memory_region::MemoryRegionType::{
BootStack, Code, GuardPage, GuestErrorData, Heap, HostExceptionData, HostFunctionDefinitions,
InputData, KernelStack, OutputData, PageTables, PanicContext, Peb, Stack,
Code, GuardPage, GuestErrorData, Heap, HostExceptionData, HostFunctionDefinitions, InputData,
OutputData, PageTables, PanicContext, Peb, Stack,
};
use super::memory_region::{MemoryRegion, MemoryRegionFlags, MemoryRegionVecBuilder};
use super::mgr::AMOUNT_OF_MEMORY_PER_PT;
@@ -32,14 +32,6 @@ use crate::error::HyperlightError::{GuestOffsetIsInvalid, MemoryRequestTooBig};
use crate::sandbox::SandboxConfiguration;
use crate::{log_then_return, new_error, Result};

// +-------------------------------------------+
// | Boot Stack (4KiB) |
// +-------------------------------------------+
// | Kernel Stack Guard Page (4KiB) |
// +-------------------------------------------+
// | Kernel Stack |
// +-------------------------------------------+
// | Guest Stack Guard Page (4KiB) |
// +-------------------------------------------+
// | Guest (User) Stack |
// +-------------------------------------------+
@@ -71,10 +63,6 @@ use crate::{log_then_return, new_error, Result};
// +-------------------------------------------+ 0x1_000
// | PML4 |
// +-------------------------------------------+ 0x0_000
// | ⋮ |
// | Unmapped |
// | ⋮ |
// +-------------------------------------------+ 0x0
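Background for the bottom of this diagram: the page-table area (PML4 at offset 0x0) is the standard x86-64 four-level hierarchy. The sketch below is generic x86-64 arithmetic, not Hyperlight code, showing how a guest virtual address decomposes into the indexes those tables resolve:

// Generic x86-64 4-level paging index extraction (illustration only).
fn split_virtual_address(va: u64) -> (u64, u64, u64, u64, u64) {
    let pml4 = (va >> 39) & 0x1FF; // 512 entries per table => 9 index bits
    let pdpt = (va >> 30) & 0x1FF;
    let pd = (va >> 21) & 0x1FF;
    let pt = (va >> 12) & 0x1FF;
    let offset = va & 0xFFF; // 4 KiB page offset
    (pml4, pdpt, pd, pt, offset)
}

fn main() {
    // 0x20_0000 (2 MiB) lands in PD entry 1, PT entry 0, page offset 0.
    assert_eq!(split_virtual_address(0x20_0000), (0, 0, 1, 0, 0));
}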

///
/// - `HostDefinitions` - the length of this is the `HostFunctionDefinitionSize`
Expand Down Expand Up @@ -106,11 +94,6 @@ use crate::{log_then_return, new_error, Result};
/// - `GuestPanicContext` - contains a buffer for context associated with any guest
/// panic that occurred.
/// the length of this field is returned by the `guest_panic_context_size()` fn of this struct.
///
/// Boot Stack - this is the stack that is used before the TSS is set up. It is fixed to 4K
/// Kernel Stack Guard Page is to Guard against boot stack overflow so we dont corrupt the kernel stack
/// Kernel Stack - this is the stack that is used for kernel mode operations we switch to this early in the initialization function
/// Guest Stack Guard Page is to Guard against kernel stack overflow so we dont corrupt the user stack

#[derive(Copy, Clone)]
pub(crate) struct SandboxMemoryLayout {
@@ -147,12 +130,6 @@ pub(crate) struct SandboxMemoryLayout {
guest_heap_buffer_offset: usize,
guard_page_offset: usize,
guest_user_stack_buffer_offset: usize, // the lowest address of the user stack
user_stack_guard_page_offset: usize,
kernel_stack_buffer_offset: usize,
kernel_stack_guard_page_offset: usize,
#[allow(dead_code)]
pub(super) kernel_stack_size_rounded: usize,
boot_stack_buffer_offset: usize,

// other
pub(crate) peb_address: usize,
@@ -263,22 +240,6 @@ impl Debug for SandboxMemoryLayout {
"Guest Code Offset",
&format_args!("{:#x}", self.guest_code_offset),
)
.field(
"User Stack Guard Page Offset",
&format_args!("{:#x}", self.user_stack_guard_page_offset),
)
.field(
"Kernel Stack Buffer Offset",
&format_args!("{:#x}", self.kernel_stack_buffer_offset),
)
.field(
"Kernel Stack Guard Page Offset",
&format_args!("{:#x}", self.kernel_stack_guard_page_offset),
)
.field(
"Boot Stack Buffer Offset",
&format_args!("{:#x}", self.boot_stack_buffer_offset),
)
.finish()
}
}
@@ -384,12 +345,6 @@ impl SandboxMemoryLayout {
// round up stack size to page size. This is needed for MemoryRegion
let stack_size_rounded = round_up_to(stack_size, PAGE_SIZE_USIZE);

let user_stack_guard_page_offset = guest_user_stack_buffer_offset + stack_size_rounded;
let kernel_stack_buffer_offset = user_stack_guard_page_offset + PAGE_SIZE_USIZE;
let kernel_stack_size_rounded = round_up_to(cfg.get_kernel_stack_size(), PAGE_SIZE_USIZE);
let kernel_stack_guard_page_offset = kernel_stack_buffer_offset + kernel_stack_size_rounded;
let boot_stack_buffer_offset = kernel_stack_guard_page_offset + PAGE_SIZE_USIZE;

Ok(Self {
peb_offset,
stack_size: stack_size_rounded,
@@ -420,11 +375,6 @@
guard_page_offset,
total_page_table_size,
guest_code_offset,
user_stack_guard_page_offset,
kernel_stack_buffer_offset,
kernel_stack_guard_page_offset,
kernel_stack_size_rounded,
boot_stack_buffer_offset,
})
}
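The constructor above rounds the requested stack size up to a page boundary before chaining the region offsets. A stand-alone sketch of that align-up arithmetic, assuming `round_up_to` behaves like a conventional align-up helper (the real helper lives elsewhere in the crate):

const PAGE_SIZE_USIZE: usize = 0x1000; // assumed 4 KiB pages, as in the layout above

// Round `value` up to the next multiple of `multiple` (illustrative reimplementation).
fn round_up_to(value: usize, multiple: usize) -> usize {
    (value + multiple - 1) / multiple * multiple
}

fn main() {
    // A 5 KiB stack request is padded to two 4 KiB pages ...
    assert_eq!(round_up_to(5 * 1024, PAGE_SIZE_USIZE), 2 * PAGE_SIZE_USIZE);
    // ... while an exact multiple is left unchanged.
    assert_eq!(round_up_to(8 * 1024, PAGE_SIZE_USIZE), 2 * PAGE_SIZE_USIZE);
}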

@@ -591,22 +541,6 @@ impl SandboxMemoryLayout {
self.get_min_guest_stack_address_offset() + size_of::<u64>()
}

/// Get the offset of the kernel stack pointer in guest memory,
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
fn get_kernel_stack_pointer_offset(&self) -> usize {
// The kernelStackAddress is immediately after the
// userStackAddress in the `GuestStackData` struct which is a `u64`.
self.get_user_stack_pointer_offset() + size_of::<u64>()
}

/// Get the offset of the boot stack pointer in guest memory,
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
fn get_boot_stack_pointer_offset(&self) -> usize {
// The bootStackAddress is immediately after the
// kernelStackAddress in the `GuestStackData` struct which is a `u64`.
self.get_kernel_stack_pointer_offset() + size_of::<u64>()
}
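The two removed accessors walked consecutive `u64` fields of `GuestStackData`, each one `size_of::<u64>()` past the previous. The `repr(C)` struct below is a hypothetical sketch of that layout (field names invented for illustration, not the actual guest ABI definition), showing why each step adds exactly eight bytes:

use std::mem::{offset_of, size_of};

#[repr(C)]
struct GuestStackDataSketch {
    min_user_stack_address: u64,
    user_stack_address: u64,
    kernel_stack_address: u64,
    boot_stack_address: u64,
}

fn main() {
    // Each field sits one u64 (8 bytes) after the previous one.
    assert_eq!(offset_of!(GuestStackDataSketch, user_stack_address), size_of::<u64>());
    assert_eq!(offset_of!(GuestStackDataSketch, kernel_stack_address), 2 * size_of::<u64>());
    assert_eq!(offset_of!(GuestStackDataSketch, boot_stack_address), 3 * size_of::<u64>());
}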

// Get the offset in guest memory to the start of the guest panic context data
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(crate) fn get_guest_panic_context_offset(&self) -> usize {
@@ -644,7 +578,7 @@ impl SandboxMemoryLayout {
/// layout.
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
fn get_unaligned_memory_size(&self) -> usize {
self.get_boot_stack_buffer_offset() + PAGE_SIZE_USIZE
self.get_top_of_user_stack_offset() + self.stack_size
}

/// get the code offset
@@ -660,34 +594,6 @@ impl SandboxMemoryLayout {
Self::BASE_ADDRESS + self.guest_code_offset
}

/// Get the offset in guest memory to the user stack guard page
/// This is the offset in the sandbox memory where the user stack guard page starts
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(super) fn get_user_stack_guard_page_offset(&self) -> usize {
self.user_stack_guard_page_offset
}

/// Get the offset in guest memory to the kernel stack buffer
/// This is the offset in the sandbox memory where the kernel stack starts
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(super) fn get_kernel_stack_buffer_offset(&self) -> usize {
self.kernel_stack_buffer_offset
}

/// Get the offset in guest memory to the kernel stack guard page
/// This is the offset in the sandbox memory where the kernel stack guard page starts
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(super) fn get_kernel_stack_guard_page_offset(&self) -> usize {
self.kernel_stack_guard_page_offset
}

/// Get the offset in guest memory to the boot stack buffer
/// This is the offset in the sandbox memory where the boot stack starts
#[instrument(skip_all, parent = Span::current(), level= "Trace")]
pub(super) fn get_boot_stack_buffer_offset(&self) -> usize {
self.boot_stack_buffer_offset
}

#[cfg(test)]
/// Get the page table size
fn get_page_table_size(&self) -> usize {
@@ -971,80 +877,12 @@ impl SandboxMemoryLayout {
}

// stack
let user_stack_guard_page_offset = builder.push_page_aligned(
let final_offset = builder.push_page_aligned(
self.get_guest_stack_size(),
MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
Stack,
);

let expected_user_stack_guard_page_offset =
TryInto::<usize>::try_into(self.get_top_of_user_stack_offset())?
+ self.get_guest_stack_size();

if user_stack_guard_page_offset != expected_user_stack_guard_page_offset {
return Err(new_error!(
"User Guard Page offset does not match expected User Guard Page offset expected: {}, actual: {}",
expected_user_stack_guard_page_offset,
user_stack_guard_page_offset
));
}

let kernel_stack_offset = builder.push_page_aligned(
PAGE_SIZE_USIZE,
MemoryRegionFlags::READ | MemoryRegionFlags::STACK_GUARD,
GuardPage,
);

let expected_kernel_stack_offset =
TryInto::<usize>::try_into(self.kernel_stack_buffer_offset)?;

if kernel_stack_offset != expected_kernel_stack_offset {
return Err(new_error!(
"Kernel Stack offset does not match expected Kernel Stack offset expected: {}, actual: {}",
expected_kernel_stack_offset,
kernel_stack_offset
));
}

let kernel_stack_guard_page_offset = builder.push_page_aligned(
self.kernel_stack_size_rounded,
MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
KernelStack,
);

let expected_kernel_stack_guard_page_offset =
TryInto::<usize>::try_into(self.kernel_stack_guard_page_offset)?;

if kernel_stack_guard_page_offset != expected_kernel_stack_guard_page_offset {
return Err(new_error!(
"Kernel Guard Page offset does not match expected Kernel Guard Page offset expected: {}, actual: {}",
expected_kernel_stack_guard_page_offset,
kernel_stack_guard_page_offset
));
}

let boot_stack_offset = builder.push_page_aligned(
PAGE_SIZE_USIZE,
MemoryRegionFlags::READ | MemoryRegionFlags::STACK_GUARD,
GuardPage,
);

let expected_boot_stack_offset = TryInto::<usize>::try_into(self.boot_stack_buffer_offset)?;

if boot_stack_offset != expected_boot_stack_offset {
return Err(new_error!(
"Boot Stack offset does not match expected Boot Stack offset expected: {}, actual: {}",
expected_boot_stack_offset,
boot_stack_offset
));
}

let final_offset = builder.push_page_aligned(
PAGE_SIZE_USIZE,
MemoryRegionFlags::READ | MemoryRegionFlags::WRITE,
BootStack,
);

let expected_final_offset = TryInto::<usize>::try_into(self.get_memory_size()?)?;

if final_offset != expected_final_offset {
@@ -1187,17 +1025,9 @@ impl SandboxMemoryLayout {

// The top of the user stack is calculated as the guest offset plus the size of the guest memory, which gives us
// the address at the bottom (end) of the guest memory region.
// we then subtract the size of the stack, the size of the kernel stack,
// the size of the boot stack, the size of the user stack guard page and the size of the kernel stack guard page
// which are all 4K

let bottom = guest_offset + size;
let min_user_stack_address = bottom
- self.stack_size
- self.kernel_stack_size_rounded
- PAGE_SIZE_USIZE
- PAGE_SIZE_USIZE
- PAGE_SIZE_USIZE;
let min_user_stack_address = bottom - self.stack_size;
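With the kernel and boot stacks gone, the lowest user-stack address is simply the end of guest memory minus the page-rounded stack size. A worked example with made-up numbers:

fn main() {
    let guest_offset: u64 = 0x0; // illustrative values only
    let size: u64 = 0x100_000; // 1 MiB sandbox
    let stack_size: u64 = 0x8_000; // 32 KiB stack, already page-rounded

    let bottom = guest_offset + size;
    let min_user_stack_address = bottom - stack_size;

    assert_eq!(min_user_stack_address, 0xF8_000);
}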

// Top of user stack

@@ -1212,26 +1042,6 @@

shared_mem.write_u64(self.get_user_stack_pointer_offset(), start_of_user_stack)?;

// Start of kernel stack

// There is a guard page between the user stack and the kernel stack and then we need to add the size of the kernel stack

let start_of_kernel_stack: u64 =
start_of_user_stack + (PAGE_SIZE_USIZE + self.kernel_stack_size_rounded) as u64;

shared_mem.write_u64(
self.get_kernel_stack_pointer_offset(),
start_of_kernel_stack,
)?;

// Start of boot stack

// There is a guard page between the kernel stack and the boot stack and then we need to add the size of the boot stack

let start_of_boot_stack: u64 = start_of_kernel_stack + (PAGE_SIZE_USIZE * 2) as u64;

shared_mem.write_u64(self.get_boot_stack_pointer_offset(), start_of_boot_stack)?;

// End of setting up the PEB

// Initialize the stack pointers of input data and output data
@@ -1306,14 +1116,6 @@ mod tests {

expected_size += round_up_to(layout.stack_size, PAGE_SIZE_USIZE);

expected_size += PAGE_SIZE_USIZE; // user stack guard page

expected_size += round_up_to(layout.kernel_stack_size_rounded, PAGE_SIZE_USIZE);

expected_size += PAGE_SIZE_USIZE; // kernel stack guard page

expected_size += PAGE_SIZE_USIZE; // boot stack

expected_size
}

4 changes: 0 additions & 4 deletions src/hyperlight_host/src/mem/memory_region.rs
@@ -149,10 +149,6 @@ pub enum MemoryRegionType {
GuardPage,
/// The region contains the Stack
Stack,
/// The region contains the Kernel Stack
KernelStack,
/// The region contains the Boot Stack
BootStack,
}

/// represents a single memory region inside the guest. All memory within a region has
4 changes: 1 addition & 3 deletions src/hyperlight_host/src/mem/mgr.rs
@@ -58,7 +58,7 @@ const PAGE_USER: u64 = 1 << 2; // User/Supervisor (if this bit is set then the p
const PAGE_NX: u64 = 1 << 63; // Execute Disable (if this bit is set then data in the page cannot be executed)

// The amount of memory that can be mapped per page table
pub(super) const AMOUNT_OF_MEMORY_PER_PT: usize = 0x200000;
pub(super) const AMOUNT_OF_MEMORY_PER_PT: usize = 0x200_000;
/// Read/write permissions flag for the 64-bit PDE
/// The page size for the 64-bit PDE
/// The size of stack guard cookies
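The underscore in 0x200_000 only aids readability; the value is unchanged. As a sanity check on where that number comes from (standard x86-64 paging, not new code): one page table holds 512 eight-byte entries, each mapping a 4 KiB page, so a single table covers 2 MiB.

fn main() {
    const PAGE_SIZE: usize = 0x1000; // 4 KiB page
    const ENTRIES_PER_PT: usize = 512; // entries in one x86-64 page table
    assert_eq!(PAGE_SIZE * ENTRIES_PER_PT, 0x200_000);
}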
@@ -213,8 +213,6 @@
// Host Exception Data are readonly in the guest
MemoryRegionType::HostExceptionData => PAGE_PRESENT | PAGE_NX,
MemoryRegionType::PageTables => PAGE_PRESENT | PAGE_RW | PAGE_NX,
MemoryRegionType::KernelStack => PAGE_PRESENT | PAGE_RW | PAGE_NX,
MemoryRegionType::BootStack => PAGE_PRESENT | PAGE_RW | PAGE_NX,
},
// If there is an error then the address isn't mapped so mark it as not present
Err(_) => 0,