Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion etc/syscalls_linux_aarch64.md
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,7 @@
| 0xdf (223) | fadvise64_64 | (int fd, loff_t offset, loff_t len, int advice) | __arm64_sys_fadvise64_64 | false |
| 0xe0 (224) | swapon | (const char *specialfile, int swap_flags) | __arm64_sys_swapon | false |
| 0xe1 (225) | swapoff | (const char *specialfile) | __arm64_sys_swapoff | false |
| 0xe2 (226) | mprotect | (unsigned long start, size_t len, unsigned long prot) | __arm64_sys_mprotect | false |
| 0xe2 (226) | mprotect | (unsigned long start, size_t len, unsigned long prot) | __arm64_sys_mprotect | true |
| 0xe3 (227) | msync | (unsigned long start, size_t len, int flags) | __arm64_sys_msync | false |
| 0xe4 (228) | mlock | (unsigned long start, size_t len) | __arm64_sys_mlock | false |
| 0xe5 (229) | munlock | (unsigned long start, size_t len) | __arm64_sys_munlock | false |
Expand Down
1 change: 1 addition & 0 deletions libkernel/src/error/syscall_error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@ pub fn kern_err_to_syscall(err: KernelError) -> isize {
KernelError::NotATty => ENOTTY,
KernelError::SeekPipe => ESPIPE,
KernelError::NotSupported => ENOSYS,
KernelError::NoMemory => ENOMEM,
_ => todo!(),
}
}
61 changes: 61 additions & 0 deletions libkernel/src/memory/proc_vm/memory_map/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,67 @@ impl<AS: UserAddressSpace> MemoryMap<AS> {
self.unmap_region(range.align_to_page_boundary(), None)
}

/// Changes the permissions of an existing mapped region, mirroring the
/// semantics of `mprotect(2)`.
///
/// The region must be page-aligned and non-empty. If the region covers an
/// entire VMA, only its permissions change; if it covers a strict
/// sub-region, the VMA is split so the remainder keeps its original
/// permissions. On any error the memory map is left unchanged.
///
/// # Errors
///
/// * `InvalidValue` — region is unaligned or empty.
/// * `NoMemory` — no VMA covers the region's start, or the region spans
///   multiple VMAs (not yet supported).
pub fn mprotect(
    &mut self,
    protect_region: VirtMemoryRegion,
    new_perms: VMAPermissions,
) -> Result<()> {
    // mprotect(2): the address must be page-aligned and the length non-zero.
    if !protect_region.is_page_aligned() || protect_region.size() == 0 {
        return Err(KernelError::InvalidValue);
    }

    let affected_vma_addr = self
        .find_vma(protect_region.start_address())
        .map(|x| x.region.start_address())
        .ok_or(KernelError::NoMemory)?;

    // Temporarily take the VMA out of the map. Every exit path below must
    // either re-insert it (on failure) or insert its replacements (on
    // success) so the map is never left missing a mapping.
    let affected_vma = self
        .vmas
        .remove(&affected_vma_addr)
        .expect("Should have the same key as the start address");

    // Easy case, the entire VMA is changing.
    if affected_vma.region == protect_region {
        // Update the page tables first so a failure leaves the map intact.
        if let Err(e) = self
            .address_space
            .protect_range(protect_region, new_perms.into())
        {
            self.insert_and_merge(affected_vma);
            return Err(e);
        }

        let mut new_vma = affected_vma;
        new_vma.permissions = new_perms;
        self.insert_and_merge(new_vma);

        return Ok(());
    }

    // Next case, a sub-region of a VMA is changing, requiring a split.
    if affected_vma.region.contains(protect_region) {
        // As above: mutate the page tables before mutating the map so the
        // original VMA can be restored if the hardware update fails.
        if let Err(e) = self
            .address_space
            .protect_range(protect_region, new_perms.into())
        {
            self.insert_and_merge(affected_vma);
            return Err(e);
        }

        // `left`/`right` are the surviving pieces around the hole; they
        // keep the original permissions while the hole gets the new ones.
        let (left, right) = affected_vma.region.punch_hole(protect_region);
        let mut new_vma = affected_vma.shrink_to(protect_region);
        new_vma.permissions = new_perms;

        if let Some(left) = left {
            self.insert_and_merge(affected_vma.shrink_to(left));
        }
        self.insert_and_merge(new_vma);
        if let Some(right) = right {
            self.insert_and_merge(affected_vma.shrink_to(right));
        }

        return Ok(());
    }

    // TODO: protecting over contiguous VMAreas.
    // Unsupported for now — put the untouched VMA back before bailing out
    // so the failed call has no side effects.
    self.insert_and_merge(affected_vma);
    Err(KernelError::NoMemory)
}

/// Checks if a given virtual memory region is completely free.
fn is_region_free(&self, region: VirtMemoryRegion) -> bool {
// Find the VMA that might overlap with the start of our desired region.
Expand Down
207 changes: 207 additions & 0 deletions libkernel/src/memory/proc_vm/memory_map/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -153,6 +153,37 @@ fn assert_vma_exists(pvm: &MemoryMap<MockAddressSpace>, start: usize, size: usiz
assert_eq!(vma.region.size(), size, "VMA size mismatch");
}

/// Asserts that the VMA covering `start` carries exactly `perms`.
fn assert_vma_perms(pvm: &MemoryMap<MockAddressSpace>, start: usize, perms: VMAPermissions) {
    let found = pvm
        .find_vma(VA::from_value(start))
        .expect("VMA not found for permission check");
    let actual = found.permissions();
    assert_eq!(actual, perms, "VMA permissions mismatch at {:#x}", start);
}

/// Asserts that the mock address space recorded a `ProtectRange` operation
/// matching `expected_region` and `expected_perms`.
fn assert_ops_log_protect(
    pvm: &MemoryMap<MockAddressSpace>,
    expected_region: VirtMemoryRegion,
    expected_perms: VMAPermissions,
) {
    let ops = pvm.address_space.ops_log.lock().unwrap();
    let matched = ops.iter().any(|entry| {
        if let MockPageTableOp::ProtectRange { region, perms } = entry {
            *region == expected_region && *perms == expected_perms.into()
        } else {
            false
        }
    });
    assert!(
        matched,
        "Did not find ProtectRange op for {:?} with {:?}",
        expected_region, expected_perms
    );
}

#[test]
fn test_mmap_any_empty() {
let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
Expand Down Expand Up @@ -685,3 +716,179 @@ fn test_munmap_over_multiple_vmas() {
]
);
}

#[test]
fn test_mprotect_full_vma() {
    // Renamed with the `test_` prefix for consistency with the sibling
    // mprotect tests in this file.
    let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let start = MMAP_BASE - 4 * PAGE_SIZE;
    let size = 4 * PAGE_SIZE;

    pvm.insert_and_merge(create_anon_vma(start, size, VMAPermissions::rw()));

    // Protect the entire region to read-only.
    let region = VirtMemoryRegion::new(VA::from_value(start), size);
    pvm.mprotect(region, VMAPermissions::ro()).unwrap();

    // No split should occur: still exactly one VMA, now RO.
    assert_eq!(pvm.vmas.len(), 1);
    assert_vma_exists(&pvm, start, size);
    assert_vma_perms(&pvm, start, VMAPermissions::ro());
    assert_ops_log_protect(&pvm, region, VMAPermissions::ro());
}

#[test]
fn test_mprotect_split_middle() {
    let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x10000;
    let total = 3 * PAGE_SIZE; // pages at 0x10000, 0x11000, 0x12000

    pvm.insert_and_merge(create_anon_vma(base, total, VMAPermissions::rw()));

    // Protect only the middle page.
    let middle = base + PAGE_SIZE;
    let region = VirtMemoryRegion::new(VA::from_value(middle), PAGE_SIZE);
    pvm.mprotect(region, VMAPermissions::ro()).unwrap();

    // Expect a three-way split: RW | RO | RW.
    assert_eq!(pvm.vmas.len(), 3);

    assert_vma_exists(&pvm, base, PAGE_SIZE);
    assert_vma_perms(&pvm, base, VMAPermissions::rw());

    assert_vma_exists(&pvm, middle, PAGE_SIZE);
    assert_vma_perms(&pvm, middle, VMAPermissions::ro());

    let tail = base + 2 * PAGE_SIZE;
    assert_vma_exists(&pvm, tail, PAGE_SIZE);
    assert_vma_perms(&pvm, tail, VMAPermissions::rw());

    assert_ops_log_protect(&pvm, region, VMAPermissions::ro());
}

#[test]
fn test_mprotect_split_start() {
    let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x20000;
    let total = 2 * PAGE_SIZE;

    pvm.insert_and_merge(create_anon_vma(base, total, VMAPermissions::rw()));

    // Protect only the first page.
    let region = VirtMemoryRegion::new(VA::from_value(base), PAGE_SIZE);
    pvm.mprotect(region, VMAPermissions::ro()).unwrap();

    // Expect a two-way split: RO | RW.
    assert_eq!(pvm.vmas.len(), 2);

    assert_vma_exists(&pvm, base, PAGE_SIZE);
    assert_vma_perms(&pvm, base, VMAPermissions::ro());

    let second = base + PAGE_SIZE;
    assert_vma_exists(&pvm, second, PAGE_SIZE);
    assert_vma_perms(&pvm, second, VMAPermissions::rw());

    assert_ops_log_protect(&pvm, region, VMAPermissions::ro());
}

#[test]
fn test_mprotect_split_end() {
    let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x30000;
    let total = 2 * PAGE_SIZE;

    pvm.insert_and_merge(create_anon_vma(base, total, VMAPermissions::rw()));

    // Protect only the last page.
    let second = base + PAGE_SIZE;
    let region = VirtMemoryRegion::new(VA::from_value(second), PAGE_SIZE);
    pvm.mprotect(region, VMAPermissions::ro()).unwrap();

    // Expect a two-way split: RW | RO.
    assert_eq!(pvm.vmas.len(), 2);

    assert_vma_exists(&pvm, base, PAGE_SIZE);
    assert_vma_perms(&pvm, base, VMAPermissions::rw());

    assert_vma_exists(&pvm, second, PAGE_SIZE);
    assert_vma_perms(&pvm, second, VMAPermissions::ro());

    assert_ops_log_protect(&pvm, region, VMAPermissions::ro());
}

#[test]
fn test_mprotect_file_backed_split() {
    let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x40000;
    let total = 3 * PAGE_SIZE;
    let base_offset = 0x1000;
    let inode = Arc::new(DummyTestInode);

    // File-backed VMA: [0x40000 - 0x43000), backed at file offset 0x1000.
    pvm.insert_and_merge(create_file_vma(
        base,
        total,
        VMAPermissions::rw(),
        base_offset,
        inode.clone(),
    ));

    // Protect the middle page [0x41000 - 0x42000).
    let region = VirtMemoryRegion::new(VA::from_value(base + PAGE_SIZE), PAGE_SIZE);
    pvm.mprotect(region, VMAPermissions::ro()).unwrap();

    // After the split, every piece must keep its file backing, with the
    // file offset advanced to match its position in the original mapping.
    let check_file_backing = |addr, expected_offset, label: &str| {
        let vma = pvm.find_vma(VA::from_value(addr)).unwrap();
        match &vma.kind {
            VMAreaKind::File(f) => {
                assert_eq!(f.offset, expected_offset);
                assert_eq!(f.len, PAGE_SIZE as u64);
            }
            _ => panic!("{} VMA lost file backing", label),
        }
    };

    // Left VMA: 0x40000, Len 0x1000, Offset 0x1000.
    check_file_backing(base, 0x1000, "Left");

    // Middle VMA: 0x41000, Len 0x1000, Offset 0x2000 — and the new perms.
    let middle = pvm.find_vma(VA::from_value(base + PAGE_SIZE)).unwrap();
    assert_eq!(middle.permissions(), VMAPermissions::ro());
    check_file_backing(base + PAGE_SIZE, 0x2000, "Middle");

    // Right VMA: 0x42000, Len 0x1000, Offset 0x3000.
    check_file_backing(base + 2 * PAGE_SIZE, 0x3000, "Right");

    assert_ops_log_protect(&pvm, region, VMAPermissions::ro());
}

#[test]
fn test_mprotect_merge_restoration() {
    // Splitting a VMA via mprotect and then restoring the original
    // permissions should merge the pieces back into one VMA.
    let mut pvm: MemoryMap<MockAddressSpace> = MemoryMap::new().unwrap();
    let base = 0x50000;
    let total = 2 * PAGE_SIZE;

    pvm.insert_and_merge(create_anon_vma(base, total, VMAPermissions::rw()));

    // Step 1: protect the first page RO, forcing a split.
    let first_page = VirtMemoryRegion::new(VA::from_value(base), PAGE_SIZE);
    pvm.mprotect(first_page, VMAPermissions::ro()).unwrap();
    assert_eq!(pvm.vmas.len(), 2);

    // Step 2: restore the page back to RW.
    pvm.mprotect(first_page, VMAPermissions::rw()).unwrap();

    // Step 3: the two pieces should have merged back together.
    assert_eq!(
        pvm.vmas.len(),
        1,
        "VMAs failed to merge back after permissions restored"
    );
    assert_vma_exists(&pvm, base, total);
    assert_vma_perms(&pvm, base, VMAPermissions::rw());
}
Loading