Skip to content

Commit fb431a4

Browse files
authored
implement OwnedPhysicalMemory which deallocates physical memory on drop (#164)
* add OwnedPhysicalMemory which deallocates phys mem on drop The OwnedPhysicalMemory is the logical counterpart to the OwnedSegment, with the difference that we assume that there is only one global physical memory, so we don't need to worry about lifetimes and back references to the allocator. Upon allocation, such an owned object is handed out, and the caller must keep it, leak it or drop it. If the caller decides to leak it (because, for example, a third-party library needs to take ownership of the memory - see HalImpl in this commit), they are responsible for deallocation. This design decision may or may not prove beneficial for future development, in part because it may be too simple or even incorrect. * fix unnecessary dereference * fix some compiler warnings that show up in my IDE * fix some clippy warnings in tests * run clippy with `--tests` in CI * remove clone impl from OwnedPhysicalMemory
1 parent 0f75e78 commit fb431a4

File tree

11 files changed

+113
-58
lines changed

11 files changed

+113
-58
lines changed

.github/workflows/build.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ jobs:
3636
cargo fmt -- --check
3737
- name: Clippy
3838
run: |
39-
cargo clippy -- -D clippy::all
39+
cargo clippy --tests -- -D clippy::all
4040
4141
test:
4242
name: "Test"

kernel/crates/kernel_vfs/src/path/absolute.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -200,7 +200,7 @@ mod tests {
200200
#[test]
201201
fn test_deref_to_path() {
202202
let abs_path: &AbsolutePath = "/foo/bar".try_into().unwrap();
203-
let path: &Path = &**abs_path;
203+
let path: &Path = abs_path;
204204
assert_eq!(&**path, "/foo/bar");
205205
}
206206

kernel/crates/kernel_vfs/src/path/absolute_owned.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ mod tests {
164164
#[test]
165165
fn test_deref() {
166166
let abs_path = AbsoluteOwnedPath::new();
167-
let owned: &OwnedPath = &*abs_path;
167+
let owned: &OwnedPath = &abs_path;
168168
assert_eq!(owned.as_str(), "/");
169169
}
170170

kernel/crates/kernel_vfs/src/path/filenames.rs

Lines changed: 12 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -274,19 +274,22 @@ mod tests {
274274
let path = Path::new("/foo/bar/baz/qux");
275275
let mut iter = path.filenames();
276276

277-
assert_eq!(iter.nth(0), Some("foo"));
278-
assert_eq!(iter.nth(0), Some("bar"));
279-
assert_eq!(iter.nth(0), Some("baz"));
280-
assert_eq!(iter.nth(0), Some("qux"));
281-
assert_eq!(iter.nth(0), None);
277+
assert_eq!(iter.next(), Some("foo"));
278+
assert_eq!(iter.next(), Some("bar"));
279+
assert_eq!(iter.next(), Some("baz"));
280+
assert_eq!(iter.next(), Some("qux"));
281+
assert_eq!(iter.next(), None);
282282
}
283283

284284
#[test]
285285
fn test_filenames_last() {
286-
assert_eq!(Path::new("/foo/bar/baz").filenames().last(), Some("baz"));
287-
assert_eq!(Path::new("/foo").filenames().last(), Some("foo"));
288-
assert_eq!(Path::new("").filenames().last(), None);
289-
assert_eq!(Path::new("/").filenames().last(), None);
286+
assert_eq!(
287+
Path::new("/foo/bar/baz").filenames().next_back(),
288+
Some("baz")
289+
);
290+
assert_eq!(Path::new("/foo").filenames().next_back(), Some("foo"));
291+
assert_eq!(Path::new("").filenames().next_back(), None);
292+
assert_eq!(Path::new("/").filenames().next_back(), None);
290293
}
291294

292295
#[test]

kernel/crates/kernel_vfs/src/path/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -399,6 +399,7 @@ mod tests {
399399
}
400400

401401
#[test]
402+
#[allow(clippy::useless_asref)]
402403
fn test_path_as_ref() {
403404
let path = Path::new("/foo/bar");
404405
let path_ref: &Path = path.as_ref();

kernel/crates/kernel_vfs/src/path/owned.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,7 @@ mod tests {
242242
#[test]
243243
fn test_deref() {
244244
let owned = OwnedPath::new("/foo/bar");
245-
let path: &Path = &*owned;
245+
let path: &Path = &owned;
246246
assert_eq!(&**path, "/foo/bar");
247247

248248
// Should have access to Path methods

kernel/src/driver/virtio/hal.rs

Lines changed: 23 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ use x86_64::{PhysAddr, VirtAddr};
1313

1414
use crate::driver::pci::VirtIoCam;
1515
use crate::mem::address_space::AddressSpace;
16-
use crate::mem::phys::PhysicalMemory;
16+
use crate::mem::phys::{OwnedPhysicalMemory, PhysicalMemory};
1717
use crate::mem::virt::{VirtualMemoryAllocator, VirtualMemoryHigherHalf};
1818
use crate::{U64Ext, UsizeExt};
1919

@@ -34,37 +34,50 @@ pub struct HalImpl;
3434

3535
unsafe impl Hal for HalImpl {
3636
fn dma_alloc(pages: usize, _: BufferDirection) -> (u64, NonNull<u8>) {
37-
let frames = PhysicalMemory::allocate_frames(pages).unwrap();
37+
let frames = PhysicalMemory::allocate_frames::<Size4KiB>(pages).unwrap();
38+
let start_address = frames.start.start_address().as_u64();
3839
let segment = VirtualMemoryHigherHalf.reserve(pages).unwrap();
3940
AddressSpace::kernel()
4041
.map_range::<Size4KiB>(
4142
&*segment,
42-
frames,
43-
PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
43+
frames.leak(), // this has to be deallocated in dma_dealloc
44+
PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE,
4445
)
4546
.unwrap();
4647
let segment = segment.leak();
4748
let addr = NonNull::new(segment.start.as_mut_ptr::<u8>()).unwrap();
48-
(frames.start.start_address().as_u64(), addr)
49+
(start_address, addr)
4950
}
5051

5152
unsafe fn dma_dealloc(paddr: u64, vaddr: NonNull<u8>, pages: usize) -> i32 {
52-
let frames = PhysFrameRangeInclusive::<Size4KiB> {
53-
start: PhysFrame::containing_address(PhysAddr::new(paddr)),
54-
end: PhysFrame::containing_address(PhysAddr::new(
53+
// strictly speaking this is not necessary, but since we allocate it through
54+
// an [`OwnedPhysicalMemory`] object, we should also deallocate it. That way,
55+
// for allocation and deallocation, everything goes through
56+
// [`OwnedPhysicalMemory`] which maybe makes debugging easier.
57+
//
58+
// Alternatively, we could only create the [`PhysFrameRangeInclusive`] and
59+
// then deallocate that directly.
60+
let frames = {
61+
let start = PhysFrame::containing_address(PhysAddr::new(paddr));
62+
let end = PhysFrame::containing_address(PhysAddr::new(
5563
paddr + (pages * Size4KiB::SIZE.into_usize()).into_u64() - 1,
56-
)),
64+
));
65+
let range = PhysFrameRangeInclusive::<Size4KiB> { start, end };
66+
OwnedPhysicalMemory::from_physical_frame_range(range)
5767
};
68+
5869
let segment = Segment::new(
5970
VirtAddr::from_ptr(vaddr.as_ptr()),
6071
pages.into_u64() * Size4KiB::SIZE,
6172
);
6273
unsafe {
6374
AddressSpace::kernel().unmap_range::<Size4KiB>(&segment, |_| {});
6475
assert!(VirtualMemoryHigherHalf.release(segment));
65-
PhysicalMemory::deallocate_frames(frames);
6676
}
6777

78+
// we can not drop this any earlier, especially not until the respective virtual memory is unmapped
79+
drop(frames);
80+
6881
0
6982
}
7083

kernel/src/main.rs

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@ extern crate alloc;
55
use alloc::boxed::Box;
66
use alloc::sync::Arc;
77
use core::error::Error;
8-
use core::panic::PanicInfo;
98

109
use ext2::Ext2Fs;
1110
use kernel::driver::KernelDeviceId;
@@ -17,9 +16,8 @@ use kernel::mcore;
1716
use kernel::mcore::mtask::process::Process;
1817
use kernel_device::block::{BlockBuf, BlockDevice};
1918
use kernel_vfs::path::{AbsolutePath, ROOT};
20-
use log::{error, info};
19+
use log::info;
2120
use spin::RwLock;
22-
use x86_64::instructions::hlt;
2321

2422
#[unsafe(export_name = "kernel_main")]
2523
unsafe extern "C" fn main() -> ! {
@@ -87,15 +85,17 @@ impl<const N: usize> filesystem::BlockDevice for ArcLockedBlockDevice<N> {
8785

8886
#[panic_handler]
8987
#[cfg(not(test))]
90-
fn rust_panic(info: &PanicInfo) -> ! {
88+
fn rust_panic(info: &core::panic::PanicInfo) -> ! {
9189
handle_panic(info);
9290
loop {
93-
hlt();
91+
x86_64::instructions::hlt();
9492
}
9593
}
9694

9795
#[cfg(not(test))]
98-
fn handle_panic(info: &PanicInfo) {
96+
fn handle_panic(info: &core::panic::PanicInfo) {
97+
use log::error;
98+
9999
let location = info.location().unwrap();
100100
error!(
101101
"kernel panicked at {}:{}:{}:",

kernel/src/mcore/mtask/process/mem.rs

Lines changed: 5 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,9 @@ use core::slice;
44
use kernel_vfs::node::VfsNode;
55
use spin::mutex::Mutex;
66
use x86_64::VirtAddr;
7-
use x86_64::structures::paging::PhysFrame;
8-
use x86_64::structures::paging::frame::PhysFrameRangeInclusive;
97

108
use crate::UsizeExt;
9+
use crate::mem::phys::OwnedPhysicalMemory;
1110
use crate::mem::virt::OwnedSegment;
1211

1312
pub struct MemoryRegions {
@@ -113,50 +112,32 @@ pub struct LazyMemoryRegion {
113112
size: usize,
114113
/// The physical frames that were mapped for this lazy
115114
/// memory region.
116-
_physical_frames: Mutex<Vec<PhysFrame>>,
117-
}
118-
119-
impl Drop for LazyMemoryRegion {
120-
fn drop(&mut self) {
121-
todo!("deallocate physical memory")
122-
}
115+
_physical_frames: Mutex<Vec<OwnedPhysicalMemory>>,
123116
}
124117

125118
#[derive(Debug)]
126119
pub struct MappedMemoryRegion {
127120
segment: OwnedSegment<'static>,
128121
size: usize,
129-
_physical_frames: PhysFrameRangeInclusive,
122+
_physical_memory: OwnedPhysicalMemory,
130123
}
131124

132125
impl MappedMemoryRegion {
133126
pub fn new(
134127
segment: OwnedSegment<'static>,
135128
size: usize,
136-
physical_frames: PhysFrameRangeInclusive,
129+
physical_memory: OwnedPhysicalMemory,
137130
) -> Self {
138131
Self {
139132
segment,
140133
size,
141-
_physical_frames: physical_frames,
134+
_physical_memory: physical_memory,
142135
}
143136
}
144137
}
145138

146-
impl Drop for MappedMemoryRegion {
147-
fn drop(&mut self) {
148-
todo!("deallocate physical memory")
149-
}
150-
}
151-
152139
#[derive(Debug)]
153140
pub struct FileBackedMemoryRegion {
154141
region: LazyMemoryRegion,
155142
_node: VfsNode,
156143
}
157-
158-
impl Drop for FileBackedMemoryRegion {
159-
fn drop(&mut self) {
160-
todo!("deallocate physical memory")
161-
}
162-
}

kernel/src/mem/phys.rs

Lines changed: 60 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
use alloc::vec::Vec;
22
use core::iter::from_fn;
3-
use core::mem::swap;
3+
use core::mem::{ManuallyDrop, swap};
4+
use core::ops::Deref;
45

56
use conquer_once::spin::OnceCell;
67
use kernel_physical_memory::{PhysicalFrameAllocator, PhysicalMemoryManager};
@@ -21,6 +22,59 @@ fn allocator() -> &'static Mutex<MultiStageAllocator> {
2122
.expect("physical allocator not initialized")
2223
}
2324

25+
#[derive(Debug, Eq, PartialEq)]
26+
pub struct OwnedPhysicalMemory {
27+
range: PhysFrameRangeInclusive<Size4KiB>,
28+
}
29+
30+
impl !Clone for OwnedPhysicalMemory {}
31+
32+
impl OwnedPhysicalMemory {
33+
pub fn from_physical_frame<S: PageSize>(frame: PhysFrame<S>) -> Self {
34+
let start_address = frame.start_address();
35+
let start = unsafe {
36+
// Safety: safe because the start address is guaranteed to be valid
37+
PhysFrame::<Size4KiB>::from_start_address_unchecked(start_address)
38+
};
39+
let end = PhysFrame::<Size4KiB>::containing_address(start_address + S::SIZE - 1);
40+
Self {
41+
range: PhysFrameRangeInclusive { start, end },
42+
}
43+
}
44+
45+
pub fn from_physical_frame_range<S: PageSize>(range: PhysFrameRangeInclusive<S>) -> Self {
46+
let start_address = range.start.start_address();
47+
let start = PhysFrame::<Size4KiB>::containing_address(start_address);
48+
let end_address = range.end.start_address() + range.end.size() - 1;
49+
let end = PhysFrame::<Size4KiB>::containing_address(end_address);
50+
Self {
51+
range: PhysFrameRangeInclusive { start, end },
52+
}
53+
}
54+
55+
pub fn is_single_frame(&self) -> bool {
56+
self.range.start == self.range.end
57+
}
58+
59+
pub fn leak(self) -> PhysFrameRangeInclusive<Size4KiB> {
60+
ManuallyDrop::new(self).range
61+
}
62+
}
63+
64+
impl Deref for OwnedPhysicalMemory {
65+
type Target = PhysFrameRangeInclusive<Size4KiB>;
66+
67+
fn deref(&self) -> &Self::Target {
68+
&self.range
69+
}
70+
}
71+
72+
impl Drop for OwnedPhysicalMemory {
73+
fn drop(&mut self) {
74+
PhysicalMemory::deallocate_frames(self.range);
75+
}
76+
}
77+
2478
/// Zero-sized facade to the global physical memory allocator.
2579
///
2680
/// This provides a stateless interface to a two-stage allocator:
@@ -87,11 +141,14 @@ impl PhysicalMemory {
87141
/// Panics if stage 1 allocator is still active (stage 1 doesn't support contiguous
88142
/// allocation). Stage 2 is initialized after the heap becomes available.
89143
#[must_use]
90-
pub fn allocate_frames<S: PageSize>(n: usize) -> Option<PhysFrameRangeInclusive<S>>
144+
pub fn allocate_frames<S: PageSize>(n: usize) -> Option<OwnedPhysicalMemory>
91145
where
92146
PhysicalMemoryManager: PhysicalFrameAllocator<S>,
93147
{
94-
allocator().lock().allocate_frames(n)
148+
allocator()
149+
.lock()
150+
.allocate_frames(n)
151+
.map(OwnedPhysicalMemory::from_physical_frame_range)
95152
}
96153

97154
/// Returns the frame to the free pool for future allocations.

0 commit comments

Comments
 (0)