Commit 10881eb
Merge pull request #179 from immunant/kkysen/switch-to-standard-SAFETY-comments
Change `// Safe because` comments to proper `// SAFETY:` comments `clippy` recognizes
2 parents 4915e9b + dddfe2e commit 10881eb
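
For context: `clippy`'s allow-by-default `undocumented_unsafe_blocks` lint looks specifically for a comment beginning with `SAFETY:` immediately above each `unsafe` block, so the exact prefix matters. Below is a minimal sketch of the convention; `read_volatile_u32` is a made-up illustrative function, not code from this crate:

    // Opt in to the lint; it can also be enabled via Cargo.toml lint tables or on the
    // command line, e.g. `cargo clippy -- -W clippy::undocumented_unsafe_blocks`.
    #![warn(clippy::undocumented_unsafe_blocks)]

    /// Reads a value through a volatile load, the way a driver reads an MMIO register.
    fn read_volatile_u32(src: &u32) -> u32 {
        // SAFETY: `src` is a valid, aligned reference for the duration of this call, so a
        // volatile read through the derived pointer is sound. Without a `SAFETY:` comment
        // (e.g. with the old `// Safe because ...` wording) the lint would flag this block.
        unsafe { core::ptr::read_volatile(src as *const u32) }
    }

    fn main() {
        let value = 0x1234_5678_u32;
        assert_eq!(read_volatile_u32(&value), 0x1234_5678);
    }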

9 files changed: +66 −69 lines changed

src/device/console.rs

Lines changed: 2 additions & 2 deletions
@@ -142,7 +142,7 @@ impl<H: Hal, T: Transport> VirtIOConsole<H, T> {
     /// receive request or some data already received and not yet returned.
     fn poll_retrieve(&mut self) -> Result<()> {
         if self.receive_token.is_none() && self.cursor == self.pending_len {
-            // Safe because the buffer lasts at least as long as the queue, and there are no other
+            // SAFETY: The buffer lasts at least as long as the queue, and there are no other
             // outstanding requests using the buffer.
             self.receive_token = Some(unsafe {
                 self.receiveq
@@ -174,7 +174,7 @@ impl<H: Hal, T: Transport> VirtIOConsole<H, T> {
         let mut flag = false;
         if let Some(receive_token) = self.receive_token {
             if self.receive_token == self.receiveq.peek_used() {
-                // Safe because we are passing the same buffer as we passed to `VirtQueue::add` in
+                // SAFETY: We are passing the same buffer as we passed to `VirtQueue::add` in
                 // `poll_retrieve` and it is still valid.
                 let len = unsafe {
                     self.receiveq.pop_used(

src/device/input.rs

Lines changed: 3 additions & 4 deletions
@@ -43,7 +43,7 @@ impl<H: Hal, T: Transport> VirtIOInput<H, T> {
             negotiated_features.contains(Feature::RING_EVENT_IDX),
         )?;
         for (i, event) in event_buf.as_mut().iter_mut().enumerate() {
-            // Safe because the buffer lasts as long as the queue.
+            // SAFETY: The buffer lasts as long as the queue.
            let token = unsafe { event_queue.add(&[], &mut [event.as_mut_bytes()])? };
            assert_eq!(token, i as u16);
        }
@@ -70,16 +70,15 @@ impl<H: Hal, T: Transport> VirtIOInput<H, T> {
     pub fn pop_pending_event(&mut self) -> Option<InputEvent> {
         if let Some(token) = self.event_queue.peek_used() {
             let event = &mut self.event_buf[token as usize];
-            // Safe because we are passing the same buffer as we passed to `VirtQueue::add` and it
-            // is still valid.
+            // SAFETY: We are passing the same buffer as we passed to `VirtQueue::add` and it is still valid.
             unsafe {
                 self.event_queue
                     .pop_used(token, &[], &mut [event.as_mut_bytes()])
                     .ok()?;
             }
             let event_saved = *event;
             // requeue
-            // Safe because buffer lasts as long as the queue.
+            // SAFETY: The buffer lasts as long as the queue.
             if let Ok(new_token) = unsafe { self.event_queue.add(&[], &mut [event.as_mut_bytes()]) }
             {
                 // This only works because nothing happen between `pop_used` and `add` that affects

src/device/net/dev.rs

Lines changed: 4 additions & 4 deletions
@@ -31,7 +31,7 @@ impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONet<H, T, QUEUE_SIZE>
         let mut rx_buffers = [NONE_BUF; QUEUE_SIZE];
         for (i, rx_buf_place) in rx_buffers.iter_mut().enumerate() {
             let mut rx_buf = RxBuffer::new(i, buf_len);
-            // Safe because the buffer lives as long as the queue.
+            // SAFETY: The buffer lives as long as the queue.
             let token = unsafe { inner.receive_begin(rx_buf.as_bytes_mut())? };
             assert_eq!(token, i as u16);
             *rx_buf_place = Some(rx_buf);
@@ -84,7 +84,7 @@ impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONet<H, T, QUEUE_SIZE>
             return Err(Error::WrongToken);
         }

-        // Safe because `token` == `rx_buf.idx`, we are passing the same
+        // SAFETY: `token` == `rx_buf.idx`, so we are passing the same
         // buffer as we passed to `VirtQueue::add` and it is still valid.
         let (_hdr_len, pkt_len) =
             unsafe { self.inner.receive_complete(token, rx_buf.as_bytes_mut())? };
@@ -99,8 +99,8 @@ impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONet<H, T, QUEUE_SIZE>
     ///
     /// It will add the buffer back to the NIC queue.
     pub fn recycle_rx_buffer(&mut self, mut rx_buf: RxBuffer) -> Result {
-        // Safe because we take the ownership of `rx_buf` back to `rx_buffers`,
-        // it lives as long as the queue.
+        // SAFETY: We take the ownership of `rx_buf` back to `rx_buffers`,
+        // so it lives as long as the queue.
         let new_token = unsafe { self.inner.receive_begin(rx_buf.as_bytes_mut()) }?;
         // `rx_buffers[new_token]` is expected to be `None` since it was taken
         // away at `Self::receive()` and has not been added back.

src/hal.rs

Lines changed: 2 additions & 2 deletions
@@ -62,8 +62,8 @@ impl<H: Hal> Dma<H> {

 impl<H: Hal> Drop for Dma<H> {
     fn drop(&mut self) {
-        // Safe because the memory was previously allocated by `dma_alloc` in `Dma::new`, not yet
-        // deallocated, and we are passing the values from then.
+        // SAFETY: The memory was previously allocated by `dma_alloc` in `Dma::new`,
+        // not yet deallocated, and we are passing the values from then.
         let err = unsafe { H::dma_dealloc(self.paddr, self.vaddr, self.pages) };
         assert_eq!(err, 0, "failed to deallocate DMA");
     }

src/queue.rs

Lines changed: 23 additions & 24 deletions
@@ -114,8 +114,8 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
         // Link descriptors together.
         for i in 0..(size - 1) {
             desc_shadow[i as usize].next = i + 1;
-            // Safe because `desc` is properly aligned, dereferenceable, initialised, and the device
-            // won't access the descriptors for the duration of this unsafe block.
+            // SAFETY: `desc` is properly aligned, dereferenceable, initialised,
+            // and the device won't access the descriptors for the duration of this unsafe block.
             unsafe {
                 (*desc.as_ptr())[i as usize].next = i + 1;
             }
@@ -185,7 +185,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
         let head = self.add_direct(inputs, outputs);

         let avail_slot = self.avail_idx & (SIZE as u16 - 1);
-        // Safe because self.avail is properly aligned, dereferenceable and initialised.
+        // SAFETY: `self.avail` is properly aligned, dereferenceable and initialised.
         unsafe {
             (*self.avail.as_ptr()).ring[avail_slot as usize] = head;
         }
@@ -196,7 +196,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {

         // increase head of avail ring
         self.avail_idx = self.avail_idx.wrapping_add(1);
-        // Safe because self.avail is properly aligned, dereferenceable and initialised.
+        // SAFETY: `self.avail` is properly aligned, dereferenceable and initialised.
         unsafe {
             (*self.avail.as_ptr())
                 .idx
@@ -220,7 +220,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {

             // Write to desc_shadow then copy.
             let desc = &mut self.desc_shadow[usize::from(self.free_head)];
-            // Safe because our caller promises that the buffers live at least until `pop_used`
+            // SAFETY: Our caller promises that the buffers live at least until `pop_used`
             // returns them.
             unsafe {
                 desc.set_buf::<H>(buffer, direction, DescFlags::NEXT);
@@ -255,7 +255,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
             <[Descriptor]>::new_box_zeroed_with_elems(inputs.len() + outputs.len()).unwrap();
         for (i, (buffer, direction)) in InputOutputIter::new(inputs, outputs).enumerate() {
             let desc = &mut indirect_list[i];
-            // Safe because our caller promises that the buffers live at least until `pop_used`
+            // SAFETY: Our caller promises that the buffers live at least until `pop_used`
             // returns them.
             unsafe {
                 desc.set_buf::<H>(buffer, direction, DescFlags::NEXT);
@@ -303,7 +303,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
         outputs: &'a mut [&'a mut [u8]],
         transport: &mut impl Transport,
     ) -> Result<u32> {
-        // Safe because we don't return until the same token has been popped, so the buffers remain
+        // SAFETY: We don't return until the same token has been popped, so the buffers remain
         // valid and are not otherwise accessed until then.
         let token = unsafe { self.add(inputs, outputs) }?;

@@ -317,8 +317,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
             spin_loop();
         }

-        // Safe because these are the same buffers as we passed to `add` above and they are still
-        // valid.
+        // SAFETY: These are the same buffers as we passed to `add` above and they are still valid.
         unsafe { self.pop_used(token, inputs, outputs) }
     }

@@ -328,8 +327,8 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
     pub fn set_dev_notify(&mut self, enable: bool) {
         let avail_ring_flags = if enable { 0x0000 } else { 0x0001 };
         if !self.event_idx {
-            // Safe because self.avail points to a valid, aligned, initialised, dereferenceable, readable
-            // instance of AvailRing.
+            // SAFETY: `self.avail` points to a valid, aligned, initialised, dereferenceable, readable
+            // instance of `AvailRing`.
             unsafe {
                 (*self.avail.as_ptr())
                     .flags
@@ -344,13 +343,13 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
     /// This will be false if the device has supressed notifications.
     pub fn should_notify(&self) -> bool {
         if self.event_idx {
-            // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
-            // instance of UsedRing.
+            // SAFETY: `self.used` points to a valid, aligned, initialised, dereferenceable, readable
+            // instance of `UsedRing`.
             let avail_event = unsafe { (*self.used.as_ptr()).avail_event.load(Ordering::Acquire) };
             self.avail_idx >= avail_event.wrapping_add(1)
         } else {
-            // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
-            // instance of UsedRing.
+            // SAFETY: `self.used` points to a valid, aligned, initialised, dereferenceable, readable
+            // instance of `UsedRing`.
             unsafe { (*self.used.as_ptr()).flags.load(Ordering::Acquire) & 0x0001 == 0 }
         }
     }
@@ -359,7 +358,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
     /// the device.
     fn write_desc(&mut self, index: u16) {
        let index = usize::from(index);
-        // Safe because self.desc is properly aligned, dereferenceable and initialised, and nothing
+        // SAFETY: `self.desc` is properly aligned, dereferenceable and initialised, and nothing
        // else reads or writes the descriptor during this block.
        unsafe {
            (*self.desc.as_ptr())[index] = self.desc_shadow[index].clone();
@@ -368,8 +367,8 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {

     /// Returns whether there is a used element that can be popped.
     pub fn can_pop(&self) -> bool {
-        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
-        // instance of UsedRing.
+        // SAFETY: `self.used` points to a valid, aligned, initialised, dereferenceable, readable
+        // instance of `UsedRing`.
         self.last_used_idx != unsafe { (*self.used.as_ptr()).idx.load(Ordering::Acquire) }
     }

@@ -378,8 +377,8 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
     pub fn peek_used(&self) -> Option<u16> {
         if self.can_pop() {
             let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
-            // Safe because self.used points to a valid, aligned, initialised, dereferenceable,
-            // readable instance of UsedRing.
+            // SAFETY: `self.used` points to a valid, aligned, initialised, dereferenceable,
+            // readable instance of `UsedRing`.
             Some(unsafe { (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16 })
         } else {
             None
@@ -513,8 +512,8 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
         let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
         let index;
         let len;
-        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
-        // instance of UsedRing.
+        // SAFETY: `self.used` points to a valid, aligned, initialised, dereferenceable, readable
+        // instance of `UsedRing`.
         unsafe {
             index = (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16;
             len = (*self.used.as_ptr()).ring[last_used_slot as usize].len;
@@ -525,7 +524,7 @@ impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
             return Err(Error::WrongToken);
         }

-        // Safe because the caller ensures the buffers are valid and match the descriptor.
+        // SAFETY: The caller ensures the buffers are valid and match the descriptor.
         unsafe {
             self.recycle_descriptors(index, inputs, outputs);
         }
@@ -718,7 +717,7 @@ impl Descriptor {
         direction: BufferDirection,
         extra_flags: DescFlags,
     ) {
-        // Safe because our caller promises that the buffer is valid.
+        // SAFETY: Our caller promises that the buffer is valid.
         unsafe {
             self.addr = H::share(buf, direction) as u64;
         }

src/queue/owning.rs

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ impl<H: Hal, const SIZE: usize, const BUFFER_SIZE: usize> Drop
 {
     fn drop(&mut self) {
         for buffer in self.buffers {
-            // Safe because we obtained the buffer pointer from Box::into_raw, and it won't be used
+            // SAFETY: We obtained the buffer pointer from `Box::into_raw`, and it won't be used
             // anywhere else after the queue is destroyed.
             unsafe { drop(Box::from_raw(buffer.as_ptr())) };
         }

src/transport/mmio.rs

Lines changed: 15 additions & 15 deletions
@@ -304,7 +304,7 @@ impl MmioTransport {

     /// Gets the vendor ID.
     pub fn vendor_id(&self) -> u32 {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe { volread!(self.header, vendor_id) }
     }
 }
@@ -318,13 +318,13 @@ unsafe impl Sync for MmioTransport {}

 impl Transport for MmioTransport {
     fn device_type(&self) -> DeviceType {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         let device_id = unsafe { volread!(self.header, device_id) };
         device_id.into()
     }

     fn read_device_features(&mut self) -> u64 {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe {
             volwrite!(self.header, device_features_sel, 0); // device features [0, 32)
             let mut device_features_bits = volread!(self.header, device_features).into();
@@ -335,7 +335,7 @@ impl Transport for MmioTransport {
     }

     fn write_driver_features(&mut self, driver_features: u64) {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe {
             volwrite!(self.header, driver_features_sel, 0); // driver features [0, 32)
             volwrite!(self.header, driver_features, driver_features as u32);
@@ -345,27 +345,27 @@ impl Transport for MmioTransport {
     }

     fn max_queue_size(&mut self, queue: u16) -> u32 {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe {
             volwrite!(self.header, queue_sel, queue.into());
             volread!(self.header, queue_num_max)
         }
     }

     fn notify(&mut self, queue: u16) {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe {
             volwrite!(self.header, queue_notify, queue.into());
         }
     }

     fn get_status(&self) -> DeviceStatus {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe { volread!(self.header, status) }
     }

     fn set_status(&mut self, status: DeviceStatus) {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe {
             volwrite!(self.header, status, status);
         }
@@ -374,7 +374,7 @@ impl Transport for MmioTransport {
     fn set_guest_page_size(&mut self, guest_page_size: u32) {
         match self.version {
             MmioVersion::Legacy => {
-                // Safe because self.header points to a valid VirtIO MMIO region.
+                // SAFETY: `self.header` points to a valid VirtIO MMIO region.
                 unsafe {
                     volwrite!(self.header, legacy_guest_page_size, guest_page_size);
                 }
@@ -416,7 +416,7 @@ impl Transport for MmioTransport {
                 let align = PAGE_SIZE as u32;
                 let pfn = (descriptors / PAGE_SIZE) as u32;
                 assert_eq!(pfn as usize * PAGE_SIZE, descriptors);
-                // Safe because self.header points to a valid VirtIO MMIO region.
+                // SAFETY: `self.header` points to a valid VirtIO MMIO region.
                 unsafe {
                     volwrite!(self.header, queue_sel, queue.into());
                     volwrite!(self.header, queue_num, size);
@@ -425,7 +425,7 @@ impl Transport for MmioTransport {
                 }
             }
             MmioVersion::Modern => {
-                // Safe because self.header points to a valid VirtIO MMIO region.
+                // SAFETY: `self.header` points to a valid VirtIO MMIO region.
                 unsafe {
                     volwrite!(self.header, queue_sel, queue.into());
                     volwrite!(self.header, queue_num, size);
@@ -444,7 +444,7 @@ impl Transport for MmioTransport {
     fn queue_unset(&mut self, queue: u16) {
         match self.version {
             MmioVersion::Legacy => {
-                // Safe because self.header points to a valid VirtIO MMIO region.
+                // SAFETY: `self.header` points to a valid VirtIO MMIO region.
                 unsafe {
                     volwrite!(self.header, queue_sel, queue.into());
                     volwrite!(self.header, queue_num, 0);
@@ -453,7 +453,7 @@ impl Transport for MmioTransport {
                 }
             }
             MmioVersion::Modern => {
-                // Safe because self.header points to a valid VirtIO MMIO region.
+                // SAFETY: `self.header` points to a valid VirtIO MMIO region.
                 unsafe {
                     volwrite!(self.header, queue_sel, queue.into());

@@ -474,7 +474,7 @@ impl Transport for MmioTransport {
     }

     fn queue_used(&mut self, queue: u16) -> bool {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe {
             volwrite!(self.header, queue_sel, queue.into());
             match self.version {
@@ -485,7 +485,7 @@ impl Transport for MmioTransport {
     }

     fn ack_interrupt(&mut self) -> bool {
-        // Safe because self.header points to a valid VirtIO MMIO region.
+        // SAFETY: `self.header` points to a valid VirtIO MMIO region.
         unsafe {
             let interrupt = volread!(self.header, interrupt_status);
             if interrupt != 0 {
