deny(unsafe_op_in_unsafe_fn) in rustc_data_structures #110554

Merged
Changes from all commits

1 change: 1 addition & 0 deletions compiler/rustc_data_structures/src/lib.rs
@@ -35,6 +35,7 @@
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
+#![deny(unsafe_op_in_unsafe_fn)]

 #[macro_use]
 extern crate tracing;
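A note on what the new lint level means: with `unsafe_op_in_unsafe_fn` set to deny, the body of an `unsafe fn` is no longer treated as one implicit `unsafe` block, so every unsafe operation inside it needs its own explicit `unsafe { ... }`. A minimal sketch of the pattern (the `read_first` function is hypothetical, not part of this PR):

```rust
#![deny(unsafe_op_in_unsafe_fn)]

/// # Safety
/// `ptr` must be non-null, aligned, and point to a readable `u8`.
unsafe fn read_first(ptr: *const u8) -> u8 {
    // Under the lint, a bare `*ptr` here is an error: the unsafe operation
    // must be acknowledged with an explicit block even inside an `unsafe fn`.
    unsafe { *ptr }
}

fn main() {
    let byte = 42u8;
    // Callers still need their own `unsafe` block, as before.
    let value = unsafe { read_first(&byte) };
    assert_eq!(value, 42);
}
```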
3 changes: 2 additions & 1 deletion compiler/rustc_data_structures/src/memmap.rs
@@ -13,7 +13,8 @@ pub struct Mmap(Vec<u8>);
 impl Mmap {
     #[inline]
     pub unsafe fn map(file: File) -> io::Result<Self> {
-        memmap2::Mmap::map(&file).map(Mmap)
+        // Safety: this is in fact not safe.
+        unsafe { memmap2::Mmap::map(&file).map(Mmap) }
     }
 }

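For context on the (deliberately tongue-in-cheek) safety comment: `memmap2::Mmap::map` is an `unsafe fn` because the mapped file can be truncated or rewritten by another process while the mapping is alive, which would invalidate the byte view it hands out. That contract cannot be checked locally, so the wrapper stays `unsafe` and simply forwards it. A rough caller-side sketch against `memmap2` directly (illustrative only, not code from this PR):

```rust
use std::fs::File;
use std::io;

fn map_read_only(path: &str) -> io::Result<memmap2::Mmap> {
    let file = File::open(path)?;
    // Safety (assumed by the caller): no other process truncates or modifies
    // the file while the mapping is in use; otherwise reads through the
    // mapping could observe torn data or fault.
    unsafe { memmap2::Mmap::map(&file) }
}
```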
196 changes: 101 additions & 95 deletions compiler/rustc_data_structures/src/sip128.rs
@@ -96,28 +96,30 @@ macro_rules! compress {
 unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
     debug_assert!(count <= 8);

-    if count == 8 {
-        ptr::copy_nonoverlapping(src, dst, 8);
-        return;
-    }
+    unsafe {
+        if count == 8 {
+            ptr::copy_nonoverlapping(src, dst, 8);
+            return;
+        }

-    let mut i = 0;
-    if i + 3 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
-    }
+        let mut i = 0;
+        if i + 3 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
+            i += 4;
+        }

-    if i + 1 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
-    }
+        if i + 1 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
+            i += 2
+        }

-    if i < count {
-        *dst.add(i) = *src.add(i);
-        i += 1;
-    }
+        if i < count {
+            *dst.add(i) = *src.add(i);
+            i += 1;
+        }

-    debug_assert_eq!(i, count);
+        debug_assert_eq!(i, count);
+    }
 }

 // # Implementation
@@ -232,38 +234,40 @@ impl SipHasher128 {
     // overflow) if it wasn't already.
     #[inline(never)]
     unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
-        let nbuf = self.nbuf;
-        debug_assert!(LEN <= 8);
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + LEN >= BUFFER_SIZE);
-        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+        unsafe {
+            let nbuf = self.nbuf;
+            debug_assert!(LEN <= 8);
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + LEN >= BUFFER_SIZE);
+            debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);

-        // Copy first part of input into end of buffer, possibly into spill
-        // element. The memcpy call is optimized away because the size is known.
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
+            // Copy first part of input into end of buffer, possibly into spill
+            // element. The memcpy call is optimized away because the size is known.
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);

-        // Process buffer.
-        for i in 0..BUFFER_CAPACITY {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-        }
+            // Process buffer.
+            for i in 0..BUFFER_CAPACITY {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }

-        // Copy remaining input into start of buffer by copying LEN - 1
-        // elements from spill (at most LEN - 1 bytes could have overflowed
-        // into the spill). The memcpy call is optimized away because the size
-        // is known. And the whole copy is optimized away for LEN == 1.
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
-        ptr::copy_nonoverlapping(src, dst, LEN - 1);
+            // Copy remaining input into start of buffer by copying LEN - 1
+            // elements from spill (at most LEN - 1 bytes could have overflowed
+            // into the spill). The memcpy call is optimized away because the size
+            // is known. And the whole copy is optimized away for LEN == 1.
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
+            ptr::copy_nonoverlapping(src, dst, LEN - 1);

-        // This function should only be called when the write fills the buffer.
-        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
-        // LEN is statically known, so the branch is optimized away.
-        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-        self.processed += BUFFER_SIZE;
+            // This function should only be called when the write fills the buffer.
+            // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
+            // LEN is statically known, so the branch is optimized away.
+            self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
+            self.processed += BUFFER_SIZE;
+        }
     }

     // A write function for byte slices.
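The buffer-processing functions in this file share one idea: the hash buffer has a spare "spill" element past its logical end, so a short write that crosses the end can be copied in a single fixed-size memcpy, and the overflowing bytes are moved back to the front after the full buffer is hashed. A simplified, safe sketch of just that bookkeeping (buffer sizes are assumed, and the hashing rounds are elided):

```rust
// Illustrative stand-in for the buffer-with-spill bookkeeping used by
// SipHasher128; sizes are assumed and the hashing rounds are elided.
const BUFFER_SIZE: usize = 64; // assumed: eight u64 elements
const SPILL: usize = 8;        // assumed: one spare u64 element past the end

struct Buffered {
    buf: [u8; BUFFER_SIZE + SPILL],
    nbuf: usize,      // valid bytes currently in `buf`
    processed: usize, // bytes already fed to the hash rounds
}

impl Buffered {
    // Mirrors `short_write_process_buffer`'s precondition: only called when
    // the write fills the buffer, i.e. `nbuf + bytes.len() >= BUFFER_SIZE`.
    fn short_write_process_buffer(&mut self, bytes: &[u8]) {
        let len = bytes.len();
        let nbuf = self.nbuf;
        debug_assert!(len <= 8 && nbuf < BUFFER_SIZE && nbuf + len >= BUFFER_SIZE);

        // One fixed-size copy; up to `len - 1` bytes land in the spill element.
        self.buf[nbuf..nbuf + len].copy_from_slice(bytes);

        // ... the real code hashes buf[..BUFFER_SIZE] here ...

        // Move the bytes that overflowed past the end back to the front.
        // (The real code always copies `LEN - 1` bytes because that count is
        // statically known; any extra garbage is masked by the new `nbuf`.)
        let spilled = nbuf + len - BUFFER_SIZE;
        self.buf.copy_within(BUFFER_SIZE..BUFFER_SIZE + spilled, 0);
        self.nbuf = spilled;
        self.processed += BUFFER_SIZE;
    }
}

fn main() {
    let mut b = Buffered { buf: [0; BUFFER_SIZE + SPILL], nbuf: BUFFER_SIZE - 3, processed: 0 };
    b.short_write_process_buffer(&[1, 2, 3, 4, 5]);
    assert_eq!(b.nbuf, 2);            // two bytes crossed the end...
    assert_eq!(&b.buf[..2], &[4, 5]); // ...and were moved back to the front
    assert_eq!(b.processed, BUFFER_SIZE);
}
```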
@@ -301,57 +305,59 @@ impl SipHasher128 {
     // containing the byte offset `self.nbuf`.
     #[inline(never)]
     unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
-        let length = msg.len();
-        let nbuf = self.nbuf;
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + length >= BUFFER_SIZE);
+        unsafe {
+            let length = msg.len();
+            let nbuf = self.nbuf;
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + length >= BUFFER_SIZE);

-        // Always copy first part of input into current element of buffer.
-        // This function should only be called when the write fills the buffer,
-        // so we know that there is enough input to fill the current element.
-        let valid_in_elem = nbuf % ELEM_SIZE;
-        let needed_in_elem = ELEM_SIZE - valid_in_elem;
+            // Always copy first part of input into current element of buffer.
+            // This function should only be called when the write fills the buffer,
+            // so we know that there is enough input to fill the current element.
+            let valid_in_elem = nbuf % ELEM_SIZE;
+            let needed_in_elem = ELEM_SIZE - valid_in_elem;

-        let src = msg.as_ptr();
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        copy_nonoverlapping_small(src, dst, needed_in_elem);
+            let src = msg.as_ptr();
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            copy_nonoverlapping_small(src, dst, needed_in_elem);

-        // Process buffer.
+            // Process buffer.

-        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
-        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
-        // We know that is true, because last step ensured we have a full
-        // element in the buffer.
-        let last = nbuf / ELEM_SIZE + 1;
+            // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+            // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+            // We know that is true, because last step ensured we have a full
+            // element in the buffer.
+            let last = nbuf / ELEM_SIZE + 1;

-        for i in 0..last {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-        }
+            for i in 0..last {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }

-        // Process the remaining element-sized chunks of input.
-        let mut processed = needed_in_elem;
-        let input_left = length - processed;
-        let elems_left = input_left / ELEM_SIZE;
-        let extra_bytes_left = input_left % ELEM_SIZE;
+            // Process the remaining element-sized chunks of input.
+            let mut processed = needed_in_elem;
+            let input_left = length - processed;
+            let elems_left = input_left / ELEM_SIZE;
+            let extra_bytes_left = input_left % ELEM_SIZE;

-        for _ in 0..elems_left {
-            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-            processed += ELEM_SIZE;
-        }
+            for _ in 0..elems_left {
+                let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+                processed += ELEM_SIZE;
+            }

-        // Copy remaining input into start of buffer.
-        let src = msg.as_ptr().add(processed);
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        copy_nonoverlapping_small(src, dst, extra_bytes_left);
+            // Copy remaining input into start of buffer.
+            let src = msg.as_ptr().add(processed);
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            copy_nonoverlapping_small(src, dst, extra_bytes_left);

-        self.nbuf = extra_bytes_left;
-        self.processed += nbuf + processed;
+            self.nbuf = extra_bytes_left;
+            self.processed += nbuf + processed;
+        }
     }

     #[inline]
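One detail in the `slice_write_process_buffer` hunk worth spelling out: since `needed_in_elem = ELEM_SIZE - nbuf % ELEM_SIZE`, the quantities `nbuf / ELEM_SIZE + 1` and `(nbuf + needed_in_elem) / ELEM_SIZE` are always equal; the first form is preferred only because it makes the non-zero lower bound of the loop syntactically obvious to the compiler. A quick check of the identity (`ELEM_SIZE = 8` assumed here):

```rust
fn main() {
    const ELEM_SIZE: usize = 8; // assumed: the buffer element is a u64

    for nbuf in 0..256 {
        let needed_in_elem = ELEM_SIZE - nbuf % ELEM_SIZE;
        assert_eq!(nbuf / ELEM_SIZE + 1, (nbuf + needed_in_elem) / ELEM_SIZE);
    }
    println!("identity holds for all tested nbuf values");
}
```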
10 changes: 5 additions & 5 deletions compiler/rustc_data_structures/src/tagged_ptr.rs
@@ -153,7 +153,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Box<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Box::into_raw`
-        Box::from_raw(ptr.as_ptr())
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 }

@@ -169,7 +169,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Rc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Rc::into_raw`
-        Rc::from_raw(ptr.as_ptr())
+        unsafe { Rc::from_raw(ptr.as_ptr()) }
     }
 }

@@ -185,7 +185,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Arc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Arc::into_raw`
-        Arc::from_raw(ptr.as_ptr())
+        unsafe { Arc::from_raw(ptr.as_ptr()) }
     }
 }

@@ -201,7 +201,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a T {
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_ref()
+        unsafe { ptr.as_ref() }
     }
 }

@@ -217,7 +217,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a mut T {
     unsafe fn from_ptr(mut ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_mut()
+        unsafe { ptr.as_mut() }
     }
 }

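All five tagged_ptr.rs changes follow the same shape: `from_ptr` rebuilds an owner or reference from a `NonNull<T>` that `into_ptr` produced earlier, so the safety argument is the round trip itself. A stripped-down sketch of that pattern for `Box` under the new lint (hypothetical `Ptr` trait, not the actual `Pointer` trait in this file):

```rust
#![deny(unsafe_op_in_unsafe_fn)]

use std::ptr::NonNull;

/// Hypothetical round-trip trait, mirroring the shape of `Pointer` in tagged_ptr.rs.
unsafe trait Ptr: Sized {
    type Target;

    fn into_ptr(self) -> NonNull<Self::Target>;

    /// # Safety
    /// `ptr` must come from a previous `into_ptr` call and must not be reused.
    unsafe fn from_ptr(ptr: NonNull<Self::Target>) -> Self;
}

unsafe impl<T> Ptr for Box<T> {
    type Target = T;

    fn into_ptr(self) -> NonNull<T> {
        // `Box::into_raw` never returns null.
        NonNull::new(Box::into_raw(self)).unwrap()
    }

    unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
        // Safety: `ptr` comes from `into_ptr`, which used `Box::into_raw`,
        // so reconstructing the Box is sound. The explicit block is exactly
        // what `deny(unsafe_op_in_unsafe_fn)` now requires.
        unsafe { Box::from_raw(ptr.as_ptr()) }
    }
}

fn main() {
    let raw = Box::new(7u32).into_ptr();
    // Safety: `raw` came straight from `into_ptr` and is consumed exactly once.
    let boxed = unsafe { <Box<u32> as Ptr>::from_ptr(raw) };
    assert_eq!(*boxed, 7);
}
```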