 use crate::alloc::{handle_alloc_error, Global};
 use core::alloc::Allocator;
 use core::alloc::Layout;
-use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
+use core::iter::UncheckedIndexedIterator;
+use core::iter::{InPlaceIterable, SourceIter};
 use core::marker::PhantomData;
 use core::mem::{self, ManuallyDrop, SizedTypeProperties};
 use core::num::NonZeroUsize;
@@ -257,8 +258,9 @@ where
         // caveat: if they weren't we might not even make it to this point
         debug_assert_eq!(src_buf, src.buf.as_ptr());
         // check InPlaceIterable contract. This is only possible if the iterator advanced the
-        // source pointer at all. If it uses unchecked access via TrustedRandomAccess
-        // then the source pointer will stay in its initial position and we can't use it as reference
+        // source pointer at all. If it uses unchecked access via UncheckedIndexedIterator
+        // and doesn't perform cleanup then the source pointer will stay in its initial position
+        // and we can't use it as reference.
         if src.ptr != src_ptr {
             debug_assert!(
                 unsafe { dst_buf.add(len) as *const _ } <= src.ptr.as_ptr(),
@@ -369,28 +371,94 @@ where
         }
     }
 
+// impl<T, I> SpecInPlaceCollect<T, I> for I
+// where
+//     I: Iterator<Item = T> + TrustedRandomAccessNoCoerce,
+// {
+//     #[inline]
+//     unsafe fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
+//         let len = self.size();
+//         let mut drop_guard = InPlaceDrop { inner: dst_buf, dst: dst_buf };
+//         for i in 0..len {
+//             // Safety: InplaceIterable contract guarantees that for every element we read
+//             // one slot in the underlying storage will have been freed up and we can immediately
+//             // write back the result.
+//             unsafe {
+//                 let dst = dst_buf.add(i);
+//                 debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
+//                 ptr::write(dst, self.__iterator_get_unchecked(i));
+//                 // Since this executes user code which can panic we have to bump the pointer
+//                 // after each step.
+//                 drop_guard.dst = dst.add(1);
+//             }
+//         }
+//         mem::forget(drop_guard);
+//         len
+//     }
+// }
+
 impl<T, I> SpecInPlaceCollect<T, I> for I
 where
-    I: Iterator<Item = T> + TrustedRandomAccessNoCoerce,
+    I: Iterator<Item = T> + UncheckedIndexedIterator,
 {
     #[inline]
     unsafe fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
-        let len = self.size();
-        let mut drop_guard = InPlaceDrop { inner: dst_buf, dst: dst_buf };
-        for i in 0..len {
-            // Safety: InplaceIterable contract guarantees that for every element we read
-            // one slot in the underlying storage will have been freed up and we can immediately
-            // write back the result.
+        let len = self.size_hint().0;
+
+        if len == 0 {
+            return 0;
+        }
+
+        struct LoopGuard<'a, I>
+        where
+            I: Iterator + UncheckedIndexedIterator,
+        {
+            it: &'a mut I,
+            len: usize,
+            idx: usize,
+            dst_buf: *mut I::Item,
+        }
+
+        impl<I> Drop for LoopGuard<'_, I>
+        where
+            I: Iterator + UncheckedIndexedIterator,
+        {
+            #[inline]
+            fn drop(&mut self) {
+                unsafe {
+                    let new_len = self.len - self.idx;
+                    self.it.set_front_index_from_end_unchecked(new_len, self.len);
+                    if self.idx != self.len {
+                        let raw_slice =
+                            ptr::slice_from_raw_parts_mut::<I::Item>(self.dst_buf, self.idx);
+                        ptr::drop_in_place(raw_slice);
+                    }
+                }
+            }
+        }
+
+        let mut state = LoopGuard { it: self, len, idx: 0, dst_buf };
+
+        loop {
             unsafe {
-                let dst = dst_buf.add(i);
+                let idx = state.idx;
+                state.idx = idx.unchecked_add(1);
+                let dst = state.dst_buf.add(idx);
                 debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
-                ptr::write(dst, self.__iterator_get_unchecked(i));
-                // Since this executes user code which can panic we have to bump the pointer
-                // after each step.
-                drop_guard.dst = dst.add(1);
+                dst.write(state.it.index_from_end_unchecked(state.len - idx));
+            }
+            if state.idx == len {
+                break;
             }
         }
-        mem::forget(drop_guard);
+
+        // disarm guard, we don't want the front elements to get dropped
+        mem::forget(state);
+        // since the guard is disarmed, update the iterator state
+        if Self::CLEANUP_ON_DROP {
+            unsafe { self.set_front_index_from_end_unchecked(0, len) };
+        }
+
        len
    }
 }
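
Editor's note: a minimal, self-contained sketch of the drop-guard pattern the new collect_in_place builds on. The names here (PartialInitGuard, write_all) are illustrative, not the standard library's: if producing an element panics mid-loop, the guard's Drop impl drops exactly the elements already written into the destination buffer, and the happy path disarms the guard with mem::forget, much like LoopGuard above, minus the iterator-state bookkeeping done via set_front_index_from_end_unchecked.

use std::ptr;

// Hypothetical guard over a partially initialized buffer.
struct PartialInitGuard<T> {
    dst_buf: *mut T,
    initialized: usize,
}

impl<T> Drop for PartialInitGuard<T> {
    fn drop(&mut self) {
        // Runs only during unwinding (the happy path forgets the guard) and drops
        // exactly the elements that were written before the panic.
        unsafe {
            let raw_slice = ptr::slice_from_raw_parts_mut(self.dst_buf, self.initialized);
            ptr::drop_in_place(raw_slice);
        }
    }
}

// Writes up to `len` of `iter`'s items into `dst_buf` (which must have room for
// `len` items), cleaning up already-written items if producing an item panics.
unsafe fn write_all<T, I: Iterator<Item = T>>(mut iter: I, dst_buf: *mut T, len: usize) -> usize {
    let mut guard = PartialInitGuard { dst_buf, initialized: 0 };
    for i in 0..len {
        // `next()` runs user code that may panic; the guard covers that case.
        let Some(item) = iter.next() else { break };
        unsafe { dst_buf.add(i).write(item) };
        // Bump the count only after the write succeeded.
        guard.initialized = i + 1;
    }
    let written = guard.initialized;
    // Happy path: disarm the guard, ownership of the items stays in the buffer.
    std::mem::forget(guard);
    written
}

fn main() {
    let mut storage: Vec<String> = Vec::with_capacity(3);
    let items = ["a", "b", "c"].into_iter().map(String::from);
    let written = unsafe { write_all(items, storage.as_mut_ptr(), 3) };
    unsafe { storage.set_len(written) };
    println!("{storage:?}");
}

Updating the count only after each successful write is what keeps the guard's cleanup bounded to fully initialized elements, the same reason the diff bumps state.idx before writing and checks it in LoopGuard::drop.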
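For completeness, a sketch of the user-visible effect this specialization targets. Allocation reuse by collect() for in-place-iterable pipelines is an optimization, not a documented guarantee, so the pointer comparison below is illustrative only:

fn main() {
    let src: Vec<u32> = (0..1024).collect();
    let src_ptr = src.as_ptr();

    // A same-size map over vec::IntoIter is eligible for in-place collection,
    // so collect() can write results back into the buffer it is reading from.
    let dst: Vec<u32> = src.into_iter().map(|x| x.wrapping_mul(3)).collect();

    println!("allocation reused: {}", core::ptr::eq(src_ptr, dst.as_ptr()));
}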