+use embedded_storage::iter::IterableByOverlaps;
 pub use embedded_storage::nor_flash::{ErrorType, NorFlashError, NorFlashErrorKind};
+use embedded_storage::Region;
+
+use crate::{ReadStorage, Storage};
 
 /// Read only NOR flash trait.
 pub trait ReadNorFlash: ErrorType {
@@ -85,3 +89,199 @@ impl<T: NorFlash> NorFlash for &mut T {
 /// - Bits that were 0 on flash are guaranteed to stay as 0
 /// - Rest of the bits in the page are guaranteed to be unchanged
 pub trait MultiwriteNorFlash: NorFlash {}
+
+struct Page {
+    pub start: u32,
+    pub size: usize,
+}
+
+impl Page {
+    fn new(index: u32, size: usize) -> Self {
+        Self {
+            start: index * size as u32,
+            size,
+        }
+    }
+
+    /// The end address of the page
+    const fn end(&self) -> u32 {
+        self.start + self.size as u32
+    }
+}
+
+impl Region for Page {
+    /// Checks if an address offset is contained within the page
+    fn contains(&self, address: u32) -> bool {
+        (self.start <= address) && (self.end() > address)
+    }
+}
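+
+// Illustration (numbers assumed, not from the original source): with an erase
+// size of 4096, `Page::new(2, 4096)` spans addresses 8192..12288, so
+// `contains(8192)` is true while `contains(12288)` is false.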
+
+/// A [`Storage`] adapter for [`NorFlash`] devices that uses a read-modify-write
+/// strategy: every write reads the affected page into `merge_buffer`, erases the
+/// page, merges the new data in and writes the whole page back.
+#[derive(Debug)]
+pub struct RmwNorFlashStorage<'a, S> {
+    storage: S,
+    merge_buffer: &'a mut [u8],
+}
+
+impl<'a, S> RmwNorFlashStorage<'a, S>
+where
+    S: NorFlash,
+{
+    /// Instantiate a new generic `Storage` from a `NorFlash` peripheral
+    ///
+    /// **NOTE** This will panic if the provided merge buffer is smaller than
+    /// the erase size of the flash peripheral.
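+    ///
+    /// # Example
+    ///
+    /// A minimal sketch (not part of the original source); `my_flash` stands in
+    /// for any value implementing [`NorFlash`], assumed here to have
+    /// `ERASE_SIZE == 4096`:
+    ///
+    /// ```ignore
+    /// let mut buf = [0u8; 4096];
+    /// let mut storage = RmwNorFlashStorage::new(my_flash, &mut buf);
+    /// storage.write(0x100, &[0xAA; 8]).await?;
+    /// ```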
+    pub fn new(nor_flash: S, merge_buffer: &'a mut [u8]) -> Self {
+        if merge_buffer.len() < S::ERASE_SIZE {
+            panic!("Merge buffer is too small");
+        }
+
+        Self {
+            storage: nor_flash,
+            merge_buffer,
+        }
+    }
+}
+
+impl<'a, S> ReadStorage for RmwNorFlashStorage<'a, S>
+where
+    S: ReadNorFlash,
+{
+    type Error = S::Error;
+
+    async fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
+        // Nothing special to be done for reads
+        self.storage.read(offset, bytes).await
+    }
+
+    fn capacity(&self) -> usize {
+        self.storage.capacity()
+    }
+}
+
+impl<'a, S> Storage for RmwNorFlashStorage<'a, S>
+where
+    S: NorFlash,
+{
+    async fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
+        // Perform read/modify/write operations on the byte slice.
+        let last_page = self.storage.capacity() / S::ERASE_SIZE;
+
+        // `data` is the part of `bytes` contained within `page`, and `addr` is the
+        // address offset of `page` plus any offset into the page as requested by `offset`.
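+        // For example (illustrative numbers only): with `ERASE_SIZE == 4096`,
+        // `offset == 4000` and `bytes.len() == 200`, the iterator yields
+        // `(bytes[..96], page 0, 4000)` followed by `(bytes[96..], page 1, 4096)`.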
+        for (data, page, addr) in (0..last_page as u32)
+            .map(move |i| Page::new(i, S::ERASE_SIZE))
+            .overlaps(bytes, offset)
+        {
+            let offset_into_page = addr.saturating_sub(page.start) as usize;
+
+            self.storage
+                .read(page.start, &mut self.merge_buffer[..S::ERASE_SIZE])
+                .await?;
+
+            // If we cannot write multiple times to the same page, we will have to erase it
+            self.storage.erase(page.start, page.end()).await?;
+            self.merge_buffer[..S::ERASE_SIZE]
+                .iter_mut()
+                .skip(offset_into_page)
+                .zip(data)
+                .for_each(|(byte, input)| *byte = *input);
+            self.storage
+                .write(page.start, &self.merge_buffer[..S::ERASE_SIZE])
+                .await?;
+        }
+        Ok(())
+    }
+}
+
+/// A [`Storage`] adapter for [`MultiwriteNorFlash`] devices that uses a
+/// read-modify-write strategy, but skips the erase whenever the new data only
+/// clears bits (1 -> 0) relative to what is already stored in the page.
+pub struct RmwMultiwriteNorFlashStorage<'a, S> {
+    storage: S,
+    merge_buffer: &'a mut [u8],
+}
+
+impl<'a, S> RmwMultiwriteNorFlashStorage<'a, S>
+where
+    S: MultiwriteNorFlash,
+{
+    /// Instantiate a new generic `Storage` from a `NorFlash` peripheral
+    ///
+    /// **NOTE** This will panic if the provided merge buffer is smaller than
+    /// the erase size of the flash peripheral.
+    pub fn new(nor_flash: S, merge_buffer: &'a mut [u8]) -> Self {
+        if merge_buffer.len() < S::ERASE_SIZE {
+            panic!("Merge buffer is too small");
+        }
+
+        Self {
+            storage: nor_flash,
+            merge_buffer,
+        }
+    }
+}
+
+impl<'a, S> ReadStorage for RmwMultiwriteNorFlashStorage<'a, S>
+where
+    S: ReadNorFlash,
+{
+    type Error = S::Error;
+
+    async fn read(&mut self, offset: u32, bytes: &mut [u8]) -> Result<(), Self::Error> {
+        // Nothing special to be done for reads
+        self.storage.read(offset, bytes).await
+    }
+
+    fn capacity(&self) -> usize {
+        self.storage.capacity()
+    }
+}
+
+impl<'a, S> Storage for RmwMultiwriteNorFlashStorage<'a, S>
+where
+    S: MultiwriteNorFlash,
+{
+    async fn write(&mut self, offset: u32, bytes: &[u8]) -> Result<(), Self::Error> {
+        // Perform read/modify/write operations on the byte slice.
+        let last_page = self.storage.capacity() / S::ERASE_SIZE;
+
+        // `data` is the part of `bytes` contained within `page`, and `addr` is the
+        // address offset of `page` plus any offset into the page as requested by `offset`.
+        for (data, page, addr) in (0..last_page as u32)
+            .map(move |i| Page::new(i, S::ERASE_SIZE))
+            .overlaps(bytes, offset)
+        {
+            let offset_into_page = addr.saturating_sub(page.start) as usize;
+
+            self.storage
+                .read(page.start, &mut self.merge_buffer[..S::ERASE_SIZE])
+                .await?;
+
+            let rhs = &self.merge_buffer[offset_into_page..S::ERASE_SIZE];
+            let is_subset = data.iter().zip(rhs.iter()).all(|(a, b)| *a & *b == *a);
+
+            // Check if we can write the data block directly, under the limitations imposed by NorFlash:
+            // - We can only change 1's to 0's
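+            // For example (illustrative values, not from the original source): writing
+            // 0b0101 over an existing 0b1101 only clears bit 3, so no erase is needed,
+            // whereas writing 0b0110 over 0b1101 would require setting bit 1 (0 -> 1)
+            // and therefore falls through to the erase path below.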
+            if is_subset {
+                // Use `merge_buffer` as scratch space for padding `data` out to a
+                // `WRITE_SIZE`-aligned start and length.
+                let offset = addr as usize % S::WRITE_SIZE;
+                // Round the padded length up to the next multiple of `WRITE_SIZE`.
+                let aligned_end =
+                    (offset + data.len() + S::WRITE_SIZE - 1) / S::WRITE_SIZE * S::WRITE_SIZE;
+                // 0xff padding leaves the surrounding bytes unchanged on NOR flash.
+                self.merge_buffer[..aligned_end].fill(0xff);
+                self.merge_buffer[offset..offset + data.len()].copy_from_slice(data);
+                self.storage
+                    .write(addr - offset as u32, &self.merge_buffer[..aligned_end])
+                    .await?;
+            } else {
+                self.storage.erase(page.start, page.end()).await?;
+                self.merge_buffer[..S::ERASE_SIZE]
+                    .iter_mut()
+                    .skip(offset_into_page)
+                    .zip(data)
+                    .for_each(|(byte, input)| *byte = *input);
+                self.storage
+                    .write(page.start, &self.merge_buffer[..S::ERASE_SIZE])
+                    .await?;
+            }
+        }
+        Ok(())
+    }
+}