From eab7da6a996d8a30684156737cf37dfca093ad6f Mon Sep 17 00:00:00 2001 From: husky Date: Fri, 11 Aug 2023 15:59:26 -0700 Subject: [PATCH] multi block flush impl --- src/lib.rs | 247 +++++++++++++++++++++++++++++++++++++++++++++++-- src/structs.rs | 2 + 2 files changed, 243 insertions(+), 6 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0cc9448..792ddbd 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -382,6 +382,7 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con target_block: datablock_start, target_block_count: datablock_count, list_block: 0, + old_list_block: 0, // filled in once flushed list_block_crc32: 0, }; @@ -399,11 +400,12 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con } // find a free data block for the list block - let list_block_index = find_first_unallocated_datablock(sb, bd)?; + let list_block_indexs = find_count_unallocated_datablocks(sb, bd, 2)?; // set the content and then rewrite the journal entry // note: cLion incorrectly says that this is unsafe, writing to a union is safe - entry.content.multiblock_write.list_block = list_block_index; + entry.content.multiblock_write.list_block = list_block_indexs[0]; + entry.content.multiblock_write.old_list_block = list_block_indexs[1]; entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32; // calculate the crc32 @@ -418,7 +420,10 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con } // allocate the data block - if !unsafe { set_datablock_allocation_status(list_block_index, sb, bd, true) } { + if !unsafe { set_datablock_allocation_status(list_block_indexs[0], sb, bd, true) } { + return None; + } + if !unsafe { set_datablock_allocation_status(list_block_indexs[1], sb, bd, true) } { return None; } @@ -438,6 +443,11 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con direct_block_addresses: [0; 12], }; + let mut old_list_block = ListBlock { + 
using_indirect_blocks: false, + direct_block_addresses: [0; 12], + }; + let mut indirect_blocks_waiting_for_allocation_to_be_set = Vec::new(); // if using indirect blocks, only fill out the first (12 - 3) = 9 entries @@ -492,10 +502,23 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con } } - // write the list block + // read target inode, and write the old list block + let target_inode = unsafe { read_inode(containing_inode_index, sb, bd) }; + if target_inode.is_none() { + return None; + } + let target_inode = target_inode.unwrap(); + old_list_block.using_indirect_blocks = target_inode.flags & InodeFlags::INDIRECT as u32 != 0; + old_list_block.direct_block_addresses = target_inode.direct_block_addresses; + + // write the list blocks let buf = [0; core::mem::size_of::<ListBlock>()]; unsafe { core::ptr::write(buf.as_ptr() as *mut ListBlock, list_block); } - if !unsafe { write_datablock(list_block_index, sb, bd, &buf) } { + if !unsafe { write_datablock(list_block_indexs[0], sb, bd, &buf) } { + return None; + } + unsafe { core::ptr::write(buf.as_ptr() as *mut ListBlock, old_list_block); } + if !unsafe { write_datablock(list_block_indexs[1], sb, bd, &buf) } { + return None; + } @@ -638,7 +661,7 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry return false; } } else { - if inode.flags | InodeFlags::INDIRECT as u32 == 0 { + if inode.flags & InodeFlags::INDIRECT as u32 == 0 { // inode doesn't have indirect blocks, this entry is corrupt return false; } @@ -729,5 +752,217 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry } } + true +} + +/// Checks the integrity of a multi block write journal entry +/// Returns true if the journal entry is valid, false otherwise +pub fn verify_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journal_entry: &JournalEntry) -> bool { + if journal_entry.operation != JournalOperation::MultiblockWrite as u32 { + return false; + } + let content = 
unsafe { journal_entry.content.multiblock_write }; + if content.flags > 6 { + return false; + } + let mut content_clone = journal_entry.content.clone(); + content_clone.multiblock_write.flags = 0; + let mut buf = [0; core::mem::size_of::<JournalEntryContents>()]; + unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); } + let hash = crc32::crc32(&buf); + if hash != journal_entry.zeroed_content_crc32 { + return false; + } + + // check the source data block + let buf = read_datablock(content.list_block, sb, bd); + let crc32 = crc32::crc32(&buf); + if crc32 != content.list_block_crc32 { + return false; + } + + // should be all good! (: + true +} + +/// Flushes a multi block write journal entry +/// Should be safe to call at anytime, and shouldn't corrupt anything if the system crashes +/// or if the journal entry is corrupt +/// Returns false if the journal entry is corrupt, the block device is full, or if the block device is read only +/// Otherwise, returns true +pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool { + // read the journal entry + let journal_entry = unsafe { read_journal_entry(entry_index, sb, bd) }; + if journal_entry.is_none() { + return false; + } + let mut journal_entry = journal_entry.unwrap(); + + // verify the journal entry + if !verify_multi_block_write(sb, bd, &journal_entry) { + return false; + } + + // because everything is verified, we should be able to execute steps 8 through 11 and + // not have to worry about crashes; since the journal entry is good, we can repeat these steps + // until they succeed + + let content = unsafe { journal_entry.content.multiblock_write }; + if content.flags < 5 && content.flags > 0 { + // source block wasn't written, this entry is corrupt + return false; + } + + // if flag is 5, copy current data to old list block and then overwrite with new data + if content.flags == 5 { + let inode = read_inode(content.target_inode, sb, bd); + if 
inode.is_none() { + return false; + } + let inode = inode.unwrap(); + + // get dbas of new list block + let buf = read_datablock(content.list_block, sb, bd); + let list_block = unsafe { core::ptr::read(buf.as_ptr() as *const ListBlock) }; + let dba = list_block.direct_block_addresses; + // update inode + let mut inode = inode; + inode.direct_block_addresses = dba; + if !unsafe { write_inode(content.target_inode, sb, bd, inode) } { + return false; + } + + // update journal entry + // note: CLion incorrectly says that this is unsafe, writing to a union is safe + journal_entry.content.multiblock_write.flags = JMWFlags::Written as u32; + if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } { + return false; + } + } + + let content = unsafe { journal_entry.content.multiblock_write }; + + // if flag is 6, we have to find the differences between the old and new list blocks and deallocate the unused blocks + if content.flags == 6 { + let mut unused_datablocks: Vec<Index>; + let list_block = read_datablock(content.list_block, sb, bd); + let list_block = unsafe { core::ptr::read(list_block.as_ptr() as *const ListBlock) }; + let old_list_block = read_datablock(content.old_list_block, sb, bd); + let old_list_block = unsafe { core::ptr::read(old_list_block.as_ptr() as *const ListBlock) }; + if !list_block.using_indirect_blocks { + if !old_list_block.using_indirect_blocks { + // simplest case, just check what blocks are different + unused_datablocks = old_list_block.direct_block_addresses.iter() + .filter(|&x| !list_block.direct_block_addresses.contains(x)) + .copied().collect(); + } else { + // old list block uses indirect blocks, new one doesn't + + unused_datablocks = old_list_block.direct_block_addresses[0..9].iter() + .filter(|&x| !list_block.direct_block_addresses.contains(x)) + .copied().collect(); + // compare indirect blocks + for i in 9..12 { + let indirect_block = read_datablock(old_list_block.direct_block_addresses[i], sb, bd); + let count = 
u64::from_be_bytes(indirect_block[0..8].try_into().unwrap()); + for j in 0..count as usize { + let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap()); + if !list_block.direct_block_addresses.contains(&dba) { + unused_datablocks.push(dba); + } + } + } + } + } else if !old_list_block.using_indirect_blocks { + // new list block uses indirect blocks, old one doesn't + + unused_datablocks = old_list_block.direct_block_addresses.iter() + .filter(|&x| !list_block.direct_block_addresses[0..9].contains(x)) + .copied().collect(); + // compare indirect blocks + for i in 9..12 { + let indirect_block = read_datablock(list_block.direct_block_addresses[i], sb, bd); + let count = u64::from_be_bytes(indirect_block[0..8].try_into().unwrap()); + for j in 0..count as usize { + let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap()); + if unused_datablocks.contains(&dba) { + unused_datablocks.retain(|&x| x != dba); + } + } + } + } else { + // both use indirect blocks + // most complicated case, compare all blocks + unused_datablocks = old_list_block.direct_block_addresses[0..9].iter() + .filter(|&x| !list_block.direct_block_addresses[0..9].contains(x)) + .copied().collect(); + + let mut new_indirect_blocks: Vec<Index> = Vec::new(); + for i in 9..12 { + let indirect_block = read_datablock(list_block.direct_block_addresses[i], sb, bd); + let count = u64::from_be_bytes(indirect_block[0..8].try_into().unwrap()); + for j in 0..count as usize { + let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap()); + new_indirect_blocks.push(dba); + } + } + + // compare indirect blocks + for i in 9..12 { + let indirect_block = read_datablock(old_list_block.direct_block_addresses[i], sb, bd); + let count = u64::from_be_bytes(indirect_block[0..8].try_into().unwrap()); + for j in 0..count as usize { + let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap()); + if 
!new_indirect_blocks.contains(&dba) { + unused_datablocks.push(dba); + } + } + } + } + + // deallocate unused blocks + for dba in unused_datablocks { + if !unsafe { set_datablock_allocation_status(dba, sb, bd, false) } { + return false; + } + } + + // deallocate old list block + if !unsafe { set_datablock_allocation_status(content.old_list_block, sb, bd, false) } { + return false; + } + + // deallocate list block + if !unsafe { set_datablock_allocation_status(content.list_block, sb, bd, false) } { + return false; + } + + // update journal entry + // note: CLion incorrectly says that this is unsafe, writing to a union is safe + journal_entry.content.multiblock_write.flags = JMWFlags::CompleteAndDeallocated as u32; + if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } { + return false; + } + } + + let content = unsafe { journal_entry.content.multiblock_write }; + + // if flag is 0, move the journal head to the next entry + if content.flags == 0 { + let head = sb.journal_position; + let mut next = head + 1; + let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as u32; + if next >= max_index { + next = 0; + } + + // write superblock + let mut sb = *sb; + sb.journal_position = next; + if !unsafe { write_superblock(sb, bd) } { + return false; + } + } + + true +} \ No newline at end of file diff --git a/src/structs.rs b/src/structs.rs index ae6d99c..b249fc6 100644 --- a/src/structs.rs +++ b/src/structs.rs @@ -460,6 +460,8 @@ pub struct JournalMultiblockWrite { pub target_block_count: Index, /// block number of list block structure pub list_block: Index, + /// block number of old list block structure + pub old_list_block: Index, /// crc32 hash of the list block + pub list_block_crc32: u32, }