diff --git a/src/lib.rs b/src/lib.rs
index 57b57c3..0cc9448 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -5,7 +5,7 @@ extern crate alloc;
 use alloc::vec;
 use alloc::vec::Vec;
 use vapfs::{BlockDevice, Index};
-use crate::structs::{Inode, JBRFlags, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
+use crate::structs::{Inode, InodeFlags, JBRFlags, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
 
 pub mod btree;
 pub mod structs;
@@ -306,6 +306,7 @@ pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, co
         target_is_inode: write_to_inode,
         target_inode: containing_inode_index,
         target_block: otherwise_datablock_index.unwrap_or(0),
+        real_target_block: 0, // filled in once flushed
         source_block: 0, // filled in once allocated
         source_block_crc32: 0, // filled in once allocated
     };
@@ -404,6 +405,14 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
     // note: cLion incorrectly says that this is unsafe, writing to a union is safe
     entry.content.multiblock_write.list_block = list_block_index;
     entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32;
+
+    // calculate the crc32
+    let mut content_cloned = entry.content.clone();
+    content_cloned.multiblock_write.flags = 0;
+    let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
+    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_cloned); }
+    entry.zeroed_content_crc32 = crc32::crc32(&buf);
+
     if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
         return None;
     }
@@ -434,9 +443,7 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
     // if using indirect blocks, only fill out the first (12 - 3) = 9 entries
     // otherwise, fill out all 12 entries
     if list_block.using_indirect_blocks {
-        for i in 0..9 {
-            list_block.direct_block_addresses[i] = allocated_blocks[i];
-        }
+        list_block.direct_block_addresses[..9].copy_from_slice(&allocated_blocks[..9]);
 
         // if using indirect blocks, fit the remaining entries into the indirect blocks
         // layout is u64 count followed by u64 addresses
@@ -536,4 +543,191 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
 
     // return the journal entry index
     Some(entry_index)
+}
+
+/// Checks the integrity of a single block write journal entry
+/// Returns true if the journal entry is valid, false otherwise
+pub fn verify_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journal_entry: &JournalEntry) -> bool {
+    if journal_entry.operation != JournalOperation::SingleBlockWrite as u32 {
+        return false;
+    }
+    let content = unsafe { journal_entry.content.block_write };
+    if content.flags > 4 {
+        return false;
+    }
+    let mut content_clone = journal_entry.content.clone();
+    content_clone.block_write.flags = 0;
+    let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
+    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
+    let hash = crc32::crc32(&buf);
+    if hash != journal_entry.zeroed_content_crc32 {
+        return false;
+    }
+
+    // check the source data block
+    let buf = read_datablock(content.source_block, sb, bd);
+    let crc32 = crc32::crc32(&buf);
+    if crc32 != content.source_block_crc32 {
+        return false;
+    }
+
+    // should be all good! (:
+    true
+}
+
+/// Flushes a single block write journal entry
+/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes
+/// or if the journal entry is corrupt
+/// Returns false if the journal entry is corrupt, the block device is full, or if the block device is read-only
+/// Otherwise, returns true
+pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
+    // read the journal entry
+    let journal_entry = unsafe { read_journal_entry(entry_index, sb, bd) };
+    if journal_entry.is_none() {
+        return false;
+    }
+    let mut journal_entry = journal_entry.unwrap();
+
+    // verify the journal entry
+    if !verify_single_block_write(sb, bd, &journal_entry) {
+        return false;
+    }
+
+    // because everything is verified, we should be able to execute steps 6 through 9 and
+    // not have to worry about crashes; since the journal entry is good, we can repeat these steps
+    // until they succeed
+
+    let content = unsafe { journal_entry.content.block_write };
+    if content.flags < 3 && content.flags > 0 {
+        // source block wasn't written, this entry is corrupt
+        return false;
+    }
+
+    // if flag is 3, either update inode metadata or copy the data to the destination block
+    if content.flags == 3 {
+        if content.target_is_inode {
+            // copy the data directly to the target inode's block
+            let buf = read_datablock(content.source_block, sb, bd);
+            let mut inode_buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
+            inode_buf[0..core::mem::size_of::<Inode>()].clone_from_slice(&buf[0..core::mem::size_of::<Inode>()]);
+            let inode = unsafe { core::ptr::read(inode_buf.as_ptr() as *const Inode) };
+            if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
+                return false;
+            }
+        } else {
+            // update inode metadata
+            let inode = read_inode(content.target_inode, sb, bd);
+            if inode.is_none() {
+                return false;
+            }
+            let mut inode = inode.unwrap();
+            // target block is either an index into the direct blocks or an indirect block (if greater than 11)
+            if content.target_block < 12 {
+                let previous_block = inode.direct_block_addresses[content.target_block as usize];
+
+                // update the journal entry
+                journal_entry.content.block_write.real_target_block = previous_block;
+                if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
+                    return false;
+                }
+
+                inode.direct_block_addresses[content.target_block as usize] = content.source_block;
+
+                // update the inode
+                if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
+                    return false;
+                }
+            } else {
+                if (inode.flags & InodeFlags::INDIRECT as u32) == 0 {
+                    // inode doesn't have indirect blocks, this entry is corrupt
+                    return false;
+                }
+
+                // figure out which indirect block we need to write to (either 1, 2, or 3)
+                // range 12..(12*2) is indirect block 1
+                // range (12*2)..(12*3) is indirect block 2
+                // range (12*3)..(12*4) is indirect block 3
+                let indirect_block_index = (content.target_block - 12) / 12;
+                let indirect_block_offset = (content.target_block - 12) % 12;
+                let indirect_block = inode.direct_block_addresses[indirect_block_index as usize];
+                let mut indirect_block_buf = read_datablock(indirect_block, sb, bd);
+                // get the count
+                let mut count = u64::from_be_bytes(indirect_block_buf.as_slice()[0..8].try_into().unwrap());
+                // place the source block at index (indirect_block_offset * 8) + 8
+                let target_index = (indirect_block_offset * 8) + 8;
+
+                // if there's already a block at the target index, we need to update the journal entry
+                if indirect_block_offset < count {
+                    // update the journal entry
+                    journal_entry.content.block_write.real_target_block = u64::from_be_bytes(indirect_block_buf.as_slice()[target_index as usize..(target_index + 8) as usize].try_into().unwrap());
+                    if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
+                        return false;
+                    }
+                }
+
+                indirect_block_buf.as_mut_slice()[target_index as usize..(target_index + 8) as usize].clone_from_slice(&content.source_block.to_be_bytes());
+                // update the count
+                if count < indirect_block_offset + 1 {
+                    count = indirect_block_offset + 1;
+                }
+                indirect_block_buf.as_mut_slice()[0..8].clone_from_slice(&count.to_be_bytes());
+                // write the indirect block back to the block device
+                if !unsafe { write_datablock(indirect_block, sb, bd, &indirect_block_buf) } {
+                    return false;
+                }
+            }
+        }
+
+        // update journal entry
+        // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+        journal_entry.content.block_write.flags = JMWFlags::Written as u32;
+        if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
+            return false;
+        }
+    }
+
+    let content = unsafe { journal_entry.content.block_write };
+
+    // if flag is 4, deallocate the source block
+    if content.flags == 4 {
+        if content.target_is_inode {
+            let block_to_deallocate = content.source_block; // data was copied
+            if !unsafe { set_datablock_allocation_status(block_to_deallocate, sb, bd, false) } {
+                return false;
+            }
+        } else {
+            let block_to_deallocate = content.real_target_block; // data was moved, this should contain the old block
+            if !unsafe { set_datablock_allocation_status(block_to_deallocate, sb, bd, false) } {
+                return false;
+            }
+        }
+
+        // update journal entry
+        // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+        journal_entry.content.block_write.flags = JMWFlags::CompleteAndDeallocated as u32;
+        if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
+            return false;
+        }
+    }
+
+    let content = unsafe { journal_entry.content.block_write };
+
+    // if flag is 0, move the journal head to the next entry
+    if content.flags == 0 {
+        let head = sb.journal_position;
+        let mut next = head + 1;
+        let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as u32;
+        if next >= max_index {
+            next = 0;
+        }
+
+        // write superblock
+        let mut sb = *sb;
+        sb.journal_position = next;
+        if !unsafe { write_superblock(sb, bd) } {
+            return false;
+        }
+    }
+
+    true
+}
\ No newline at end of file
diff --git a/src/structs.rs b/src/structs.rs
index 0749dea..ae6d99c 100644
--- a/src/structs.rs
+++ b/src/structs.rs
@@ -13,6 +13,7 @@ pub const MAGIC: u64 = 0x766170554653;
 /// Free data blocks bitmap is data_block_count / 8 bytes long (rounded up).
 /// Free inodes bitmap is inode_count / 8 bytes long (rounded up).
 #[repr(C)]
+#[derive(Copy, Clone)]
 pub struct Superblock {
     /// magic number that identifies the Superblock as a valid VapUFS filesystem
     pub magic: u64,
@@ -378,6 +379,8 @@ pub struct JournalBlockWrite {
     /// if greater than 12, the indirect block this points to (i / 12) will be used, and the index in that block will be i % 12)
     /// (if target is an inode, this field will be ignored)
     pub target_block: Index,
+    /// actual data block number, unused if target is an inode
+    pub real_target_block: Index,
     /// block number of source data block
     pub source_block: Index,
     /// crc32 hash of the source data block
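
Note (not part of the patch): both the new CRC code in schedule_multi_block_write and the check in verify_single_block_write rely on the same "zeroed CRC" pattern. The flags field is the only part of the journal entry contents that changes while the entry is in flight, so the hash is taken over a copy with flags forced to zero and can be recomputed at any stage. A minimal sketch of that pattern for the single block write case is shown below; the helper name zeroed_block_write_crc32 is illustrative only and assumes the same items are in scope as in src/lib.rs.

    // Sketch only, not part of the patch: compute the checksum of a journal
    // entry's contents with the mutable flags field zeroed, so the hash stays
    // stable across the entry's state transitions.
    fn zeroed_block_write_crc32(contents: &JournalEntryContents) -> u32 {
        let mut cloned = contents.clone();
        // flags is the only field that mutates while the entry is pending
        cloned.block_write.flags = 0;
        let mut buf = [0u8; core::mem::size_of::<JournalEntryContents>()];
        // copy the raw union bytes into a byte buffer so they can be hashed
        unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, cloned); }
        crc32::crc32(&buf)
    }

The patch inlines this computation at both call sites; a shared helper along these lines would simply keep the scheduling and verification paths from drifting apart.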