From 9ddefddf66da6c7ea45e4f84bf180889a41a9e80 Mon Sep 17 00:00:00 2001
From: husky
Date: Mon, 7 Aug 2023 15:15:39 -0700
Subject: [PATCH] journal entry creation

---
 src/lib.rs     | 310 ++++++++++++++++++++++++++++++++++++++++++++++++-
 src/structs.rs |  42 ++++---
 2 files changed, 336 insertions(+), 16 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index 235dc79..57b57c3 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,9 +2,10 @@
 
 extern crate alloc;
 
+use alloc::vec;
 use alloc::vec::Vec;
 use vapfs::{BlockDevice, Index};
-use crate::structs::{Inode, JournalEntry, Superblock};
+use crate::structs::{Inode, JBRFlags, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
 
 pub mod btree;
 pub mod structs;
@@ -135,6 +136,32 @@ pub fn find_first_unallocated_datablock(sb: &Superblock, bd: &mut dyn BlockDevic
     bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET).map(|index| index as Index)
 }
 
+/// Finds a number of unallocated datablocks and returns their indices.
+/// Will return None if not enough unallocated datablocks are found, or if the block device cannot fill the buffer.
+pub fn find_count_unallocated_datablocks(sb: &Superblock, bd: &mut dyn BlockDevice, count: usize) -> Option<Vec<Index>> {
+    // datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
+    let bitmap_offset = 1024 + 156;
+    let bitmap_length = (sb.data_block_count + 7) / 8;
+    let mut bitmap_buf: Vec<u8> = Vec::new();
+    bitmap_buf.resize(bitmap_length as usize, 0);
+    bd.seek(bitmap_offset);
+    let read_count = bd.read_blocks(&mut bitmap_buf);
+    if read_count < bitmap_length as usize {
+        return None;
+    }
+    let mut found = Vec::new();
+    while found.len() < count {
+        if let Some(i) = bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET) {
+            found.push(i as Index);
+            // set the bit so we don't find it again
+            bitmap::set_bit(&mut bitmap_buf, i, bitmap::SET);
+        } else {
+            return None;
+        }
+    }
+    Some(found)
+}
+
 /// Finds the first unallocated inode and returns its index.
 /// Will return None if no unallocated inode is found, or if the block device cannot fill the buffer.
 pub fn find_first_unallocated_inode(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
@@ -228,4 +255,285 @@ pub unsafe fn write_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn Bl
     bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
     let write_count = bd.write_blocks(&buf);
     write_count == core::mem::size_of::<JournalEntry>()
-}
\ No newline at end of file
+}
+
+/// Checks if a journal entry has been completed.
+/// Will return None if the index is out of bounds or if the block device cannot fill the buffer,
+/// or if the entry is invalid
+pub fn is_journal_entry_complete(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
+    let entry = read_journal_entry(index, sb, bd)?;
+    // if flags == 0, the entry is complete
+    const SINGLEBLOCK: u32 = JournalOperation::SingleBlockWrite as u32;
+    const MULTIBLOCK: u32 = JournalOperation::MultiblockWrite as u32;
+    match entry.operation {
+        SINGLEBLOCK => unsafe { Some(entry.content.block_write.flags == 0) },
+        MULTIBLOCK => unsafe { Some(entry.content.multiblock_write.flags == 0) },
+        _ => None,
+    }
+}
+
+/// Returns the index of the next unused journal entry.
+/// Will loop around to the beginning of the journal if the end is reached.
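+/// Returns None if every entry is still in use after a full pass (i.e. the journal is full), or if an entry cannot be read.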
+pub fn next_journal_position(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
+    let mut index = sb.journal_position as Index;
+    let max_index = (sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64;
+    loop {
+        let entry = read_journal_entry(index, sb, bd)?;
+        // if flags == 0, the entry is complete
+        // flags should always be the same size and at the same offset in the union, so we can just check one
+        if unsafe { entry.content.block_write.flags == 0 } {
+            return Some(index);
+        }
+        index += 1;
+        if index >= max_index {
+            index = 0;
+        }
+        if index == sb.journal_position as Index {
+            // we've looped around to the beginning of the journal
+            return None;
+        }
+    }
+}
+
+/// Creates a journal entry for a single block write operation.
+/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes.
+/// Returns None if the journal is full, or if the block device cannot be written to.
+/// Returns the journal entry index if successful.
+pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, write_to_inode: bool, otherwise_datablock_index: Option<Index>, data: &[u8]) -> Option<Index> {
+    let entry_index = next_journal_position(sb, bd)?;
+    let entry_content = JournalBlockWrite {
+        flags: 0,
+        target_is_inode: write_to_inode,
+        target_inode: containing_inode_index,
+        target_block: otherwise_datablock_index.unwrap_or(0),
+        source_block: 0, // filled in once allocated
+        source_block_crc32: 0, // filled in once allocated
+    };
+
+    let mut entry = JournalEntry {
+        operation: JournalOperation::SingleBlockWrite as u32,
+        zeroed_content_crc32: 0,
+        content: JournalEntryContents {
+            block_write: entry_content,
+        },
+    };
+
+    // write the journal entry
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // find a free data block
+    let data_block_index = find_first_unallocated_datablock(sb, bd)?;
+
+    // set the content and then rewrite the journal entry
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.block_write.source_block = data_block_index;
+    entry.content.block_write.flags = JBRFlags::Chosen as u32;
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+    // allocate the data block
+    if !unsafe { set_datablock_allocation_status(data_block_index, sb, bd, true) } {
+        return None;
+    }
+    // set the content and then rewrite the journal entry
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.block_write.flags = JBRFlags::Allocated as u32;
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // write the data to the data block
+    if !unsafe { write_datablock(data_block_index, sb, bd, data) } {
+        return None;
+    }
+
+    let written_data = read_datablock(data_block_index, sb, bd);
+
+    // set the crc32 and stored flag
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.block_write.source_block_crc32 = crc32::crc32(&written_data);
+    entry.content.block_write.flags = JBRFlags::Stored as u32;
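+    // note: the checksum below is taken over the contents with flags zeroed, so later flag updates do not invalidate it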
+    // generate crc32 of the entry
+    let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
+    let mut clone = entry.content.clone();
+    clone.block_write.flags = 0;
+    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, clone); }
+    entry.zeroed_content_crc32 = crc32::crc32(&buf);
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // all further steps will be performed on a journal flush
+    Some(entry_index)
+}
+
+/// Creates a journal entry for a multi block write operation.
+/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes.
+/// Returns None if the journal is full, or if the block device cannot be written to.
+/// Returns the journal entry index if successful.
+pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, datablock_start: Index, datablock_count: Index, data: &[u8]) -> Option<Index> {
+    let entry_index = next_journal_position(sb, bd)?;
+    let entry_content = JournalMultiblockWrite {
+        flags: 0,
+        target_inode: containing_inode_index,
+        target_block: datablock_start,
+        target_block_count: datablock_count,
+        list_block: 0,
+        list_block_crc32: 0,
+    };
+
+    let mut entry = JournalEntry {
+        operation: JournalOperation::MultiblockWrite as u32,
+        zeroed_content_crc32: 0,
+        content: JournalEntryContents {
+            multiblock_write: entry_content,
+        },
+    };
+
+    // write the journal entry
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // find a free data block for the list block
+    let list_block_index = find_first_unallocated_datablock(sb, bd)?;
+
+    // set the content and then rewrite the journal entry
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.multiblock_write.list_block = list_block_index;
+    entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32;
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // allocate the list block
+    if !unsafe { set_datablock_allocation_status(list_block_index, sb, bd, true) } {
+        return None;
+    }
+
+    // set the content and then rewrite the journal entry
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.multiblock_write.flags = JMWFlags::AllocatedList as u32;
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // find the data blocks
+    let allocated_blocks = find_count_unallocated_datablocks(sb, bd, datablock_count as usize)?;
+
+    // create a list block
+    let mut list_block = ListBlock {
+        using_indirect_blocks: datablock_count > 12,
+        direct_block_addresses: [0; 12],
+    };
+
+    let mut indirect_blocks_waiting_for_allocation_to_be_set = Vec::new();
+
+    // if using indirect blocks, only fill out the first (12 - 3) = 9 entries
+    // otherwise, fill out all 12 entries
+    if list_block.using_indirect_blocks {
+        for i in 0..9 {
+            list_block.direct_block_addresses[i] = allocated_blocks[i];
+        }
+
+        // if using indirect blocks, fit the remaining entries into the indirect blocks
+        // layout is u64 count followed by u64 addresses
+        let max_addresses_per_block = (sb.block_size as usize - core::mem::size_of::<u64>()) / core::mem::size_of::<u64>();
+        let mut indirect_block_count = (datablock_count - 9) / max_addresses_per_block as u64;
+        // if the count is not a multiple of the max addresses per block, add one
+        if (datablock_count - 9) % max_addresses_per_block as u64 != 0 {
+            indirect_block_count += 1;
+        }
+        // if the count is over 3, return None
+        if indirect_block_count > 3 {
+            return None;
+        }
+
+        // find the indirect blocks
+        let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_block_count as usize)?;
+        for i in 0..indirect_block_count {
+            list_block.direct_block_addresses[9 + i as usize] = indirect_blocks[i as usize];
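+            // allocation status for these indirect blocks is set later, after the list block is written and the journal entry updated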
+            indirect_blocks_waiting_for_allocation_to_be_set.push(indirect_blocks[i as usize]);
+        }
+
+        // write the indirect blocks
+        let mut indirect_block_data = vec![0u8; (max_addresses_per_block + 1) * core::mem::size_of::<u64>()];
+        for i in 0..indirect_block_count {
+            // write the count
+            let count = if i == indirect_block_count - 1 && (datablock_count - 9) % max_addresses_per_block as u64 != 0 {
+                (datablock_count - 9) % max_addresses_per_block as u64
+            } else {
+                max_addresses_per_block as u64
+            };
+            unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count); }
+
+            // write the addresses
+            for j in 0..count {
+                unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).offset(1 + j as isize), allocated_blocks[9 + i as usize * max_addresses_per_block + j as usize]); }
+            }
+
+            // write the data
+            if !unsafe { write_datablock(list_block.direct_block_addresses[9 + i as usize], sb, bd, &indirect_block_data) } {
+                return None;
+            }
+        }
+    } else {
+        for i in 0..datablock_count as usize {
+            list_block.direct_block_addresses[i] = allocated_blocks[i];
+        }
+    }
+
+    // write the list block
+    let mut buf = [0u8; core::mem::size_of::<ListBlock>()];
+    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut ListBlock, list_block); }
+    if !unsafe { write_datablock(list_block_index, sb, bd, &buf) } {
+        return None;
+    }
+
+    // set the content and then rewrite the journal entry
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.multiblock_write.flags = JMWFlags::ChosenData as u32;
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // if we're using indirect blocks, set the allocation status of the indirect blocks
+    for block in indirect_blocks_waiting_for_allocation_to_be_set {
+        if !unsafe { set_datablock_allocation_status(block, sb, bd, true) } {
+            return None;
+        }
+    }
+
+    // set the allocation status of the data blocks
+    for block in &allocated_blocks {
+        if !unsafe { set_datablock_allocation_status(*block, sb, bd, true) } {
+            return None;
+        }
+    }
+
+    // update journal entry
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.multiblock_write.flags = JMWFlags::AllocatedData as u32;
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
+
+    // store the data in the data blocks
+    for i in 0..datablock_count {
+        if !unsafe { write_datablock(allocated_blocks[i as usize], sb, bd, &data[i as usize * sb.block_size as usize..(i as usize + 1) * sb.block_size as usize]) } {
+            return None;
+        }
+    }
+
+    // update journal entry
+    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
+    entry.content.multiblock_write.flags = JMWFlags::Stored as u32;
+    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
+        return None;
+    }
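+
+    // all further steps will be performed on a journal flush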
+    // return the journal entry index
+    Some(entry_index)
+}
\ No newline at end of file
diff --git a/src/structs.rs b/src/structs.rs
index cb4cf44..0749dea 100644
--- a/src/structs.rs
+++ b/src/structs.rs
@@ -40,8 +40,9 @@ pub struct Superblock {
     pub last_modification_time: Timestamp,
     /// crc32 checksum of this Superblock
     pub checksum: u32,
-    /// reserved u32
-    pub reserved_1: u32,
+    /// index of the current journal entry being processed
+    /// incremented after a journal entry is committed, set to 0 if past max
+    pub journal_position: u32,
     /// reserved values for expansion
     pub reserved: [u64; 7],
     // 156 bytes used so far
@@ -288,8 +289,8 @@ pub enum InodeFlags {
 /// # JournalEntry
 /// A Journal Entry
 #[repr(C)]
+#[derive(Copy, Clone)]
 pub struct JournalEntry {
-    pub flags: u32,
     /// JournalOperation
     pub operation: u32,
     /// crc32 hash of the content with flags set to zero, used to verify journal was fully written without verifying the stage
@@ -302,7 +303,6 @@ impl JournalEntry {
     pub fn convert_big_endian_to_native(&mut self) {
         #[cfg(target_endian = "little")]
         {
-            self.flags = u32::from_be(self.flags);
             self.operation = u32::from_be(self.operation);
             self.zeroed_content_crc32 = u32::from_be(self.zeroed_content_crc32);
             match self.operation {
@@ -322,7 +322,6 @@ impl JournalEntry {
     pub fn convert_native_to_big_endian(&mut self) {
         #[cfg(target_endian = "little")]
        {
-            self.flags = u32::to_be(self.flags);
             self.operation = u32::to_be(self.operation);
             self.zeroed_content_crc32 = u32::to_be(self.zeroed_content_crc32);
             match self.operation {
@@ -373,7 +372,11 @@ pub struct JournalBlockWrite {
     pub flags: u32,
     /// are we writing to an inode instead of a data block?
     pub target_is_inode: bool,
-    /// block number of target
+    /// target inode number
+    pub target_inode: Index,
+    /// target block number (if target is a data block, this will be the index in the inode's direct block array;
+    /// if greater than 12, the indirect block this points to (i / 12) will be used, and the index in that block will be i % 12)
+    /// (if target is an inode, this field will be ignored)
     pub target_block: Index,
     /// block number of source data block
     pub source_block: Index,
@@ -410,15 +413,17 @@ impl JournalBlockWrite {
 #[repr(u32)]
 #[derive(Copy, Clone)]
 pub enum JBRFlags {
+    /// source data block has been chosen but not yet allocated
+    Chosen = 1,
     /// source data block has been allocated
-    Allocated = 1,
+    Allocated = 2,
     /// data has been written to the source data block
-    Stored = 2,
+    Stored = 3,
     /// source data block has either replaced an old data block or has been written to an inode
-    Written = 3,
+    Written = 4,
     /// source data block (in the case of a write to an inode) or old data block (in the case of a write to a data block) has been deallocated
     /// (i.e. this journal entry has been fully committed)
-    CompleteAndDeallocated = 4,
+    CompleteAndDeallocated = 0,
 }
 
 /// # JournalMultiblockWrite
@@ -444,6 +449,8 @@ pub enum JBRFlags {
 pub struct JournalMultiblockWrite {
     /// JMWFlags stating how far the write has progressed
     pub flags: u32,
+    /// inode number of target inode
+    pub target_inode: Index,
     /// block number of first target block
     pub target_block: Index,
     /// number of target blocks
@@ -485,17 +492,21 @@ impl JournalMultiblockWrite {
 #[repr(u32)]
 #[derive(Copy, Clone)]
 pub enum JMWFlags {
+    /// list block has been chosen but not yet allocated
+    ChosenList = 1,
     /// list block has been allocated
-    AllocatedList = 1,
+    AllocatedList = 2,
+    /// data blocks have been chosen and stored in the list block but not yet allocated
+    ChosenData = 3,
     /// data blocks have been allocated
-    AllocatedData = 2,
+    AllocatedData = 4,
     /// data has been written to the data blocks and the list block
-    Stored = 3,
+    Stored = 5,
     /// data blocks have replaced old data blocks
-    Written = 4,
+    Written = 6,
     /// data blocks and list block have been deallocated
     /// (i.e. this journal entry has been fully committed)
-    CompleteAndDeallocated = 5,
+    CompleteAndDeallocated = 0,
 }
 
 /// # ListBlock
@@ -535,6 +546,7 @@ impl ListBlock {
 /// # JournalEntryContents
 /// union of all possible journal entries
 #[repr(C)]
+#[derive(Copy, Clone)]
 pub union JournalEntryContents {
     pub block_write: JournalBlockWrite,
     pub multiblock_write: JournalMultiblockWrite,