higher level journaled write functions

This commit is contained in:
husky 2023-08-12 16:38:37 -07:00
parent eab7da6a99
commit 8836a52ec7
No known key found for this signature in database
GPG key ID: 6B3D8CB511646891
2 changed files with 342 additions and 44 deletions

View file

@ -5,7 +5,7 @@ extern crate alloc;
use alloc::vec;
use alloc::vec::Vec;
use vapfs::{BlockDevice, Index};
use crate::structs::{Inode, InodeFlags, JBRFlags, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
use crate::structs::{Inode, InodeFlags, JBRFlags, JBRTargetType, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
pub mod btree;
pub mod structs;
@ -84,8 +84,8 @@ pub unsafe fn write_datablock(index: Index, sb: &Superblock, bd: &mut dyn BlockD
/// Checks if a datablock is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_datablock_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
if index >= bitmap_length {
return None;
@ -103,9 +103,9 @@ pub fn is_datablock_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockD
/// Checks if an inode is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_inode_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
// inode bitmap is at 1024 + 156 + datablock_bitmap_length byte offset,
// inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + 156 + ((sb.data_block_count + 7) / 8);
let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
if index >= bitmap_length {
return None;
@ -123,8 +123,8 @@ pub fn is_inode_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevic
/// Finds the first unallocated datablock and returns its index.
/// Will return None if no unallocated datablock is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_datablock(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
@ -139,8 +139,8 @@ pub fn find_first_unallocated_datablock(sb: &Superblock, bd: &mut dyn BlockDevic
/// Finds a number of unallocated datablocks and returns their indices.
/// Will return None if not enough unallocated datablocks are found, or if the block device cannot fill the buffer.
pub fn find_count_unallocated_datablocks(sb: &Superblock, bd: &mut dyn BlockDevice, count: usize) -> Option<Vec<Index>> {
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
@ -165,9 +165,9 @@ pub fn find_count_unallocated_datablocks(sb: &Superblock, bd: &mut dyn BlockDevi
/// Finds the first unallocated inode and returns its index.
/// Will return None if no unallocated inode is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_inode(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
// inode bitmap is at 1024 + 156 + datablock_bitmap_length byte offset,
// inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + 156 + ((sb.data_block_count + 7) / 8);
let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
@ -186,8 +186,8 @@ pub unsafe fn set_datablock_allocation_status(index: Index, sb: &Superblock, bd:
// todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
// todo! see how much time this saves?
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
if index >= bitmap_length {
return false;
@ -212,9 +212,9 @@ pub unsafe fn set_inode_allocation_status(index: Index, sb: &Superblock, bd: &mu
// todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
// todo! see how much time this saves?
// inode bitmap is at 1024 + 156 + datablock_bitmap_length byte offset,
// inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + 156 + ((sb.data_block_count + 7) / 8);
let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
if index >= bitmap_length {
return false;
@ -275,7 +275,7 @@ pub fn is_journal_entry_complete(index: Index, sb: &Superblock, bd: &mut dyn Blo
/// Returns the index of the next unused journal entry.
/// Will loop around to the beginning of the journal if the end is reached.
pub fn next_journal_position(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
let mut index = sb.journal_position as Index;
let mut index = sb.journal_position as Index + 1;
let max_index = (sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64;
loop {
let entry = read_journal_entry(index, sb, bd)?;
@ -299,11 +299,11 @@ pub fn next_journal_position(sb: &Superblock, bd: &mut dyn BlockDevice) -> Optio
/// Should be safe to call at anytime, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, or if the block device cannot be written to.
/// Returns the journal entry index if successful.
pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, write_to_inode: bool, otherwise_datablock_index: Option<Index>, data: &[u8]) -> Option<Index> {
pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, target_type: JBRTargetType, otherwise_datablock_index: Option<Index>, data: &[u8]) -> Option<Index> {
let entry_index = next_journal_position(sb, bd)?;
let entry_content = JournalBlockWrite {
flags: 0,
target_is_inode: write_to_inode,
target_type: target_type as u32,
target_inode: containing_inode_index,
target_block: otherwise_datablock_index.unwrap_or(0),
real_target_block: 0, // filled in once flushed
@ -358,7 +358,7 @@ pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, co
entry.content.block_write.flags = JBRFlags::Stored as u32;
// generate crc32 of the entry
let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
let mut clone = entry.content.clone();
let mut clone = entry.content;
clone.block_write.flags = 0;
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, clone); }
entry.zeroed_content_crc32 = crc32::crc32(&buf);
@ -409,7 +409,7 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32;
// calculate the crc32
let mut content_cloned = entry.content.clone();
let mut content_cloned = entry.content;
content_cloned.multiblock_write.flags = 0;
let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_cloned); }
@ -439,7 +439,7 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
// create a list block
let mut list_block = ListBlock {
using_indirect_blocks: if datablock_count > 12 { true } else { false },
using_indirect_blocks: datablock_count > 12,
direct_block_addresses: [0; 12],
};
@ -488,7 +488,7 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
// write the addresses
for j in 0..count {
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).offset(1 + j as isize), allocated_blocks[(9 + i as usize) as usize + j as usize]); }
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).offset(1 + j as isize), allocated_blocks[(9 + i) as usize + j as usize]); }
}
// write the data
@ -497,17 +497,11 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
}
}
} else {
for i in 0..12 {
list_block.direct_block_addresses[i] = allocated_blocks[i];
}
list_block.direct_block_addresses[..12].copy_from_slice(&allocated_blocks[..12]);
}
// read target inode, and write the old list block
let target_inode = unsafe { read_inode(containing_inode_index, sb, bd) };
if target_inode.is_none() {
return None;
}
let target_inode = target_inode.unwrap();
let target_inode = read_inode(containing_inode_index, sb, bd)?;
old_list_block.using_indirect_blocks = target_inode.flags & InodeFlags::INDIRECT as u32 != 0;
old_list_block.direct_block_addresses = target_inode.direct_block_addresses;
@ -578,7 +572,7 @@ pub fn verify_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, jour
if content.flags > 4 {
return false;
}
let mut content_clone = journal_entry.content.clone();
let mut content_clone = journal_entry.content;
content_clone.block_write.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
@ -605,7 +599,7 @@ pub fn verify_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, jour
/// Otherwise, returns true
pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
// read the journal entry
let journal_entry = unsafe { read_journal_entry(entry_index, sb, bd) };
let journal_entry = read_journal_entry(entry_index, sb, bd);
if journal_entry.is_none() {
return false;
}
@ -623,12 +617,22 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
let content = unsafe { journal_entry.content.block_write };
if content.flags < 3 && content.flags > 0 {
// source block wasn't written, this entry is corrupt
return false;
// set the flags to 0 so that we don't try to flush this entry again
journal_entry.content.block_write.flags = 0;
unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) };
return true;
}
let content = unsafe { journal_entry.content.block_write };
// if flag is 0, we don't need to do anything; return
// we will check again later to see if the flags have changed from our work
if content.flags == 0 {
return true;
}
// if flag is 3, either update inode metadata or copy the data to the destination block
if content.flags == 3 {
if content.target_is_inode {
if content.target_type == JBRTargetType::Inode as u32 {
// copy the data directly to the target inode's block
let buf = read_datablock(content.source_block, sb, bd);
let mut inode_buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
@ -637,7 +641,7 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
} else {
} else if content.target_type == JBRTargetType::DataBlock as u32 {
// update inode metadata
let inode = read_inode(content.target_inode, sb, bd);
if inode.is_none() {
@ -699,6 +703,11 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
return false;
}
}
} else if content.target_type == JBRTargetType::Disk as u32 {
// copy the data directly to the offset on the disk
let buf = read_datablock(content.source_block, sb, bd);
bd.seek(content.target_block * sb.block_size as u64);
bd.write_blocks(&buf);
}
// update journal entry
@ -713,7 +722,7 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
// if flag is 4, deallocate the source block
if content.flags == 4 {
if content.target_is_inode {
if content.target_type == JBRTargetType::Inode as u32 {
let block_to_deallocate = content.source_block; // data was copied
if !unsafe { set_datablock_allocation_status(block_to_deallocate, sb, bd, false) } {
return false;
@ -737,6 +746,13 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
// if flag is 0, move the journal head to the next entry
if content.flags == 0 {
// superblock may have changed, read it again
let sb = get_superblock(bd);
if sb.is_none() {
return false;
}
let sb = sb.as_ref().unwrap();
let head = sb.journal_position;
let mut next = head + 1;
let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as u32;
@ -765,7 +781,7 @@ pub fn verify_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journ
if content.flags > 6 {
return false;
}
let mut content_clone = journal_entry.content.clone();
let mut content_clone = journal_entry.content;
content_clone.multiblock_write.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
@ -792,7 +808,7 @@ pub fn verify_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journ
/// Otherwise, returns true
pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
// read the journal entry
let journal_entry = unsafe { read_journal_entry(entry_index, sb, bd) };
let journal_entry = read_journal_entry(entry_index, sb, bd);
if journal_entry.is_none() {
return false;
}
@ -810,7 +826,17 @@ pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_
let content = unsafe { journal_entry.content.multiblock_write };
if content.flags < 5 && content.flags > 0 {
// source block wasn't written, this entry is corrupt
return false;
// set the flags to 0 so that we don't try to flush this entry again
journal_entry.content.multiblock_write.flags = 0;
unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) };
return true;
}
let content = unsafe { journal_entry.content.multiblock_write };
// if flag is 0, we don't need to do anything; return
// we will check again later to see if the flags have changed from our work
if content.flags == 0 {
return true;
}
// if flag is 5, copy current data to old list block and then overwrite with new data
@ -965,4 +991,262 @@ pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_
}
true
}
/// # JournaledWriteResult
/// Outcome of a higher-level journaled write operation
/// (`journaled_write_superblock`, `journaled_write_inode`, `journaled_write_data`).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum JournaledWriteResult {
    /// the journal entry was created (and, if requested, flushed) successfully
    Success,
    /// no unallocated data blocks remain on the device
    OutOfDiskSpace,
    /// the underlying block device could not be read from or written to
    UnderlyingBlockDeviceError,
    /// a flush failed or an unknown journal operation was encountered;
    /// the filesystem may be in an inconsistent state
    PotentialFilesystemCorruption,
}
/// flushes all pending journal entries until the index is reached
/// if plus_one is true, then the index is inclusive, otherwise it is exclusive
/// returns true if the index was reached, false if the index was not reached
pub fn flush_count_entries(sb: &Superblock, bd: &mut dyn BlockDevice, mut to: Index, plus_one: bool) -> bool {
    // work on a local copy: the on-disk superblock is re-read after each flush,
    // because flushing an entry can advance the journal head
    let mut sb = *sb;
    let mut head = sb.journal_position as Index;
    // total number of journal-entry slots in the journal area
    let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as Index;
    if head >= max_index {
        head = 0;
    }
    if plus_one {
        to += 1;
    }
    if to >= max_index {
        if plus_one {
            // the +1 may have pushed `to` past the end of the journal:
            // wrap it back into the valid slot range
            while to >= max_index {
                to -= max_index;
            }
        } else {
            // an out-of-range exclusive target could never be reached
            return false; // please no infinite loops (:
        }
    }
    while head != to {
        let entry = read_journal_entry(head, &sb, bd);
        if entry.is_none() {
            // unreadable entry: skip it and keep walking, wrapping at the end
            head += 1;
            if head >= max_index {
                head = 0;
            }
            continue;
        }
        let entry = entry.unwrap();
        // match patterns must be constants, hence these local re-declarations
        const SINGLE_BLOCK_WRITE: u32 = JournalOperation::SingleBlockWrite as u32;
        const MULTI_BLOCK_WRITE: u32 = JournalOperation::MultiblockWrite as u32;
        match entry.operation {
            SINGLE_BLOCK_WRITE => {
                // NOTE(review): flush results are deliberately ignored here — a failed
                // flush is skipped rather than aborting the walk; confirm this is intended
                flush_single_block_write(&sb, bd, head);
            }
            MULTI_BLOCK_WRITE => {
                flush_multi_block_write(&sb, bd, head);
            }
            _ => {} // unknown operation: leave the entry untouched
        }
        // reread superblock
        let sb_opt = get_superblock(bd);
        if sb_opt.is_none() {
            return false;
        }
        sb = sb_opt.unwrap();
        head += 1;
        if head >= max_index {
            head = 0;
        }
    }
    true
}
/// attempts to figure out why we couldn't create a journal entry, and returns success if it was able to resolve the issue
pub fn why_cant_make_journal_entry(sb: &Superblock, bd: &mut dyn BlockDevice) -> JournaledWriteResult {
    // no free data block anywhere means the disk itself is full
    if find_first_unallocated_datablock(sb, bd).is_none() {
        return JournaledWriteResult::OutOfDiskSpace;
    }

    // otherwise the journal is probably full: try to flush the entry at the head
    let head = sb.journal_position as Index;
    let current_entry = match read_journal_entry(head, sb, bd) {
        Some(entry) => entry,
        None => return JournaledWriteResult::UnderlyingBlockDeviceError,
    };

    // match patterns must be constants, hence these local re-declarations
    const SINGLE_BLOCK_WRITE: u32 = JournalOperation::SingleBlockWrite as u32;
    const MULTI_BLOCK_WRITE: u32 = JournalOperation::MultiblockWrite as u32;
    let flushed = match current_entry.operation {
        SINGLE_BLOCK_WRITE => flush_single_block_write(sb, bd, head),
        MULTI_BLOCK_WRITE => flush_multi_block_write(sb, bd, head),
        // an unknown operation in the journal is itself a corruption signal
        _ => false,
    };
    if !flushed {
        return JournaledWriteResult::PotentialFilesystemCorruption;
    }

    JournaledWriteResult::Success
}
/// "safely" overwrites the contents of the superblock with the given superblock struct
/// # Safety
/// this function is unsafe because it writes to the superblock, which is a critical part of the filesystem
/// the writes will be journaled, but if the superblock becomes corrupted then that will not matter
pub unsafe fn journaled_write_superblock(current_superblock: &Superblock, bd: &mut dyn BlockDevice, new_superblock: Superblock, flush_immediately: bool) -> JournaledWriteResult {
// convert superblock to buffer
let buf = [0u8; core::mem::size_of::<Superblock>()];
unsafe { core::ptr::write(buf.as_ptr() as *mut Superblock, new_superblock) };
// create journal entry
let mut journal_entry = schedule_single_block_write(
current_superblock, bd, 0,
JBRTargetType::Disk, Some(1024),
&buf,
);
// if none...
if journal_entry.is_none() {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry(current_superblock, bd);
if why != JournaledWriteResult::Success {
return why;
}
// try again
journal_entry = schedule_single_block_write(
current_superblock, bd, 0,
JBRTargetType::Disk, Some(1024),
&buf,
);
if journal_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
}
let journal_entry = journal_entry.unwrap();
// if flush_immediately is true, flush all writes until the journal entry is complete
#[allow(clippy::collapsible_if)] // this is more readable
if flush_immediately {
if !flush_count_entries(current_superblock, bd, journal_entry, true) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
JournaledWriteResult::Success
}
/// overwrites the contents of an inode with the given inode struct
/// if you want to update the contents of an inode, this is the function you want
pub fn journaled_write_inode(sb: &Superblock, bd: &mut dyn BlockDevice, old_inode: Index, new_inode: Inode, flush_immediately: bool) -> JournaledWriteResult {
    // serialise the inode into a raw byte buffer for the journal.
    // fix: the buffer must be mutable and written through `as_mut_ptr`; the original
    // wrote through `buf.as_ptr() as *mut Inode` (mutation through a pointer derived
    // from an immutable binding — undefined behaviour). `write_unaligned` is used
    // because a `[u8; N]` array has no alignment guarantee for `Inode`.
    let mut buf = [0u8; core::mem::size_of::<Inode>()];
    // SAFETY: buf is exactly size_of::<Inode>() writable bytes
    unsafe { core::ptr::write_unaligned(buf.as_mut_ptr() as *mut Inode, new_inode) };
    // create journal entry targeting the inode itself
    let mut journal_entry = schedule_single_block_write(
        sb, bd, old_inode,
        JBRTargetType::Inode, None,
        &buf,
    );
    // if none...
    if journal_entry.is_none() {
        // are there any unallocated datablocks left?
        let why = why_cant_make_journal_entry(sb, bd);
        if why != JournaledWriteResult::Success {
            return why;
        }
        // try again
        journal_entry = schedule_single_block_write(
            sb, bd, old_inode,
            JBRTargetType::Inode, None,
            &buf,
        );
        if journal_entry.is_none() {
            return JournaledWriteResult::UnderlyingBlockDeviceError;
        }
    }
    let journal_entry = journal_entry.unwrap();
    // if flush_immediately is true, flush all writes until the journal entry is complete
    #[allow(clippy::collapsible_if)] // this is more readable
    if flush_immediately {
        if !flush_count_entries(sb, bd, journal_entry, true) {
            return JournaledWriteResult::PotentialFilesystemCorruption;
        }
    }
    JournaledWriteResult::Success
}
/// writes data blocks of an inode to the disk, automatically decides whether to use single or multi block writes
/// if you want to write data to the disk, this is likely the function you want
/// # Important Note
/// if data.len() is not a multiple of the block size, undefined behavior may occur
pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: Index, from_block: Index, data: &[u8], flush_immediately: bool) -> JournaledWriteResult {
    // anything larger than a single block goes through the multi-block path
    let multi_block = data.len() > sb.block_size as usize;
    // number of blocks needed, rounded up
    let block_count = (data.len() as Index).div_ceil(sb.block_size as Index);

    // first attempt at creating the journal entry
    let mut journal_entry = if multi_block {
        schedule_multi_block_write(sb, bd, inode, from_block, block_count, data)
    } else {
        schedule_single_block_write(sb, bd, inode, JBRTargetType::DataBlock, Some(from_block), data)
    };

    if journal_entry.is_none() {
        // diagnose (and possibly resolve) why the entry couldn't be created
        let why = why_cant_make_journal_entry(sb, bd);
        if why != JournaledWriteResult::Success {
            return why;
        }
        // the issue was resolved — retry once
        journal_entry = if multi_block {
            schedule_multi_block_write(sb, bd, inode, from_block, block_count, data)
        } else {
            schedule_single_block_write(sb, bd, inode, JBRTargetType::DataBlock, Some(from_block), data)
        };
    }

    let journal_entry = match journal_entry {
        Some(index) => index,
        None => return JournaledWriteResult::UnderlyingBlockDeviceError,
    };

    // optionally flush every pending entry up to and including ours
    if flush_immediately && !flush_count_entries(sb, bd, journal_entry, true) {
        return JournaledWriteResult::PotentialFilesystemCorruption;
    }
    JournaledWriteResult::Success
}

View file

@ -9,7 +9,7 @@ pub const MAGIC: u64 = 0x766170554653;
/// The primary struct of a VapUFS filesystem, contains metadata about the filesystem.
/// Located at byte offset 1024 of the block device.
/// All values are big-endian unless otherwise specified.
/// Directly after the Superblock are the free data blocks bitmap and the free inodes bitmap.
/// Directly after (i.e. the next block after) the Superblock are the free data blocks bitmap and the free inodes bitmap.
/// Free data blocks bitmap is data_block_count / 8 bytes long (rounded up).
/// Free inodes bitmap is inode_count / 8 bytes long (rounded up).
#[repr(C)]
@ -17,7 +17,7 @@ pub const MAGIC: u64 = 0x766170554653;
pub struct Superblock {
/// magic number that identifies the Superblock as a valid VapUFS filesystem
pub magic: u64,
/// size of each block in bytes
/// size of each block in bytes, must be *at least* the size of the superblock without the bitmaps
pub block_size: u32,
/// location of first data block in blocks
pub first_data_block: Index,
@ -47,7 +47,7 @@ pub struct Superblock {
/// reserved values for expansion
pub reserved: [u64; 7],
// 156 bytes used so far
// rest of the block is used by the free data blocks bitmap and the free inodes bitmap
// next block is used by the free data blocks bitmap and the free inodes bitmap
}
impl Superblock {
@ -371,8 +371,9 @@ pub enum JournalOperation {
pub struct JournalBlockWrite {
/// JBRFlags stating how far the write has progressed
pub flags: u32,
/// are we writing to an inode instead of a data block?
pub target_is_inode: bool,
/// are we writing to an inode instead of a data block, or maybe even directly to the disk?
/// see JBRTargetType
pub target_type: u32,
/// target inode number
pub target_inode: Index,
/// target block number (if target is a data block, this will be the index in the inode's direct block array;
@ -429,6 +430,19 @@ pub enum JBRFlags {
CompleteAndDeallocated = 0,
}
/// # JBRTargetType
/// Type of target of a JournalBlockWrite
/// stored in `JournalBlockWrite::target_type` as a `u32`
// improvement: derive Debug/PartialEq/Eq so variants can be printed and compared
// directly instead of always casting through `as u32`; fully backward-compatible
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum JBRTargetType {
    /// target is a data block
    DataBlock = 0,
    /// target is an inode
    Inode = 1,
    /// target is the disk itself
    Disk = 2,
}
/// # JournalMultiblockWrite
/// a special entry for writing to multiple blocks at once,
/// used for circumstances where it is very important that all blocks are either