// vapfs_ufs/src/lib.rs
#![no_std]
extern crate alloc;
use alloc::collections::VecDeque;
use alloc::vec;
use alloc::vec::Vec;
use vapfs::{BlockDevice, Index};
use crate::structs::{Inode, InodeFlags, JBRFlags, JBRTargetType, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
pub mod btree;
pub mod structs;
pub mod bitmap;
pub mod crc32;
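// On-disk layout assumed throughout this module (reconstructed from the offsets used below):
//   byte 1024                                     superblock
//   byte 1024 + block_size                        datablock allocation bitmap, ceil(data_block_count / 8) bytes
//   byte 1024 + block_size + datablock bitmap     inode allocation bitmap, ceil(inode_count / 8) bytes
//   block first_inode_block                       inode table
//   block first_data_block                        data blocks
//   block first_journal_block                     journal (an array of JournalEntry structs)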
/// Reads the superblock (located at byte offset 1024 of the block device) and returns it.
/// Returns None if the block device is too small to contain a superblock.
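///
/// # Example
/// A minimal usage sketch; `bd` stands in for any `vapfs::BlockDevice` implementation you already have:
/// ```ignore
/// if let Some(sb) = get_superblock(&mut bd) {
///     // fields are now in native endianness
///     assert!(sb.block_size > 0);
/// }
/// ```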
pub fn get_superblock(bd: &mut dyn BlockDevice) -> Option<Superblock> {
let mut buf: [u8; core::mem::size_of::<Superblock>()] = [0; core::mem::size_of::<Superblock>()];
bd.seek(1024);
let read_count = bd.read_blocks(&mut buf);
if read_count < core::mem::size_of::<Superblock>() {
return None;
}
let mut superblock = unsafe { core::ptr::read(buf.as_ptr() as *const Superblock) };
superblock.convert_big_endian_to_native();
Some(superblock)
}
/// Performs a direct write of a superblock to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_superblock(mut sb: Superblock, bd: &mut dyn BlockDevice) -> bool {
sb.convert_native_to_big_endian();
let mut buf: [u8; core::mem::size_of::<Superblock>()] = [0; core::mem::size_of::<Superblock>()];
core::ptr::write(buf.as_mut_ptr() as *mut Superblock, sb);
bd.seek(1024);
let write_count = bd.write_blocks(&buf);
write_count == core::mem::size_of::<Superblock>()
}
/// Reads the inode at the given index and returns it.
pub fn read_inode(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Inode> {
let mut buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
bd.seek((sb.first_inode_block * sb.block_size as u64) + (index * core::mem::size_of::<Inode>() as u64));
let read_count = bd.read_blocks(&mut buf);
if read_count < core::mem::size_of::<Inode>() {
return None;
}
let mut inode = unsafe { core::ptr::read(buf.as_ptr() as *const Inode) };
inode.convert_big_endian_to_native();
Some(inode)
}
/// Performs a direct write of an inode to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_inode(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, mut inode: Inode) -> bool {
inode.convert_native_to_big_endian();
let mut buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
core::ptr::write(buf.as_mut_ptr() as *mut Inode, inode);
bd.seek((sb.first_inode_block * sb.block_size as u64) + (index * core::mem::size_of::<Inode>() as u64));
let write_count = bd.write_blocks(&buf);
write_count == core::mem::size_of::<Inode>()
}
/// Reads a single datablock into memory; the returned buffer is always block-sized, but fewer bytes may have been filled if the end of the block device is reached.
pub fn read_datablock(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Vec<u8> {
let mut buf: Vec<u8> = Vec::new();
buf.resize(sb.block_size as usize, 0);
bd.seek((sb.first_data_block * sb.block_size as u64) + (index * sb.block_size as u64));
bd.read_blocks(&mut buf);
buf
}
/// Performs a direct write of a datablock to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_datablock(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, buf: &[u8]) -> bool {
bd.seek((sb.first_data_block * sb.block_size as u64) + (index * sb.block_size as u64));
let write_count = bd.write_blocks(buf);
write_count == sb.block_size as usize
}
/// Checks if a datablock is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_datablock_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
if index >= sb.data_block_count {
return None;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
Some(bitmap::get_bit(&bitmap_buf, index as usize) == bitmap::SET)
}
/// Checks if an inode is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_inode_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
// inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
if index >= sb.inode_count {
return None;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
Some(bitmap::get_bit(&bitmap_buf, index as usize) == bitmap::SET)
}
/// Finds the first unallocated datablock and returns its index.
/// Will return None if no unallocated datablock is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_datablock(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET).map(|index| index as Index)
}
/// Finds a number of unallocated datablocks and returns their indices.
/// Will return None if not enough unallocated datablocks are found, or if the block device cannot fill the buffer.
pub fn find_count_unallocated_datablocks(sb: &Superblock, bd: &mut dyn BlockDevice, count: usize) -> Option<Vec<Index>> {
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
let mut found = Vec::new();
while found.len() < count {
if let Some(i) = bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET) {
found.push(i as Index);
// set the bit so we don't find it again
bitmap::set_bit(&mut bitmap_buf, i, bitmap::SET);
} else {
return None;
}
}
Some(found)
}
/// Finds the first unallocated inode and returns its index.
/// Will return None if no unallocated inode is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_inode(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
// inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET).map(|index| index as Index)
}
/// Sets the allocation status of a datablock.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn set_datablock_allocation_status(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, allocated: bool) -> bool {
// todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
// todo! see how much time this saves?
// datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64;
let bitmap_length = (sb.data_block_count + 7) / 8;
if index >= sb.data_block_count {
return false;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return false;
}
bitmap::set_bit(&mut bitmap_buf, index as usize, allocated);
bd.seek(bitmap_offset);
let write_count = bd.write_blocks(&bitmap_buf);
write_count == bitmap_length as usize
}
/// Sets the allocation status of an inode.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn set_inode_allocation_status(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, allocated: bool) -> bool {
// todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
// todo! see how much time this saves?
// inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
if index >= sb.inode_count {
return false;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return false;
}
bitmap::set_bit(&mut bitmap_buf, index as usize, allocated);
bd.seek(bitmap_offset);
let write_count = bd.write_blocks(&bitmap_buf);
write_count == bitmap_length as usize
}
/// Reads a journal entry by index
pub fn read_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<JournalEntry> {
let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
let read_count = bd.read_blocks(&mut buf);
if read_count < core::mem::size_of::<JournalEntry>() {
return None;
}
let mut entry = unsafe { core::ptr::read(buf.as_ptr() as *const JournalEntry) };
entry.convert_big_endian_to_native();
Some(entry)
}
/// Performs a direct write of a journal entry to the block device.
/// # Safety
/// unsafe because it assumes that the entry is correctly formatted and that everything has been performed correctly
pub unsafe fn write_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, mut entry: JournalEntry) -> bool {
entry.convert_native_to_big_endian();
let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
core::ptr::write(buf.as_mut_ptr() as *mut JournalEntry, entry);
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
let write_count = bd.write_blocks(&buf);
write_count == core::mem::size_of::<JournalEntry>()
}
/// Checks if a journal entry has been completed.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer,
/// or if the entry is invalid
pub fn is_journal_entry_complete(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
let entry = read_journal_entry(index, sb, bd)?;
// if flags == 0, the entry is complete
const SINGLEBLOCK: u32 = JournalOperation::SingleBlockWrite as u32;
const MULTIBLOCK: u32 = JournalOperation::MultiblockWrite as u32;
match entry.operation {
SINGLEBLOCK => unsafe { Some(entry.content.block_write.flags == 0) },
MULTIBLOCK => unsafe { Some(entry.content.multiblock_write.flags == 0) },
_ => None,
}
}
/// Returns the index of the next unused journal entry.
/// Will loop around to the beginning of the journal if the end is reached.
pub fn next_journal_position(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
let mut index = sb.journal_position as Index + 1;
let max_index = (sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64;
if index >= max_index {
index = 0;
}
loop {
let entry = read_journal_entry(index, sb, bd)?;
// if flags == 0, the entry is complete
// flags should always be the same size and at the same offset in the union, so we can just check one
if unsafe { entry.content.block_write.flags == 0 } {
return Some(index);
}
index += 1;
if index >= max_index {
index = 0;
}
if index == sb.journal_position as Index {
// we've looped around to the beginning of the journal
return None;
}
}
}
/// Returns the index of an indirectly indexed datablock, or 0 if it does not exist.
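///
/// The indirect blocks referenced by `dbas[9..12]` are assumed to be laid out as a big-endian
/// `u64` entry count followed by 16-byte entries, each entry being a flags/depth `u64`
/// (a flag byte marking whether the pointer refers to data) and a `u64` block pointer;
/// this mirrors the parsing performed below and the deallocation walk in `flush_multi_block_write`.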
pub fn get_indirect_datablock(sb: &Superblock, bd: &mut dyn BlockDevice, dbas: [Index; 12], address: Index) -> Index {
if address < 12 {
dbas[address as usize]
} else {
let n = address - 12;
let mut blocks_left = n / (sb.block_size as u64 - 8);
let mut indexes_left = n % (sb.block_size as u64 - 8);
let mut current_block = dbas[9]; // first indirect block
let mut visited = vec![];
loop {
if visited.contains(&current_block) {
return 0;
}
visited.push(current_block);
let mut head = 0;
let buf = read_datablock(current_block, sb, bd);
let count = u64::from_be_bytes(buf[0..8].try_into().unwrap());
head += 8;
// if blocks_left == 0, we're in the right block (either that or we're in the right block to recurse downwards into)
if blocks_left == 0 {
for _ in 0..count {
let isdata_depth = &buf[head..head + 8];
head += 8;
let ptr_data = &buf[head..head + 8];
head += 8;
let ptr = u64::from_be_bytes(ptr_data.try_into().unwrap());
let is_data = isdata_depth[1] != 0;
// if not data, we need to recurse
if !is_data {
current_block = ptr;
break; // skip the rest of the loop
} else {
// if indexes_left == 0, we found the correct index
if indexes_left == 0 {
return ptr;
} else {
indexes_left -= 1;
}
}
}
} else {
for _ in 0..count {
let isdata_depth = &buf[head..head + 8];
head += 8;
let ptr_data = &buf[head..head + 8];
head += 8;
let ptr = u64::from_be_bytes(ptr_data.try_into().unwrap());
let is_data = isdata_depth[1] != 0;
let mut depth = isdata_depth.to_vec();
depth[0] = 0;
let depth = u64::from_be_bytes(depth.try_into().unwrap());
// if blocks_left is less than the depth, we are at the correct block
if !is_data {
if blocks_left < depth {
// if not data, we need to recurse
blocks_left = 0;
current_block = ptr;
break; // skip the rest of the loop
} else {
blocks_left -= depth;
}
} else {
// if indexes_left == 0, we found the correct index
if indexes_left == 0 {
return ptr;
} else {
indexes_left -= 1;
}
}
}
}
}
}
}
/// Creates a journal entry for a single block write operation.
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, or if the block device cannot be written to.
/// Returns the journal entry index if successful.
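///
/// # Example
/// A sketch of journaling one block of new file data and flushing it straight away
/// (`sb`, `bd`, `inode_index` and `block_index` are assumed to already exist):
/// ```ignore
/// let data = vec![0u8; sb.block_size as usize];
/// if let Some(entry) = schedule_single_block_write(&sb, &mut bd, inode_index, JBRTargetType::DataBlock, Some(block_index), &data) {
///     // nothing reaches the inode until the journal entry is flushed
///     flush_single_block_write(&sb, &mut bd, entry);
/// }
/// ```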
pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, target_type: JBRTargetType, otherwise_datablock_index: Option<Index>, data: &[u8]) -> Option<Index> {
let entry_index = next_journal_position(sb, bd)?;
let entry_content = JournalBlockWrite {
flags: 0,
target_type: target_type as u32,
target_inode: containing_inode_index,
target_block: otherwise_datablock_index.unwrap_or(0),
real_target_block: 0, // filled in once flushed
source_block: 0, // filled in once allocated
source_block_crc32: 0, // filled in once allocated
};
let mut entry = JournalEntry {
operation: JournalOperation::SingleBlockWrite as u32,
zeroed_content_crc32: 0,
content: JournalEntryContents {
block_write: entry_content,
},
};
// write the journal entry
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// find a free data block
let data_block_index = find_first_unallocated_datablock(sb, bd)?;
// set the content and then rewrite the journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.block_write.source_block = data_block_index;
entry.content.block_write.flags = JBRFlags::Chosen as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// allocate the data block
if !unsafe { set_datablock_allocation_status(data_block_index, sb, bd, true) } {
return None;
}
// set the content and then rewrite the journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.block_write.flags = JBRFlags::Allocated as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// write the data to the data block
if !unsafe { write_datablock(data_block_index, sb, bd, data) } {
return None;
}
let written_data = read_datablock(data_block_index, sb, bd);
// set the crc32 and stored flag
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.block_write.source_block_crc32 = crc32::crc32(&written_data);
entry.content.block_write.flags = JBRFlags::Stored as u32;
// generate crc32 of the entry
let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
let mut clone = entry.content;
clone.block_write.flags = 0;
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, clone); }
entry.zeroed_content_crc32 = crc32::crc32(&buf);
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// all further steps will be performed on a journal flush
Some(entry_index)
}
/// Creates a journal entry for a multi block write operation.
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, or if the block device cannot be written to.
/// Returns the journal entry index if successful.
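///
/// # Example
/// A sketch that stages `count` consecutive blocks of `data` for an inode and then flushes
/// them through the journal (`sb`, `bd` and `inode_index` are assumed to already exist):
/// ```ignore
/// let count = (data.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index;
/// if let Some(entry) = schedule_multi_block_write(&sb, &mut bd, inode_index, 0, count, &data) {
///     flush_multi_block_write(&sb, &mut bd, entry);
/// }
/// ```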
pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, datablock_start: Index, datablock_count: Index, data: &[u8]) -> Option<Index> {
let entry_index = next_journal_position(sb, bd)?;
let entry_content = JournalMultiblockWrite {
flags: 0,
target_inode: containing_inode_index,
target_block: datablock_start,
target_block_count: datablock_count,
list_block: 0,
old_list_block: 0, // filled in once flushed
list_block_crc32: 0,
};
let mut entry = JournalEntry {
operation: JournalOperation::MultiblockWrite as u32,
zeroed_content_crc32: 0,
content: JournalEntryContents {
multiblock_write: entry_content,
},
};
// write the journal entry
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// find a free data block for the list block
let list_block_indexs = find_count_unallocated_datablocks(sb, bd, 2)?;
// set the content and then rewrite the journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.list_block = list_block_indexs[0];
entry.content.multiblock_write.old_list_block = list_block_indexs[1];
entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32;
// calculate the crc32
let mut content_cloned = entry.content;
content_cloned.multiblock_write.flags = 0;
let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_cloned); }
entry.zeroed_content_crc32 = crc32::crc32(&buf);
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// allocate the data block
if !unsafe { set_datablock_allocation_status(list_block_indexs[0], sb, bd, true) } {
return None;
}
if !unsafe { set_datablock_allocation_status(list_block_indexs[1], sb, bd, true) } {
return None;
}
// set the content and then rewrite the journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::AllocatedList as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// find the data blocks
let allocated_blocks = find_count_unallocated_datablocks(sb, bd, datablock_count as usize)?;
// create a list block
let mut list_block = ListBlock {
using_indirect_blocks: datablock_count > 12,
direct_block_addresses: [0; 12],
};
let mut old_list_block = ListBlock {
using_indirect_blocks: false,
direct_block_addresses: [0; 12],
};
let mut indirect_blocks_waiting_for_allocation_to_be_set = Vec::new();
// if using indirect blocks, only fill out the first (12 - 3) = 9 entries
// otherwise, fill out all 12 entries
if list_block.using_indirect_blocks {
list_block.direct_block_addresses[..9].copy_from_slice(&allocated_blocks[..9]);
// if using indirect blocks, fit the remaining entries into the indirect blocks
// layout is u64 count followed by u64 addresses
let max_addresses_per_block = (sb.block_size as usize - core::mem::size_of::<u64>()) / (core::mem::size_of::<u64>() * 2);
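// for example, with 4096-byte blocks this works out to (4096 - 8) / 16 = 255 (flag, pointer) pairs per indirect block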
let mut indirect_block_count = (datablock_count - 9) / max_addresses_per_block as u64;
// if the count is not a multiple of the max addresses per block, add one
if (datablock_count - 9) % max_addresses_per_block as u64 != 0 {
indirect_block_count += 1;
}
// if the count is over 3, we'll need to use nested indirect blocks
// calculate how many layers of indirect blocks we'll need,
// filling max_addresses per block until we have less than max_addresses_per_block left
// this will be the amount of layers required to store the data
let depth = {
let mut depth = 0;
let mut remaining = indirect_block_count;
while remaining > max_addresses_per_block as u64 {
remaining -= max_addresses_per_block as u64;
depth += 1;
}
depth
};
// allocate the indirect blocks
let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_block_count as usize + (depth * max_addresses_per_block))?;
for i in 0..(indirect_block_count as usize + (depth * max_addresses_per_block)) {
list_block.direct_block_addresses[9 + i] = indirect_blocks[i];
indirect_blocks_waiting_for_allocation_to_be_set.push(indirect_blocks[i]);
}
// write the indirect blocks
let mut indirect_block_data = vec![0u8; sb.block_size as usize]; // full block, so write_datablock sees a complete block
let mut indirect_blocks_from_previous_layer = VecDeque::new();
let mut indirect_blocks_from_previous_layer_alt = VecDeque::new();
let mut using_alt = false;
let mut acc: VecDeque<u64> = VecDeque::new(); // how much each previous layer has had
let mut acc_alt: VecDeque<u64> = VecDeque::new(); // how much each previous layer has had
for i in 0..(indirect_block_count as usize + (depth * max_addresses_per_block)) {
// we will write the indirect blocks that contain the data blocks first
// then we will write the indirect blocks that contain the indirect blocks
// are we writing the indirect blocks that contain the data blocks?
let writing_data_blocks = i < indirect_block_count as usize;
if writing_data_blocks {
let count = if i == (indirect_block_count - 1) as usize { // if we're at the last block, not all of the addresses will be used
(datablock_count - 9) % max_addresses_per_block as u64
} else {
max_addresses_per_block as u64 // otherwise, all of the addresses will be used
};
// add count
unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count); }
// add addresses
for j in 0..count {
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(1 + j as usize), allocated_blocks[9 + i * max_addresses_per_block + j as usize]); }
}
// write the indirect block
if !unsafe { write_datablock(indirect_blocks[i], sb, bd, &indirect_block_data) } {
return None;
}
indirect_blocks_from_previous_layer.push_back(indirect_blocks[i]);
acc.push_back(count);
} else {
// we're writing the indirect blocks that contain the indirect blocks
if !using_alt {
// write addresses from front of indirect_blocks_from_previous_layer
let count = if indirect_blocks_from_previous_layer.len() > max_addresses_per_block - 8 {
max_addresses_per_block - 8
} else {
indirect_blocks_from_previous_layer.len()
};
// add count
unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count as u64); }
// add addresses
for j in 0..count {
// get acc value
let acc_val = acc.pop_front().unwrap_or(0);
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(1 + (j * 2)), acc_val); }
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(2 + (j * 2)), indirect_blocks_from_previous_layer.pop_front().unwrap_or(0)); }
}
// write the indirect block
if !unsafe { write_datablock(indirect_blocks[i], sb, bd, &indirect_block_data) } {
return None;
}
// add the indirect block to the back of indirect_blocks_from_previous_layer_alt
indirect_blocks_from_previous_layer_alt.push_back(indirect_blocks[i]);
acc_alt.push_back(count as u64);
// if indirect_blocks_from_previous_layer is empty, switch to using_alt
if indirect_blocks_from_previous_layer.is_empty() {
using_alt = true;
}
} else {
// write addresses from front of indirect_blocks_from_previous_layer_alt
let count = if indirect_blocks_from_previous_layer_alt.len() > max_addresses_per_block - 8 {
max_addresses_per_block - 8
} else {
indirect_blocks_from_previous_layer_alt.len()
};
// add count
unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count as u64); }
// add addresses
for j in 0..count {
// get acc value
let acc_val = acc_alt.pop_front().unwrap_or(0);
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(1 + (j * 2)), acc_val); }
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(2 + (j * 2)), indirect_blocks_from_previous_layer_alt.pop_front().unwrap_or(0)); }
}
// write the indirect block
if !unsafe { write_datablock(indirect_blocks[i], sb, bd, &indirect_block_data) } {
return None;
}
// add the indirect block to the back of indirect_blocks_from_previous_layer
indirect_blocks_from_previous_layer.push_back(indirect_blocks[i]);
acc.push_back(count as u64);
// if indirect_blocks_from_previous_layer_alt is empty, switch to using_alt
if indirect_blocks_from_previous_layer_alt.is_empty() {
using_alt = false;
}
}
}
}
} else {
list_block.direct_block_addresses[..12].copy_from_slice(&allocated_blocks[..12]);
}
// read target inode, and write the old list block
let target_inode = read_inode(containing_inode_index, sb, bd)?;
old_list_block.using_indirect_blocks = target_inode.flags & InodeFlags::INDIRECT as u32 != 0;
old_list_block.direct_block_addresses = target_inode.direct_block_addresses;
// write the list blocks
let mut buf = vec![0u8; sb.block_size as usize];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut ListBlock, list_block); }
if !unsafe { write_datablock(list_block_indexs[0], sb, bd, &buf) } {
return None;
}
// record the crc32 of the new list block so that verification of this entry can succeed later
entry.content.multiblock_write.list_block_crc32 = crc32::crc32(&read_datablock(list_block_indexs[0], sb, bd));
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut ListBlock, old_list_block); }
if !unsafe { write_datablock(list_block_indexs[1], sb, bd, &buf) } {
return None;
}
// set the content, recalculate the zeroed-flags crc32, and then rewrite the journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::ChosenData as u32;
let mut content_cloned = entry.content;
content_cloned.multiblock_write.flags = 0;
let mut crc_buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(crc_buf.as_mut_ptr() as *mut JournalEntryContents, content_cloned); }
entry.zeroed_content_crc32 = crc32::crc32(&crc_buf);
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// if we're using indirect blocks, set the allocation status of the indirect blocks
for block in indirect_blocks_waiting_for_allocation_to_be_set {
if !unsafe { set_datablock_allocation_status(block, sb, bd, true) } {
return None;
}
}
// set the allocation status of the data blocks
for block in &allocated_blocks {
if !unsafe { set_datablock_allocation_status(*block, sb, bd, true) } {
return None;
}
}
// update journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::AllocatedData as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// store the data in the data blocks
for i in 0..datablock_count {
if !unsafe { write_datablock(allocated_blocks[i as usize], sb, bd, &data[i as usize * sb.block_size as usize..(i as usize + 1) * sb.block_size as usize]) } {
return None;
}
}
// update journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::Stored as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// return the journal entry index
Some(entry_index)
}
/// Checks the integrity of a single block write journal entry
/// Returns true if the journal entry is valid, false otherwise
pub fn verify_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journal_entry: &JournalEntry) -> bool {
if journal_entry.operation != JournalOperation::SingleBlockWrite as u32 {
return false;
}
let content = unsafe { journal_entry.content.block_write };
if content.flags > 4 {
return false;
}
let mut content_clone = journal_entry.content;
content_clone.block_write.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
let hash = crc32::crc32(&buf);
if hash != journal_entry.zeroed_content_crc32 {
return false;
}
// check the source data block
let buf = read_datablock(content.source_block, sb, bd);
let crc32 = crc32::crc32(&buf);
if crc32 != content.source_block_crc32 {
return false;
}
// should be all good! (:
true
}
/// Flushes a single block write journal entry
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes
/// or if the journal entry is corrupt
/// Returns false if the journal entry is corrupt, the block device is full, or if the block device is read only
/// Otherwise, returns true
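///
/// # Example
/// A sketch that tries to flush whatever entry currently sits at the journal head
/// (assumes `bd` is an open `vapfs::BlockDevice`):
/// ```ignore
/// if let Some(sb) = get_superblock(&mut bd) {
///     let _ = flush_single_block_write(&sb, &mut bd, sb.journal_position as Index);
/// }
/// ```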
pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
// read the journal entry
let journal_entry = read_journal_entry(entry_index, sb, bd);
if journal_entry.is_none() {
return false;
}
let mut journal_entry = journal_entry.unwrap();
// verify the journal entry
if !verify_single_block_write(sb, bd, &journal_entry) {
return false;
}
// because everything is verified, we should be able to execute steps 6 through 9 and
// not have to worry about crashes; since the journal entry is good, we can repeat these steps
// until they succeed
let content = unsafe { journal_entry.content.block_write };
if content.flags < 3 && content.flags > 0 {
// source block wasn't written, this entry is corrupt
// set the flags to 0 so that we don't try to flush this entry again
journal_entry.content.block_write.flags = 0;
unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) };
return true;
}
let content = unsafe { journal_entry.content.block_write };
// if flag is 0, we don't need to do anything; return
// we will check again later to see if the flags have changed from our work
if content.flags == 0 {
return true;
}
// if flag is 3, either update inode metadata or copy the data to the destination block
if content.flags == 3 {
if content.target_type == JBRTargetType::Inode as u32 {
// copy the data directly to the target inode's block
let buf = read_datablock(content.source_block, sb, bd);
let mut inode_buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
inode_buf[0..core::mem::size_of::<Inode>()].clone_from_slice(&buf[0..core::mem::size_of::<Inode>()]);
let inode = unsafe { core::ptr::read(inode_buf.as_ptr() as *const Inode) };
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
} else if content.target_type == JBRTargetType::DataBlock as u32 {
// update inode metadata
let inode = read_inode(content.target_inode, sb, bd);
if inode.is_none() {
return false;
}
let mut inode = inode.unwrap();
// target block is either an index into the direct blocks or an indirect block (if greater than 11)
if content.target_block < 12 {
let previous_block = inode.direct_block_addresses[content.target_block as usize];
// update the journal entry
journal_entry.content.block_write.real_target_block = previous_block;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
inode.direct_block_addresses[content.target_block as usize] = content.source_block;
// update the inode
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
} else {
if inode.flags & InodeFlags::INDIRECT as u32 == 0 {
// inode doesn't have indirect blocks, this entry is corrupt
return false;
}
// figure out which indirect block we need to write to (either 1, 2, or 3)
// range 12..(12*2) is indirect block 1
// range (12*2)..(12*3) is indirect block 2
// range (12*3)..(12*4) is indirect block 3
let indirect_block_index = (content.target_block - 12) / 12;
let indirect_block_offset = (content.target_block - 12) % 12;
let indirect_block = inode.direct_block_addresses[indirect_block_index as usize];
let mut indirect_block_buf = read_datablock(indirect_block, sb, bd);
// get the count
let mut count = u64::from_be_bytes(indirect_block_buf.as_slice()[0..8].try_into().unwrap());
// place the source block at index (indirect_block_offset * 8) + 8
let target_index = (indirect_block_offset * 8) + 8;
// if there's already a block at the target index, we need to update the journal entry
if indirect_block_offset < count {
// update the journal entry
journal_entry.content.block_write.real_target_block = u64::from_be_bytes(indirect_block_buf.as_slice()[target_index as usize..(target_index + 8) as usize].try_into().unwrap());
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
}
indirect_block_buf.as_mut_slice()[target_index as usize..(target_index + 8) as usize].clone_from_slice(&content.source_block.to_be_bytes());
// update the count
if count < indirect_block_offset + 1 {
count = indirect_block_offset + 1;
}
indirect_block_buf.as_mut_slice()[0..8].clone_from_slice(&count.to_be_bytes());
// write the indirect block back to the block device
if !unsafe { write_datablock(indirect_block, sb, bd, &indirect_block_buf) } {
return false;
}
}
} else if content.target_type == JBRTargetType::Disk as u32 {
// copy the data directly to the target byte offset on the disk
// (for Disk targets, target_block holds a raw byte offset, e.g. 1024 for the superblock)
let buf = read_datablock(content.source_block, sb, bd);
bd.seek(content.target_block);
if bd.write_blocks(&buf) < buf.len() {
return false;
}
}
// update journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
journal_entry.content.block_write.flags = JMWFlags::Written as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
}
let content = unsafe { journal_entry.content.block_write };
// if flag is 4, deallocate the source block
if content.flags == 4 {
if content.target_type == JBRTargetType::Inode as u32 {
let block_to_deallocate = content.source_block; // data was copied
if !unsafe { set_datablock_allocation_status(block_to_deallocate, sb, bd, false) } {
return false;
}
} else {
let block_to_deallocate = content.real_target_block; // data was moved, this should contain the old block
if !unsafe { set_datablock_allocation_status(block_to_deallocate, sb, bd, false) } {
return false;
}
}
// update journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
journal_entry.content.block_write.flags = JMWFlags::CompleteAndDeallocated as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
}
let content = unsafe { journal_entry.content.block_write };
// if flag is 0, move the journal head to the next entry
if content.flags == 0 {
// superblock may have changed, read it again
let sb = get_superblock(bd);
if sb.is_none() {
return false;
}
let sb = sb.as_ref().unwrap();
let head = sb.journal_position;
let mut next = head + 1;
let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as u32;
if next >= max_index {
next = 0;
}
// write superblock
let mut sb = *sb;
sb.journal_position = next;
if !unsafe { write_superblock(sb, bd) } {
return false;
}
}
true
}
/// Checks the integrity of a multi block write journal entry
/// Returns true if the journal entry is valid, false otherwise
pub fn verify_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journal_entry: &JournalEntry) -> bool {
if journal_entry.operation != JournalOperation::MultiblockWrite as u32 {
return false;
}
let content = unsafe { journal_entry.content.multiblock_write };
if content.flags > 6 {
return false;
}
let mut content_clone = journal_entry.content;
content_clone.multiblock_write.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
let hash = crc32::crc32(&buf);
if hash != journal_entry.zeroed_content_crc32 {
return false;
}
// check the source data block
let buf = read_datablock(content.list_block, sb, bd);
let crc32 = crc32::crc32(&buf);
if crc32 != content.list_block_crc32 {
return false;
}
// should be all good! (:
true
}
/// Flushes a multi block write journal entry
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes
/// or if the journal entry is corrupt
/// Returns false if the journal entry is corrupt, the block device is full, or if the block device is read only
/// Otherwise, returns true
pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
// read the journal entry
let journal_entry = read_journal_entry(entry_index, sb, bd);
if journal_entry.is_none() {
return false;
}
let mut journal_entry = journal_entry.unwrap();
// verify the journal entry
if !verify_multi_block_write(sb, bd, &journal_entry) {
return false;
}
// because everything is verified, we should be able to execute steps 8 through 11 and
// not have to worry about crashes; since the journal entry is good, we can repeat these steps
// until they succeed
let content = unsafe { journal_entry.content.multiblock_write };
if content.flags < 5 && content.flags > 0 {
// source block wasn't written, this entry is corrupt
// set the flags to 0 so that we don't try to flush this entry again
journal_entry.content.multiblock_write.flags = 0;
unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) };
return true;
}
let content = unsafe { journal_entry.content.multiblock_write };
// if flag is 0, we don't need to do anything; return
// we will check again later to see if the flags have changed from our work
if content.flags == 0 {
return true;
}
// if flag is 5, copy current data to old list block and then overwrite with new data
if content.flags == 5 {
let inode = read_inode(content.target_inode, sb, bd);
if inode.is_none() {
return false;
}
let inode = inode.unwrap();
// get dbas of new list block
let buf = read_datablock(content.list_block, sb, bd);
let list_block = unsafe { core::ptr::read(buf.as_ptr() as *const ListBlock) };
let dba = list_block.direct_block_addresses;
// update inode
let mut inode = inode;
inode.direct_block_addresses = dba;
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
// update journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
journal_entry.content.multiblock_write.flags = JMWFlags::Written as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
}
let content = unsafe { journal_entry.content.multiblock_write };
// if flag is 6, we will deallocate all blocks in the old list block (using stupid method so that it's faster)
if content.flags == 6 {
let mut unused_datablocks: Vec<Index> = Vec::new();
let list_block = read_datablock(content.list_block, sb, bd);
let list_block = unsafe { core::ptr::read(list_block.as_ptr() as *const ListBlock) };
let old_list_block = read_datablock(content.old_list_block, sb, bd);
let old_list_block = unsafe { core::ptr::read(old_list_block.as_ptr() as *const ListBlock) };
for i in 0..9 {
if old_list_block.direct_block_addresses[i] != 0 {
unused_datablocks.push(old_list_block.direct_block_addresses[i]);
}
}
for x in 0..3 {
let base = old_list_block.direct_block_addresses[x + 9];
if base == 0 {
continue;
}
if !old_list_block.using_indirect_blocks {
unused_datablocks.push(base);
} else {
// walk the old indirect block tree, queueing nested indirect blocks as they are discovered
let mut deallocation_queue: Vec<Index> = Vec::new();
deallocation_queue.push(base);
while let Some(ptr) = deallocation_queue.pop() {
// read indirect block
let buf = read_datablock(ptr, sb, bd);
let mut head = 0;
let count = u64::from_be_bytes(buf[head..head + 8].try_into().unwrap()) as usize;
head += 8;
for i in 0..count {
let is_data: bool = buf[head] != 0;
let mut depth_no_data = buf[head..head + 8].to_vec();
depth_no_data[0] = 0;
let depth = u64::from_be_bytes(depth_no_data.try_into().unwrap()) as usize;
head += 8;
let new_ptr = u64::from_be_bytes(buf[head..head + 8].try_into().unwrap());
head += 8;
if !is_data {
deallocation_queue.push(new_ptr);
} else {
unused_datablocks.push(new_ptr);
}
}
// deallocate this indirect block itself
unused_datablocks.push(ptr);
}
}
}
// deallocate unused blocks
for dba in unused_datablocks {
if !unsafe { set_datablock_allocation_status(dba, sb, bd, false) } {
return false;
}
}
// deallocate old list block
if !unsafe { set_datablock_allocation_status(content.old_list_block, sb, bd, false) } {
return false;
}
// deallocate list block
if !unsafe { set_datablock_allocation_status(content.list_block, sb, bd, false) } {
return false;
}
// update journal entry
// note: CLion incorrectly says that this is unsafe, writing to a union is safe
journal_entry.content.multiblock_write.flags = JMWFlags::CompleteAndDeallocated as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
}
let content = unsafe { journal_entry.content.multiblock_write };
// if flag is 0, move the journal head to the next entry
if content.flags == 0 {
let head = sb.journal_position;
let mut next = head + 1;
let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as u32;
if next >= max_index {
next = 0;
}
// write superblock
let mut sb = *sb;
sb.journal_position = next;
if !unsafe { write_superblock(sb, bd) } {
return false;
}
}
true
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum JournaledWriteResult {
Success,
OutOfDiskSpace,
UnderlyingBlockDeviceError,
PotentialFilesystemCorruption,
}
/// flushes all pending journal entries until the index is reached
/// if plus_one is true, then the index is inclusive, otherwise it is exclusive
/// returns true if the index was reached, false if the index was not reached
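///
/// # Example
/// A sketch that schedules a write and then flushes every pending entry up to and including it
/// (`sb`, `bd`, `inode_index` and `data` are assumed to already exist):
/// ```ignore
/// if let Some(entry) = schedule_single_block_write(&sb, &mut bd, inode_index, JBRTargetType::DataBlock, Some(0), &data) {
///     assert!(flush_count_entries(&sb, &mut bd, entry, true));
/// }
/// ```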
pub fn flush_count_entries(sb: &Superblock, bd: &mut dyn BlockDevice, mut to: Index, plus_one: bool) -> bool {
let mut sb = *sb;
let mut head = sb.journal_position as Index;
let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as Index;
if head >= max_index {
head = 0;
}
if plus_one {
to += 1;
}
if to >= max_index {
if plus_one {
while to >= max_index {
to -= max_index;
}
} else {
return false; // please no infinite loops (:
}
}
while head != to {
let entry = read_journal_entry(head, &sb, bd);
if entry.is_none() {
head += 1;
if head >= max_index {
head = 0;
}
continue;
}
let entry = entry.unwrap();
const SINGLE_BLOCK_WRITE: u32 = JournalOperation::SingleBlockWrite as u32;
const MULTI_BLOCK_WRITE: u32 = JournalOperation::MultiblockWrite as u32;
match entry.operation {
SINGLE_BLOCK_WRITE => {
flush_single_block_write(&sb, bd, head);
}
MULTI_BLOCK_WRITE => {
flush_multi_block_write(&sb, bd, head);
}
_ => {}
}
// reread superblock
let sb_opt = get_superblock(bd);
if sb_opt.is_none() {
return false;
}
sb = sb_opt.unwrap();
head += 1;
if head >= max_index {
head = 0;
}
}
true
}
/// attempts to figure out why we couldn't create a journal entry, and returns success if it was able to resolve the issue
pub fn why_cant_make_journal_entry(sb: &Superblock, bd: &mut dyn BlockDevice) -> JournaledWriteResult {
if find_first_unallocated_datablock(sb, bd).is_none() {
return JournaledWriteResult::OutOfDiskSpace;
} else {
// the journal is probably full, flush the current entry
let current_entry = read_journal_entry(sb.journal_position as Index, sb, bd);
if current_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
let current_entry = current_entry.unwrap();
const SINGLE_BLOCK_WRITE: u32 = JournalOperation::SingleBlockWrite as u32;
const MULTI_BLOCK_WRITE: u32 = JournalOperation::MultiblockWrite as u32;
match current_entry.operation {
SINGLE_BLOCK_WRITE => {
if !flush_single_block_write(sb, bd, sb.journal_position as Index) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
MULTI_BLOCK_WRITE => {
if !flush_multi_block_write(sb, bd, sb.journal_position as Index) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
_ => {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
}
JournaledWriteResult::Success
}
/// "safely" overwrites the contents of the superblock with the given superblock struct
/// # Safety
/// this function is unsafe because it writes to the superblock, which is a critical part of the filesystem
/// the writes will be journaled, but if the superblock becomes corrupted then that will not matter
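///
/// # Example
/// A sketch that modifies a copy of the current superblock and writes it back through the journal
/// (the field changed here is purely illustrative):
/// ```ignore
/// let sb = get_superblock(&mut bd).unwrap();
/// let mut new_sb = sb;
/// new_sb.journal_position = 0; // illustrative change only
/// let result = unsafe { journaled_write_superblock(&sb, &mut bd, new_sb, true) };
/// assert_eq!(result, JournaledWriteResult::Success);
/// ```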
pub unsafe fn journaled_write_superblock(current_superblock: &Superblock, bd: &mut dyn BlockDevice, new_superblock: Superblock, flush_immediately: bool) -> JournaledWriteResult {
// convert the superblock to its on-disk (big-endian) representation and stage it in a block-sized buffer
let mut new_superblock = new_superblock;
new_superblock.convert_native_to_big_endian();
let mut buf = vec![0u8; current_superblock.block_size as usize];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut Superblock, new_superblock) };
// create journal entry
let mut journal_entry = schedule_single_block_write(
current_superblock, bd, 0,
JBRTargetType::Disk, Some(1024),
&buf,
);
// if none...
if journal_entry.is_none() {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry(current_superblock, bd);
if why != JournaledWriteResult::Success {
return why;
}
// try again
journal_entry = schedule_single_block_write(
current_superblock, bd, 0,
JBRTargetType::Disk, Some(1024),
&buf,
);
if journal_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
}
let journal_entry = journal_entry.unwrap();
// if flush_immediately is true, flush all writes until the journal entry is complete
#[allow(clippy::collapsible_if)] // this is more readable
if flush_immediately {
if !flush_count_entries(current_superblock, bd, journal_entry, true) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
JournaledWriteResult::Success
}
/// overwrites the contents of an inode with the given inode struct
/// if you want to update the contents of an inode, this is the function you want
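///
/// # Example
/// A sketch that rewrites inode metadata in place (`inode_index` is assumed to name a valid inode):
/// ```ignore
/// if let Some(mut inode) = read_inode(inode_index, &sb, &mut bd) {
///     inode.flags |= InodeFlags::INDIRECT as u32; // illustrative change only
///     journaled_write_inode(&sb, &mut bd, inode_index, inode, true);
/// }
/// ```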
pub fn journaled_write_inode(sb: &Superblock, bd: &mut dyn BlockDevice, old_inode: Index, new_inode: Inode, flush_immediately: bool) -> JournaledWriteResult {
// stage the inode (native-endian; write_inode performs the big-endian conversion at flush time) in a block-sized buffer
let mut buf = vec![0u8; sb.block_size as usize];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut Inode, new_inode) };
// create journal entry
let mut journal_entry = schedule_single_block_write(
sb, bd, old_inode,
JBRTargetType::Inode, None,
&buf,
);
// if none...
if journal_entry.is_none() {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry(sb, bd);
if why != JournaledWriteResult::Success {
return why;
}
// try again
journal_entry = schedule_single_block_write(
sb, bd, old_inode,
JBRTargetType::Inode, None,
&buf,
);
if journal_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
}
let journal_entry = journal_entry.unwrap();
// if flush_immediately is true, flush all writes until the journal entry is complete
#[allow(clippy::collapsible_if)] // this is more readable
if flush_immediately {
if !flush_count_entries(sb, bd, journal_entry, true) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
JournaledWriteResult::Success
}
/// writes data blocks of an inode to the disk, uses single block writes
/// if you want to write data to the disk, this is likely the function you want
/// # Important Note
/// data.len() should be a multiple of the block size; any trailing partial block will not be written
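///
/// # Example
/// A sketch that pads the payload to a whole number of blocks before writing it
/// (`sb`, `bd` and `inode_index` are assumed to already exist):
/// ```ignore
/// let mut payload = b"hello, world".to_vec();
/// payload.resize(sb.block_size as usize, 0); // round up to a full block
/// let result = journaled_write_data(&sb, &mut bd, inode_index, 0, &payload, true);
/// assert_eq!(result, JournaledWriteResult::Success);
/// ```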
pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: Index, from_block: Index, data: &[u8], flush_immediately: bool) -> JournaledWriteResult {
// create journal entry
let mut journal_entries = {
let mut journal_entries = Vec::new();
for i in 0..(data.len() / sb.block_size as usize) {
// schedule each block-sized chunk against its own target block
let chunk = &data[i * sb.block_size as usize..(i + 1) * sb.block_size as usize];
journal_entries.push(schedule_single_block_write(
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block + i as Index),
chunk,
));
}
journal_entries
};
while let Some(mut journal_entry) = journal_entries.pop() {
// if none...
if journal_entry.is_none() {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry(sb, bd);
if why != JournaledWriteResult::Success {
return why;
}
// try again
journal_entry = if data.len() > sb.block_size as _ {
schedule_multi_block_write(
sb, bd, inode,
from_block, (data.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index,
data,
)
} else {
schedule_single_block_write(
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block),
data,
)
};
if journal_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
}
let journal_entry = journal_entry.unwrap();
// if flush_immediately is true, flush all writes until the journal entry is complete
#[allow(clippy::collapsible_if)] // this is more readable
if flush_immediately {
if !flush_count_entries(sb, bd, journal_entry, true) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
}
JournaledWriteResult::Success
}