refactoring to improve listblock funcs

This commit is contained in:
husky 2023-10-19 22:35:44 -07:00
parent 53edeea2e4
commit a4b9a162f9
No known key found for this signature in database
GPG key ID: 6B3D8CB511646891
5 changed files with 1253 additions and 42 deletions

View file

@ -1 +0,0 @@
pub mod file_create;

View file

@ -5,8 +5,8 @@ extern crate alloc;
use alloc::collections::VecDeque;
use alloc::vec;
use alloc::vec::Vec;
use vapfs::{BlockDevice, Index};
use crate::structs::{Inode, InodeFlags, JBRFlags, JBRTargetType, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
use vapfs::{BlockDevice, Index, Timestamp};
use crate::structs::{Inode, InodeFlags, JBRFlags, JBRTargetType, JCAFlags, JDASFlags, JIEFlags, JMWFlags, JournalBlockWrite, JournalCountAssertion, JournalDataAllocationSet, JournalEntry, JournalEntryContents, JournalInodeExpansion, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
pub mod btree;
pub mod structs;
@ -14,7 +14,6 @@ pub mod bitmap;
pub mod crc32;
pub mod safe;
pub mod listblock;
pub mod journal;
/// Reads the superblock (located at byte offset 1024 of the block device) and returns it.
/// Returns None if the block device is too small to contain a superblock.
@ -240,7 +239,7 @@ pub unsafe fn set_inode_allocation_status(index: Index, sb: &Superblock, bd: &mu
/// Reads a journal entry by index
pub fn read_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<JournalEntry> {
let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * sb.block_size as u64));
let read_count = bd.read_blocks(&mut buf);
if read_count < core::mem::size_of::<JournalEntry>() {
return None;
@ -257,7 +256,7 @@ pub unsafe fn write_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn Bl
entry.convert_native_to_big_endian();
let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
core::ptr::write(buf.as_mut_ptr() as *mut JournalEntry, entry);
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * sb.block_size as u64));
let write_count = bd.write_blocks(&buf);
write_count == core::mem::size_of::<JournalEntry>()
}
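Both read_journal_entry and write_journal_entry now advance by one full block per entry instead of by size_of::<JournalEntry>() bytes, so every journal entry owns its own block. A minimal sketch of the new offset arithmetic (not part of this commit):
// illustrative sketch, not part of this commit:
// byte offset of journal entry `index` after this change
fn journal_entry_offset(first_journal_block: u64, block_size: u64, index: u64) -> u64 {
    // one entry per block: equivalent to (first_journal_block + index) * block_size
    (first_journal_block * block_size) + (index * block_size)
}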
@ -510,8 +509,10 @@ pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, co
entry.content.block_write.flags = JBRFlags::Stored as u32;
// generate crc32 of the entry
let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
entry.convert_native_to_big_endian();
let mut clone = entry.content;
clone.block_write.flags = 0;
entry.convert_big_endian_to_native();
clone.block_write.flags = 0; // safe because 0 == 0 no matter what endianness
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, clone); }
entry.zeroed_content_crc32 = crc32::crc32(&buf);
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
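This convert-to-big-endian / zero-the-flags / hash dance now appears in every schedule_* and verify_* function. A hypothetical helper (not in this commit) that centralizes the pattern, relying on the same "all flags fields share an offset" assumption the code makes elsewhere:
// hypothetical helper, not part of this commit: crc32 of an entry's content,
// hashed in on-disk (big endian) byte order with the flags field zeroed.
fn zeroed_content_crc32(entry: &JournalEntry) -> u32 {
    let mut be = entry.clone();
    be.convert_native_to_big_endian();
    let mut content = be.content;
    content.block_write.flags = 0; // writing a union field is safe; 0 == 0 in any endianness
    let mut buf = [0u8; core::mem::size_of::<JournalEntryContents>()];
    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content); }
    crc32::crc32(&buf)
}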
@ -526,7 +527,7 @@ pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, co
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, if the block device cannot be written to, or if you're somehow trying to write over 2105000 terabytes of data.
/// Returns the journal entry index if successful.
pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, datablock_start: Index, datablock_count: Index, data: &[u8]) -> Option<Index> {
pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, datablock_start: Index, datablock_count: Index, data: &[u8], instead_use_old_listblock: Option<ListBlock>, add_blocks: Option<Index>) -> Option<Index> {
let entry_index = next_journal_position(sb, bd)?;
let entry_content = JournalMultiblockWrite {
flags: 0,
@ -536,6 +537,8 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
list_block: 0,
old_list_block: 0, // filled in once flushed
list_block_crc32: 0,
keep_old_list_block: if instead_use_old_listblock.is_some() { 1 } else { 0 },
extra_data_blocks: add_blocks.unwrap_or(0),
};
let mut entry = JournalEntry {
@ -561,8 +564,10 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32;
// calculate the crc32
entry.convert_native_to_big_endian();
let mut content_cloned = entry.content;
content_cloned.multiblock_write.flags = 0;
entry.convert_big_endian_to_native();
content_cloned.multiblock_write.flags = 0; // safe because 0 == 0 no matter what endianness
let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_cloned); }
entry.zeroed_content_crc32 = crc32::crc32(&buf);
@ -587,7 +592,7 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
}
// find the data blocks
let allocated_blocks = find_count_unallocated_datablocks(sb, bd, datablock_count as usize)?;
let mut allocated_blocks = vec![];
// create a list block
let mut list_block = ListBlock {
@ -601,6 +606,11 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
sextuple_indirect_block_address: [0; 32],
};
if let Some(old_list_block) = instead_use_old_listblock {
// copy the old list block
list_block = old_list_block;
}
let mut old_list_block = ListBlock {
count: 0,
direct_block_addresses: [0; 32],
@ -617,8 +627,6 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
// if using indirect blocks, only fill out the dba
// otherwise, have fun!
if datablock_count > 32 {
list_block.direct_block_addresses.copy_from_slice(&allocated_blocks[..32]);
list_block.count = 32;
// set the indirect blocks
let max_per_block = sb.block_size as u64 / 8;
@ -716,7 +724,14 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
}
// allocate the indirect blocks
let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_blocks_needed as usize)?;
let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_blocks_needed as usize + datablock_count as usize)?;
// the tail of the allocation holds the datablock_count data blocks; hand them to allocated_blocks
allocated_blocks.extend_from_slice(&indirect_blocks[indirect_blocks_needed as usize..]);
let indirect_blocks = &indirect_blocks[..indirect_blocks_needed as usize];
list_block.direct_block_addresses.copy_from_slice(&allocated_blocks[..32]);
list_block.count = allocated_blocks.len() as u64;
// fill with data
let mut i = 0;
@ -976,8 +991,11 @@ pub fn verify_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, jour
if content.flags > 4 {
return false;
}
let mut content_clone = journal_entry.content;
let mut entry = journal_entry.clone();
entry.convert_native_to_big_endian();
let mut content_clone = entry.content;
content_clone.block_write.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
let hash = crc32::crc32(&buf);
@ -1120,7 +1138,6 @@ pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry
if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
return false;
}
}
} else if content.target_type == JBRTargetType::Disk as u32 {
// copy the data directly to the offset on the disk
@ -1200,7 +1217,9 @@ pub fn verify_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journ
if content.flags > 6 {
return false;
}
let mut content_clone = journal_entry.content;
let mut entry = journal_entry.clone();
entry.convert_native_to_big_endian();
let mut content_clone = entry.content;
content_clone.multiblock_write.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
@ -1613,6 +1632,324 @@ pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_
true
}
/// Schedules a count assertion journal entry
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes
/// Returns None if the journal is full, if the block device is full, or if you're trying to assert
/// a count greater than the max number of journal entries.
pub fn schedule_count_assertion(sb: &Superblock, bd: &mut dyn BlockDevice, count: u32) -> Option<Index> {
let entry_index = next_journal_position(sb, bd)?;
let entry_content = JournalCountAssertion {
flags: JCAFlags::Written as u32,
count,
};
let mut entry = JournalEntry {
operation: JournalOperation::CountAssertion as u32,
zeroed_content_crc32: 0,
content: JournalEntryContents {
count_assertion: entry_content,
},
};
// calculate the crc32 of the zeroed content
entry.convert_native_to_big_endian();
let mut content_clone = entry.content;
entry.convert_big_endian_to_native();
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
content_clone.count_assertion.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
let hash = crc32::crc32(&buf);
entry.zeroed_content_crc32 = hash;
// write the journal entry
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
Some(entry_index)
}
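schedule_count_assertion is intended to bracket a group of entries that must flush together, which is how new_file and remove_file in safe.rs use it below. A minimal usage sketch, assuming two dependent single-block writes and hypothetical inode_idx / dir_idx / inode_buf / dir_buf values:
// illustrative sketch, not part of this commit: either both writes flush, or both are skipped
let _assertion = schedule_count_assertion(&sb, bd, 2).expect("journal full");
schedule_single_block_write(&sb, bd, inode_idx, JBRTargetType::Inode, None, &inode_buf)
    .expect("journal full");
schedule_single_block_write(&sb, bd, dir_idx, JBRTargetType::DataBlock, Some(0), &dir_buf)
    .expect("journal full");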
/// "flushes" a count assertion journal entry, i.e. checks whether the following `count` journal entries
/// are valid.
/// if they are, returns true and moves the head to the next entry;
/// otherwise, returns false and moves the head past the `count` journal entries.
/// if an error occurs, returns false and doesn't move the head. CHECK FOR THIS CASE!
pub fn flush_count_assertion(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
// read the journal entry
let journal_entry = read_journal_entry(entry_index, sb, bd);
if journal_entry.is_none() {
return false;
}
let mut journal_entry = journal_entry.unwrap();
// check the crc32
let mut entry = journal_entry.clone();
entry.convert_native_to_big_endian();
let mut content_clone = entry.content;
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
content_clone.count_assertion.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
let hash = crc32::crc32(&buf);
if hash != journal_entry.zeroed_content_crc32 {
return false;
}
// check the count
let content = unsafe { journal_entry.content.count_assertion };
if content.count as u64 > sb.journal_block_count {
return false;
}
let mut head = sb.journal_position as Index;
head += 1; // skip the count assertion entry
let max_index = sb.journal_block_count;
if head >= max_index {
head = 0;
}
let mut left = content.count as Index;
let mut fail = false;
// check the flags to see if it's marked Bad
if content.flags == JCAFlags::Bad as u32 {
fail = true;
}
let mut checked = vec![];
if !fail {
while left > 0 {
let entry = read_journal_entry(head, sb, bd);
if entry.is_none() {
fail = true;
break;
}
let entry = entry.unwrap();
checked.push(head);
const SINGLE_BLOCK_WRITE: u32 = JournalOperation::SingleBlockWrite as u32;
const MULTI_BLOCK_WRITE: u32 = JournalOperation::MultiblockWrite as u32;
const COUNT_ASSERTION: u32 = JournalOperation::CountAssertion as u32;
match entry.operation {
SINGLE_BLOCK_WRITE => {
if !verify_single_block_write(sb, bd, &entry) {
fail = true;
break;
}
}
MULTI_BLOCK_WRITE => {
if !verify_multi_block_write(sb, bd, &entry) {
fail = true;
break;
}
}
COUNT_ASSERTION => {
fail = true; // count assertions can't be nested
break;
}
_ => {
fail = true;
break;
}
}
head += 1;
left -= 1;
if head >= max_index {
head = 0;
}
}
}
if fail {
for i in checked {
let mut entry = read_journal_entry(i, sb, bd).unwrap();
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
entry.content.count_assertion.flags = 0; // being lazy, all flags fields should be at the same offset
if !unsafe { write_journal_entry(i, sb, bd, entry) } {
return false;
}
}
// update the journal entry
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
journal_entry.content.count_assertion.flags = JCAFlags::Bad as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
// move the head past all the count assertion entries
let mut sb = *sb;
sb.journal_position = (entry_index + 1 + content.count as Index) as u32; // skip the assertion entry itself plus its count entries
if sb.journal_position >= sb.journal_block_count as u32 {
sb.journal_position -= sb.journal_block_count as u32;
}
if !unsafe { write_superblock(sb, bd) } {
return false;
}
// mark the entry as complete
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
journal_entry.content.count_assertion.flags = JCAFlags::Complete as u32;
if !unsafe { write_journal_entry(entry_index, &sb, bd, journal_entry) } {
return false;
}
false
} else {
// update the journal entry
// (note: we do this before updating the superblock so that in the event of a crash,
// the filesystem will read this entry as complete and not try to flush it again,
// which could corrupt things if the head was moved before this entry was marked as complete)
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
journal_entry.content.count_assertion.flags = JCAFlags::Complete as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
// move the head to the next entry
let mut sb = *sb;
sb.journal_position = (entry_index + 1) as u32;
if sb.journal_position >= sb.journal_block_count as u32 {
sb.journal_position -= sb.journal_block_count as u32;
}
if !unsafe { write_superblock(sb, bd) } {
return false;
}
true
}
}
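Per the doc comment, a false return is ambiguous: either the asserted entries were bad (head moved past them) or the assertion entry itself couldn't be processed (head unmoved). flush_count_entries below disambiguates by re-reading the superblock; a sketch of the same check:
// illustrative sketch, not part of this commit: telling the two failure modes apart
let before = sb.journal_position;
if !flush_count_assertion(&sb, bd, before as Index) {
    let sb_now = get_superblock(bd).expect("block device error");
    if sb_now.journal_position == before {
        // hard error: the assertion entry itself couldn't be processed
    } else {
        // the asserted entries were bad; they were marked complete and skipped
    }
}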
/// Schedules a data allocation set journal entry
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes
/// Returns None if the journal is full, if the block device is full, or if you specified more than 32 blocks
/// Otherwise, returns Some(Index) where Index is the index of the journal entry
pub fn schedule_data_allocation_set(sb: &Superblock, bd: &mut dyn BlockDevice, data_blocks: Vec<Index>, set_inodes: bool, set_to_allocated: bool) -> Option<Index> {
if data_blocks.len() > 32 {
return None;
}
let entry_index = next_journal_position(sb, bd)?;
let mut entry_content = JournalDataAllocationSet {
flags: JDASFlags::Chosen as u32,
set_inodes: set_inodes as u8,
count: data_blocks.len() as u8,
set_to_allocated: set_to_allocated as u8,
data_blocks: [0; 32],
};
for i in 0..data_blocks.len() {
entry_content.data_blocks[i] = data_blocks[i];
}
let mut entry = JournalEntry {
operation: JournalOperation::DataAllocationSet as u32,
zeroed_content_crc32: 0,
content: JournalEntryContents {
data_allocation_set: entry_content,
},
};
// calculate the crc32 of the zeroed content
entry.convert_native_to_big_endian();
let mut content_clone = entry.content;
entry.convert_big_endian_to_native();
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
content_clone.data_allocation_set.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
let hash = crc32::crc32(&buf);
entry.zeroed_content_crc32 = hash;
// write the journal entry
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// set the blocks
for i in 0..data_blocks.len() {
if set_inodes {
if !unsafe { set_inode_allocation_status(data_blocks[i], sb, bd, set_to_allocated) } {
return None;
}
} else if !unsafe { set_datablock_allocation_status(data_blocks[i], sb, bd, set_to_allocated) } {
return None;
}
}
// update the journal entry
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
entry.content.data_allocation_set.flags = JDASFlags::Set as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
Some(entry_index)
}
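Because one entry carries at most 32 block indices, callers batch larger sets, as remove_file does below. A sketch with a hypothetical `blocks` vector:
// illustrative sketch, not part of this commit: freeing many datablocks in chunks of 32
for chunk in blocks.chunks(32) {
    schedule_data_allocation_set(&sb, bd, chunk.to_vec(), false, false)
        .expect("journal full");
}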
/// Verifies that a data allocation set journal entry is valid
/// Returns true if the entry is valid, false if it is not
pub fn verify_data_allocation_set(sb: &Superblock, bd: &mut dyn BlockDevice, entry: &JournalEntry) -> bool {
let content = unsafe { entry.content.data_allocation_set };
// check the crc32
// (hash a big-endian clone, but compare against the original entry's native crc32 field)
let mut clone = entry.clone();
clone.convert_native_to_big_endian();
let mut content_clone = clone.content;
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
content_clone.data_allocation_set.flags = 0;
let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
let hash = crc32::crc32(&buf);
if hash != entry.zeroed_content_crc32 {
return false;
}
// check the flags
if content.flags != JDASFlags::Set as u32 {
return false;
}
true
}
/// Flushes a data allocation set journal entry
/// Returns true if the entry was flushed successfully, false if an error occurred
/// head is not moved if an error occurs
pub fn flush_data_allocation_set(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
// read the journal entry
let journal_entry = read_journal_entry(entry_index, sb, bd);
if journal_entry.is_none() {
return false;
}
let mut journal_entry = journal_entry.unwrap();
// check this entry
if !verify_data_allocation_set(sb, bd, &journal_entry) {
return false;
}
// update to complete
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
journal_entry.content.data_allocation_set.flags = JDASFlags::Complete as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
return false;
}
// move the head to the next entry
let mut sb = *sb;
sb.journal_position = (entry_index + 1) as u32;
if sb.journal_position >= sb.journal_block_count as u32 {
sb.journal_position -= sb.journal_block_count as u32;
}
if !unsafe { write_superblock(sb, bd) } {
return false;
}
true
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum JournaledWriteResult {
Success,
@ -1628,7 +1965,7 @@ pub fn flush_count_entries(sb: &Superblock, bd: &mut dyn BlockDevice, mut to: In
let mut sb = *sb;
let mut head = sb.journal_position as Index;
let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as Index;
let max_index = sb.journal_block_count;
if head >= max_index {
head = 0;
}
@ -1658,8 +1995,18 @@ pub fn flush_count_entries(sb: &Superblock, bd: &mut dyn BlockDevice, mut to: In
}
let entry = entry.unwrap();
if unsafe { entry.content.count_assertion.flags } == JCAFlags::Complete as u32 {
head += 1;
if head >= max_index {
head = 0;
}
continue;
}
const SINGLE_BLOCK_WRITE: u32 = JournalOperation::SingleBlockWrite as u32;
const MULTI_BLOCK_WRITE: u32 = JournalOperation::MultiblockWrite as u32;
const COUNT_ASSERTION: u32 = JournalOperation::CountAssertion as u32;
const DATA_ALLOCATION_SET: u32 = JournalOperation::DataAllocationSet as u32;
match entry.operation {
SINGLE_BLOCK_WRITE => {
flush_single_block_write(&sb, bd, head);
@ -1667,6 +2014,23 @@ pub fn flush_count_entries(sb: &Superblock, bd: &mut dyn BlockDevice, mut to: In
MULTI_BLOCK_WRITE => {
flush_multi_block_write(&sb, bd, head);
}
COUNT_ASSERTION => {
if !flush_count_assertion(&sb, bd, head) {
let sb_opt = get_superblock(bd);
if sb_opt.is_none() {
return false;
}
let sb_opt = sb_opt.unwrap();
if sb_opt.journal_position as Index == head {
// the entry itself was corrupt
// fixme: figure out what's best to do here
return false;
}
}
}
DATA_ALLOCATION_SET => {
flush_data_allocation_set(&sb, bd, head);
}
_ => {}
}
@ -1677,7 +2041,12 @@ pub fn flush_count_entries(sb: &Superblock, bd: &mut dyn BlockDevice, mut to: In
}
sb = sb_opt.unwrap();
head += 1;
if head == sb.journal_position as Index {
head += 1;
} else {
head = sb.journal_position as Index;
}
if head >= max_index {
head = 0;
}
@ -1687,7 +2056,7 @@ pub fn flush_count_entries(sb: &Superblock, bd: &mut dyn BlockDevice, mut to: In
}
/// attempts to figure out why we couldn't create a journal entry, and returns success if it was able to resolve the issue
pub fn why_cant_make_journal_entry(sb: &Superblock, bd: &mut dyn BlockDevice) -> JournaledWriteResult {
pub fn why_cant_make_journal_entry(sb: &mut Superblock, bd: &mut dyn BlockDevice) -> JournaledWriteResult {
if find_first_unallocated_datablock(sb, bd).is_none() {
return JournaledWriteResult::OutOfDiskSpace;
} else {
@ -1699,6 +2068,8 @@ pub fn why_cant_make_journal_entry(sb: &Superblock, bd: &mut dyn BlockDevice) ->
let current_entry = current_entry.unwrap();
const SINGLE_BLOCK_WRITE: u32 = JournalOperation::SingleBlockWrite as u32;
const MULTI_BLOCK_WRITE: u32 = JournalOperation::MultiblockWrite as u32;
const COUNT_ASSERTION: u32 = JournalOperation::CountAssertion as u32;
const DATA_ALLOCATION_SET: u32 = JournalOperation::DataAllocationSet as u32;
match current_entry.operation {
SINGLE_BLOCK_WRITE => {
if !flush_single_block_write(sb, bd, sb.journal_position as Index) {
@ -1710,11 +2081,37 @@ pub fn why_cant_make_journal_entry(sb: &Superblock, bd: &mut dyn BlockDevice) ->
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
COUNT_ASSERTION => {
//if !flush_count_assertion(sb, bd, sb.journal_position as Index) {
// return JournaledWriteResult::PotentialFilesystemCorruption;
//}
// skip the count assertion entry to hopefully not crash
let mut sb = *sb;
sb.journal_position += 1;
if sb.journal_position >= sb.journal_block_count as u32 {
sb.journal_position -= sb.journal_block_count as u32;
}
if !unsafe { write_superblock(sb, bd) } {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
}
DATA_ALLOCATION_SET => {
if !flush_data_allocation_set(sb, bd, sb.journal_position as Index) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
_ => {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
}
let new_sb = get_superblock(bd);
if new_sb.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
*sb = new_sb.unwrap();
JournaledWriteResult::Success
}
@ -1814,7 +2211,8 @@ pub fn journaled_write_inode(sb: &Superblock, bd: &mut dyn BlockDevice, old_inod
/// if you want to write data to the disk, this is likely the function you want
/// # Important Note
/// if data.len() is not a multiple of the block size, undefined behavior may occur
pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: Index, from_block: Index, data: &[u8], flush_immediately: bool) -> JournaledWriteResult {
/// if raw is true, this will write directly to the disk rather than to the data block area, which may be unsafe!
pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: Index, from_block: Index, data: &[u8], flush_immediately: bool, raw: bool) -> JournaledWriteResult {
// create journal entry
let mut journal_entries = {
@ -1822,7 +2220,7 @@ pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: In
for i in 0..(data.len() / sb.block_size as usize) {
journal_entries.push(schedule_single_block_write(
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block),
if !raw { JBRTargetType::DataBlock } else { JBRTargetType::Disk }, Some(from_block),
data,
));
}
@ -1840,10 +2238,10 @@ pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: In
// try again
journal_entry = schedule_single_block_write(
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block),
data,
);
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block),
data,
);
if journal_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;

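The two new schedule_multi_block_write parameters support append-style writes: instead_use_old_listblock seeds the new list block from the file's existing one (and keep_old_list_block stops the old one from being deallocated), while add_blocks requests extra empty datablocks beyond those needed for the data. A sketch of the intended call shape, under those assumptions, with hypothetical inode_idx / data values:
// illustrative sketch, not part of this commit: extend an existing file,
// keeping its current list block and pre-allocating 4 spare datablocks
let inode = read_inode(inode_idx, &sb, bd).expect("bad inode");
let blocks_needed = (data.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index;
let entry = schedule_multi_block_write(
    &sb, bd, inode_idx,
    inode.block_count,      // datablock_start: append after the existing blocks
    blocks_needed,          // datablock_count
    data,
    Some(inode.listblock),  // instead_use_old_listblock
    Some(4),                // add_blocks -> extra_data_blocks
);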
View file

@ -1,11 +1,11 @@
use alloc::vec;
use alloc::vec::Vec;
use vapfs::{BlockDevice, Index};
use crate::read_datablock;
use crate::{read_datablock, write_datablock};
use crate::structs::{ListBlock, Superblock};
pub struct ListblockIter<'a> {
listblock: &'a ListBlock,
listblock: &'a mut ListBlock,
index: usize,
buf1: (Index, Vec<u8>),
buf2: (Index, Vec<u8>),
@ -16,7 +16,7 @@ pub struct ListblockIter<'a> {
}
impl<'a> ListblockIter<'a> {
pub fn new(listblock: &'a ListBlock) -> Self {
pub fn new(listblock: &'a mut ListBlock) -> Self {
Self { listblock, index: 0, buf1: (0, vec![]), buf2: (0, vec![]), buf3: (0, vec![]), buf4: (0, vec![]), buf5: (0, vec![]), buf6: (0, vec![]) }
}
@ -264,4 +264,384 @@ impl<'a> ListblockIter<'a> {
}
}
}
/// returns the number of extra datablocks needed to be able to append the given number of datablocks
/// returns None if an error occurs
pub fn needed_to_append(&mut self, sb: &Superblock, bd: &mut dyn BlockDevice, want_to_append: u64) -> Option<Index> {
let max_per_block = sb.block_size as u64 / 8; // maximum number of pointers we can store per datablock
let N = max_per_block * 32; // maximum number of pointers we can store in the single indirect blocks section of a listblock
let N2 = N * N; // maximum number of pointers we can store in the double indirect blocks section of a listblock
let N3 = N2 * N; // you get the idea
let N4 = N3 * N;
let N5 = N4 * N;
let N6 = N5 * N;
let mut indirect_blocks_needed = 0;
// if we can fit it in the direct blocks, no extra blocks needed
let dba_count = self.listblock.count + want_to_append; // how many blocks of data we will have after appending
if dba_count < 32 {
return Some(0);
} else if dba_count < N { // if we can fit it in the single indirect blocks
// yes we can
// how many indirect blocks do we need?
indirect_blocks_needed = (dba_count / max_per_block) + 1;
} else if dba_count < N2 {
// no, but we can fit it in the double indirect blocks
// first, fill up the single indirect blocks
indirect_blocks_needed = N / max_per_block;
let datablocks_left = dba_count - N;
// how many double indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N) + 1;
// how many single indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
} else if dba_count < N3 {
// this fills up the single and double indirect blocks
indirect_blocks_needed = N / max_per_block; // 32 single indirect blocks
indirect_blocks_needed += N2 / N;
let datablocks_left = dba_count - N2;
// how many triple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N2) + 1;
// how many double indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N) + 1;
// how many single indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
} else if dba_count < N4 {
// this fills up the single, double, and triple indirect blocks
indirect_blocks_needed = N / max_per_block;
indirect_blocks_needed += N2 / N;
indirect_blocks_needed += N3 / N2;
let datablocks_left = dba_count - N3;
// how many quadruple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N3) + 1;
// how many triple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N2) + 1;
// how many double indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N) + 1;
// how many single indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
} else if dba_count < N5 {
// this fills up the single, double, triple, and quadruple indirect blocks
indirect_blocks_needed = N / max_per_block;
indirect_blocks_needed += N2 / N;
indirect_blocks_needed += N3 / N2;
indirect_blocks_needed += N4 / N3;
let datablocks_left = dba_count - N4;
// how many quintuple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N4) + 1;
// how many quadruple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N3) + 1;
// how many triple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N2) + 1;
// how many double indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N) + 1;
// how many single indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
} else if dba_count < N6 {
// this fills up the single, double, triple, quadruple, and quintuple indirect blocks
indirect_blocks_needed = N / max_per_block;
indirect_blocks_needed += N2 / N;
indirect_blocks_needed += N3 / N2;
indirect_blocks_needed += N4 / N3;
indirect_blocks_needed += N5 / N4;
let datablocks_left = dba_count - N5;
// how many sextuple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N5) + 1;
// how many quintuple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N4) + 1;
// how many quadruple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N3) + 1;
// how many triple indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N2) + 1;
// how many double indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / N) + 1;
// how many single indirect blocks do we need?
indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
} else {
// congratulations, you've attempted to write around 2105000 terabytes of data
return None;
}
if want_to_append != 0 {
// subtract the amount of indirect blocks we already have
indirect_blocks_needed -= self.needed_to_append(sb, bd, 0)?;
}
Some(indirect_blocks_needed)
}
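To make the constants concrete: with 4096-byte blocks, each indirect block holds 512 eight-byte pointers, so the 32 single-indirect slots reach N = 16384 datablocks, the double-indirect section N squared per the code's definition, and so on. A worked check mirroring needed_to_append's constants:
// worked example, not part of this commit: capacity constants for 4096-byte blocks
let block_size: u64 = 4096;
let max_per_block = block_size / 8; // 512 pointers per indirect block
let n = max_per_block * 32;         // 16_384 via the single indirect section
let n2 = n * n;                     // 268_435_456 via the double indirect section
assert_eq!((max_per_block, n, n2), (512, 16_384, 268_435_456));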
/// appends the given datablocks to the list block, using the provided extra indirect blocks
/// this function is unsafe because it assumes that you've calculated the correct number of indirect blocks needed
/// also, the writes are not journaled; it is assumed that you are working with scratch data and not live data
/// returns true on success, false on failure
pub unsafe fn append(&mut self, sb: &Superblock, bd: &mut dyn BlockDevice, to_append: &[Index], extra: &mut Vec<Index>) -> bool {
let mut i = 0;
let mut taken = 0;
let mut skip_data = self.listblock.count as u64;
let max_per_block = sb.block_size as u64 / 8; // maximum number of pointers we can store per datablock
let N = max_per_block * 32; // maximum number of pointers we can store in the single indirect blocks section of a listblock
let N2 = N * N; // maximum number of pointers we can store in the double indirect blocks section of a listblock
let N3 = N2 * N; // you get the idea
let N4 = N3 * N;
let N5 = N4 * N;
let N6 = N5 * N;
// fill first 32 direct if possible
if skip_data < 32 {
for block in 0..32 {
if i >= to_append.len() as u64 {
break;
}
if skip_data > 0 {
skip_data -= 1;
continue;
}
self.listblock.direct_block_addresses[block as usize] = to_append[i as usize];
i += 1;
taken += 1;
}
}
fn fillwithdata_1(
sb: &Superblock, bd: &mut dyn BlockDevice, i: &mut u64, skip_data: &mut u64, data: &[Index],
max_per_block: u64, indirect_blocks: &mut Vec<Index>,
taken: &mut usize, dbas: &mut [Index], max: usize) -> bool {
for block1 in 0..max {
if *i >= data.len() as u64 {
break; // don't crash if the programmer fucked up
}
let mut list = vec![0u64; max_per_block as usize];
if *skip_data < max_per_block {
// we will write new data within this loop iteration, copy data from disk into
// list
let mut buf = read_datablock(dbas[block1], sb, bd);
for k in 0..*skip_data {
let mut addr: [u8; 8] = [0; 8];
addr.copy_from_slice(&buf[(k * 8) as usize..(k * 8 + 8) as usize]);
let addr = u64::from_be_bytes(addr);
list[k as usize] = addr;
}
}
let mut j = 0;
while j < max_per_block { // for each space in an indirect block
let index = *i % max_per_block;
if *i >= data.len() as u64 {
break;
}
if *skip_data > 0 {
*skip_data -= 1;
continue;
}
list[index as usize] = data[*i as usize]; // stored native here; the serialization below converts to big endian
*i += 1;
j += 1;
}
if *skip_data > 0 {
// no data should've been written, skip this block
continue;
}
let buf = list.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
if !unsafe { write_datablock(indirect_blocks[*taken], sb, bd, &buf) } {
return false;
}
*taken += 1;
dbas[block1] = indirect_blocks[*taken - 1];
}
true
}
// single indirect blocks (should write N datablocks)
if !fillwithdata_1(sb, bd, &mut i, &mut skip_data, to_append, max_per_block,
extra, &mut taken,
&mut self.listblock.single_indirect_block_address, 32) {
return false;
}
// double indirect blocks (should write N^2 datablocks)
for block2 in 0..32 {
if i >= to_append.len() as u64 {
break;
}
let mut list = vec![0u64; max_per_block as usize];
//
if !fillwithdata_1(sb, bd, &mut i, &mut skip_data, to_append, max_per_block,
extra, &mut taken,
&mut list, max_per_block as usize) {
return false;
}
let buf = list.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
if skip_data > 0 {
skip_data -= 1;
continue;
}
if !unsafe { write_datablock(extra[taken], sb, bd, &buf) } {
return false;
}
taken += 1;
self.listblock.double_indirect_block_address[block2 as usize] = extra[taken - 1];
}
// triple indirect blocks
fn fillwithdata_2(
sb: &Superblock, bd: &mut dyn BlockDevice, i: &mut u64, skip: &mut u64, data: &[Index],
block_size: usize, max_per_block: u64, indirect_blocks: &mut Vec<Index>,
taken: &mut usize, dbas: &mut [Index], max: usize) -> bool {
for block3 in 0..max { // triple
if *i >= data.len() as u64 {
break;
}
let mut buf = vec![0u8; sb.block_size as usize];
for block2 in 0..max_per_block { // double
if *i >= data.len() as u64 {
break;
}
let mut buf2 = vec![0u64; max_per_block as usize];
// single
fillwithdata_1(sb, bd, i, skip, data, max_per_block,
indirect_blocks, taken,
&mut buf2, max_per_block as usize);
let buf2 = buf2.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
if *skip > 0 {
*skip -= 1;
continue;
}
if !unsafe { write_datablock(indirect_blocks[*taken], sb, bd, &buf2) } {
return false;
}
*taken += 1;
buf[block2 as usize * 8..(block2 as usize + 1) * 8].copy_from_slice(&indirect_blocks[*taken - 1].to_be_bytes());
}
if *skip > 0 {
*skip -= 1;
continue;
}
if !unsafe { write_datablock(indirect_blocks[*taken], sb, bd, &buf) } {
return false;
}
*taken += 1;
dbas[block3 as usize] = indirect_blocks[*taken - 1];
}
true
}
fillwithdata_2(sb, bd, &mut i, &mut skip_data, to_append, sb.block_size as usize, max_per_block,
extra, &mut taken,
&mut self.listblock.triple_indirect_block_address, 32);
// quadruple indirect blocks
for block4 in 0..32 {
if i >= to_append.len() as u64 {
break;
}
let mut list = vec![0u64; max_per_block as usize];
fillwithdata_2(sb, bd, &mut i, &mut skip_data, to_append, sb.block_size as usize, max_per_block,
extra, &mut taken,
&mut list, max_per_block as usize);
let buf = list.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
if skip_data > 0 {
skip_data -= 1;
continue;
}
if !unsafe { write_datablock(extra[taken], sb, bd, &buf) } {
return false;
}
taken += 1;
self.listblock.quadruple_indirect_block_address[block4 as usize] = extra[taken - 1];
}
// quintuple indirect blocks
for block5 in 0..32 {
if i >= to_append.len() as u64 {
break;
}
let mut buf = vec![0u8; sb.block_size as usize];
for block4 in 0..max_per_block {
if i >= to_append.len() as u64 {
break;
}
let mut list = vec![0u64; max_per_block as usize];
fillwithdata_2(sb, bd, &mut i, &mut skip_data, to_append, sb.block_size as usize, max_per_block,
extra, &mut taken,
&mut list, max_per_block as usize);
let buf2 = list.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
if skip_data > 0 {
skip_data -= 1;
continue;
}
if !unsafe { write_datablock(extra[taken], sb, bd, &buf2) } {
return false;
}
taken += 1;
buf[block4 as usize * 8..(block4 as usize + 1) * 8].copy_from_slice(&extra[taken - 1].to_be_bytes());
}
if skip_data > 0 {
skip_data -= 1;
continue;
}
if !unsafe { write_datablock(extra[taken], sb, bd, &buf) } {
return false;
}
taken += 1;
self.listblock.quintuple_indirect_block_address[block5 as usize] = extra[taken - 1];
}
// sextuple indirect blocks
for block6 in 0..32 {
if i >= to_append.len() as u64 {
break;
}
let mut buf_outer = vec![0u8; sb.block_size as usize];
for block5 in 0..max_per_block {
if i >= to_append.len() as u64 {
break;
}
let mut buf = vec![0u8; sb.block_size as usize];
for block4 in 0..max_per_block {
if i >= to_append.len() as u64 {
break;
}
let mut list = vec![0u64; max_per_block as usize];
fillwithdata_2(sb, bd, &mut i, &mut skip_data, to_append, sb.block_size as usize, max_per_block,
extra, &mut taken,
&mut list, max_per_block as usize);
let buf2 = list.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
if skip_data > 0 {
skip_data -= 1;
continue;
}
if !unsafe { write_datablock(extra[taken], sb, bd, &buf2) } {
return false;
}
taken += 1;
buf[block4 as usize * 8..(block4 as usize + 1) * 8].copy_from_slice(&extra[taken - 1].to_be_bytes());
}
if skip_data > 0 {
skip_data -= 1;
continue;
}
if !unsafe { write_datablock(extra[taken], sb, bd, &buf) } {
return false;
}
taken += 1;
buf_outer[block5 as usize * 8..(block5 as usize + 1) * 8].copy_from_slice(&extra[taken - 1].to_be_bytes());
}
if skip_data > 0 {
skip_data -= 1;
continue;
}
if !unsafe { write_datablock(extra[taken], sb, bd, &buf_outer) } {
return false;
}
taken += 1;
self.listblock.sextuple_indirect_block_address[block6 as usize] = extra[taken - 1];
}
todo!()
}
}

View file

@ -1,9 +1,11 @@
use alloc::vec;
use alloc::vec::Vec;
use ondisk_btree::ToBytes;
use vapfs::{BlockDevice, Timestamp};
use crate::btree::Directory;
use crate::{bitmap, structs, write_datablock, write_inode, write_journal_entry, write_superblock};
use crate::structs::{Filetype, Inode, JournalBlockWrite, JournalEntry, JournalEntryContents, ListBlock, Superblock, UNIXMode};
use vapfs::{BlockDevice, Index, Timestamp};
use crate::btree::{Directory, EntryType};
use crate::{bitmap, find_first_unallocated_inode, flush_count_entries, get_superblock, JournaledWriteResult, read_datablock, read_inode, schedule_count_assertion, schedule_data_allocation_set, schedule_multi_block_write, schedule_single_block_write, structs, why_cant_make_journal_entry, write_datablock, write_inode, write_journal_entry, write_superblock};
use crate::listblock::ListblockIter;
use crate::structs::{Filetype, Inode, JBRTargetType, JournalBlockWrite, JournalEntry, JournalEntryContents, ListBlock, Superblock, UNIXMode};
pub enum FilesystemError {
/// Returned when an error relating to a lack of storage space occurs
@ -12,6 +14,81 @@ pub enum FilesystemError {
BlockSizeTooSmall,
/// Returned when an error occurs that was not expected
UnexpectedError,
/// Returned when a provided inode is invalid
InvalidInode,
/// Returned when the block device is acting in an unexpected way
UnexpectedBlockDeviceError,
/// Returned when the filesystem may be corrupted
PotentialFilesystemCorruption,
/// Returned when a file is not found
FileNotFound,
/// Returned when a file's type is not correct for the operation
InvalidFileType,
}
fn why(sb: &mut Superblock, bd: &mut dyn BlockDevice, res: Option<Index>) -> Result<(), FilesystemError> {
if res.is_none() {
let why = why_cant_make_journal_entry(sb, bd);
match why {
JournaledWriteResult::Success => {}
JournaledWriteResult::OutOfDiskSpace => {
return Err(FilesystemError::NotEnoughStorageSpace);
}
JournaledWriteResult::UnderlyingBlockDeviceError => {
return Err(FilesystemError::UnexpectedBlockDeviceError);
}
JournaledWriteResult::PotentialFilesystemCorruption => {
return Err(FilesystemError::PotentialFilesystemCorruption);
}
}
}
Ok(())
}
fn inode2dir(sb: &Superblock, bd: &mut dyn BlockDevice, inode_idx: Index) -> Result<Directory, FilesystemError> {
let inode = read_inode(inode_idx, &sb, bd);
if inode.is_none() {
return Err(FilesystemError::InvalidInode);
}
let mut inode = inode.unwrap();
let mut buf = vec![];
let mut lb_iter = ListblockIter::new(&mut inode.listblock);
while let Some(block) = lb_iter.next(&sb, bd) {
let block_buf = read_datablock(block, &sb, bd);
buf.extend_from_slice(&block_buf);
}
// truncate to the size of the directory
buf.truncate(inode.size as usize);
Ok(Directory::open(&buf))
}
/// should always return a vec aligned to the block size
fn pretend_set_allocation_inode(sb: &Superblock, bd: &mut dyn BlockDevice, index: Index, allocated: bool) -> Result<(u64, Vec<u8>), FilesystemError> {
// todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
// todo! see how much time this saves?
// inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
if index >= sb.inode_count {
return Err(FilesystemError::InvalidInode);
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return Err(FilesystemError::UnexpectedBlockDeviceError);
}
bitmap::set_bit(&mut bitmap_buf, index as usize, allocated);
let bitmap_block_count = (bitmap_length + (sb.block_size as u64 - 1)) / sb.block_size as u64;
let mut final_buf = vec![0; bitmap_block_count as usize * sb.block_size as usize];
final_buf[..bitmap_buf.len()].copy_from_slice(&bitmap_buf);
let bitmap_block_offset = bitmap_offset / sb.block_size as u64;
Ok((bitmap_block_offset, final_buf))
}
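The offsets above follow the on-disk layout the function assumes: the superblock at byte 1024, then (one block in) the datablock bitmap, then the inode bitmap. A worked example with an assumed geometry:
// worked example, not part of this commit: inode bitmap offset for an assumed
// geometry of 4096-byte blocks, 1_000_000 datablocks, 65_536 inodes
let block_size: u64 = 4096;
let datablock_bitmap_len = (1_000_000u64 + 7) / 8;                  // 125_000 bytes
let inode_bitmap_offset = 1024 + block_size + datablock_bitmap_len; // byte 130_120
let inode_bitmap_len = (65_536u64 + 7) / 8;                         // 8_192 bytes
assert_eq!((inode_bitmap_offset, inode_bitmap_len), (130_120, 8_192));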
/// initialises a filesystem with the given details
@ -163,9 +240,7 @@ pub fn init_filesystem(bd: &mut dyn BlockDevice, block_size: u32, bd_size: u64,
root_inode.listblock.count = root_block_count as u64;
// recalculate the inode checksum
root_inode.convert_native_to_big_endian();
root_inode.recalculate_checksum();
root_inode.convert_big_endian_to_native();
// write the root inode to the first inode block
unsafe {
@ -216,4 +291,233 @@ pub fn init_filesystem(bd: &mut dyn BlockDevice, block_size: u32, bd_size: u64,
}
Ok(())
}
/// creates a new file under the given directory,
/// if initial_data is Some, the file will be filled with the given data
/// returns the inode index on success
pub fn new_file(bd: &mut dyn BlockDevice, parent_inode_idx: Index, name: &str, base_inode: Inode, initial_data: Option<&[u8]>, flush_immediately: bool) -> Result<Index, FilesystemError> {
// we need to do the following:
// 1. flush all journal entries to get a readable state
// 2. create an inode (journaled block write on the inode bitmap)
// 3. update the parent directory (journaled multiblock write on the parent directory)
// steps 2-3 should be protected by a count assertion
// step 1
// flush all journal entries
let sb = get_superblock(bd);
if sb.is_none() {
return Err(FilesystemError::UnexpectedError);
}
let mut sb = sb.unwrap();
flush_count_entries(&sb, bd, sb.journal_position as u64, false);
// x journal entries to allocate the inode, 1 journal entry to fill the inode, 1 journal entry to update the parent directory
// 2 + x journal entries total
// read the parent directory
let mut parent_dir = inode2dir(&sb, bd, parent_inode_idx)?;
// find a free inode
let inode = find_first_unallocated_inode(&sb, bd);
if inode.is_none() {
return Err(FilesystemError::NotEnoughStorageSpace);
}
let inode_index = inode.unwrap();
let (bitmap_block_offset, bitmap_buf) = pretend_set_allocation_inode(&sb, bd, inode_index, true)?;
// make a count assertion entry
let res = schedule_count_assertion(&sb, bd, (2 + (bitmap_buf.len() / sb.block_size as usize) + if initial_data.is_some() { 1 } else { 0 }) as u32);
why(&mut sb, bd, res)?;
// step 2
// journal the bitmap
for i in 0..bitmap_buf.len() as u64 / sb.block_size as u64 {
let res = schedule_single_block_write(
&sb, bd,
0,
JBRTargetType::Disk, Some(bitmap_block_offset + i),
&bitmap_buf[(i * sb.block_size as u64) as usize..((i + 1) * sb.block_size as u64) as usize]);
why(&mut sb, bd, res)?;
}
// create the inode
let mut inode = base_inode;
inode.recalculate_checksum();
inode.convert_native_to_big_endian();
let mut buf = vec![0; sb.block_size as usize];
// copy only size_of::<Inode>() bytes; the buffer is block-sized but the inode is not
unsafe { core::ptr::copy(&inode as *const Inode as *const u8, buf.as_mut_ptr(), core::mem::size_of::<Inode>()) };
let res = schedule_single_block_write(
&sb, bd,
inode_index,
JBRTargetType::Inode, None,
&buf);
why(&mut sb, bd, res)?;
// update the parent directory
parent_dir.new_entry(name, EntryType::Inode(inode_index));
let buf = parent_dir.to_bytes();
let res = schedule_multi_block_write(
&sb, bd,
parent_inode_idx,
0, (buf.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index,
&buf, None, None);
why(&mut sb, bd, res)?;
let res = res.unwrap();
// if flush_immediately is true, flush the journal entries
#[allow(clippy::collapsible_if)]
if flush_immediately {
if !flush_count_entries(&sb, bd, res, true) {
return Err(FilesystemError::UnexpectedError);
}
}
Ok(inode_index)
}
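A usage sketch for new_file (not part of this commit); default_file_inode() and root_inode_idx are hypothetical:
// illustrative sketch, not part of this commit: create "hello.txt" in the root directory
match new_file(bd, root_inode_idx, "hello.txt", default_file_inode(), None, true) {
    Ok(idx) => { /* idx is the new file's inode index */ }
    Err(FilesystemError::NotEnoughStorageSpace) => { /* no free inode or datablock */ }
    Err(_) => { /* device error, corruption, bad parent, ... */ }
}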
/// removes a file from the given directory,
/// if shred is true, the file will be overwritten with zeros before being removed.
/// will only remove the file if it is a regular file or a hard link
/// returns () on success, or an error if the file could not be removed
pub fn remove_file(bd: &mut dyn BlockDevice, parent_node_idx: Index, name: &str, shred: bool, flush_immediately: bool) -> Result<(), FilesystemError> {
// we need to do the following:
// 1. flush all journal entries to get a readable state
// 2. update the parent directory (journaled multiblock write on the parent directory)
// 3. free the inode (journaled block write on the inode bitmap)
// 4. free the data blocks (journaled data allocation set)
// (if shred) 5. unjournaled write of zeros to the data blocks
// steps 2-4 should be protected by a count assertion
// 5 isn't because it's not journaled
// flush all journal entries
let sb = get_superblock(bd);
if sb.is_none() {
return Err(FilesystemError::UnexpectedError);
}
let mut sb = sb.unwrap();
flush_count_entries(&sb, bd, sb.journal_position as u64, false);
// read the parent directory
let mut parent_dir = inode2dir(&sb, bd, parent_node_idx)?;
// prepare to unallocate the inode
let inode_index = parent_dir.find(name);
let inode_index = match inode_index {
None => {
return Err(FilesystemError::FileNotFound);
}
Some(entry_type) => match entry_type {
EntryType::Inode(inode) => inode,
EntryType::HardLink(inode) => inode,
EntryType::SoftLink(_) => {
return Err(FilesystemError::InvalidFileType);
}
EntryType::Corrupt => {
return Err(FilesystemError::InvalidFileType);
}
}
};
let mut inode = read_inode(inode_index, &sb, bd).ok_or(FilesystemError::InvalidInode)?;
// make a count assertion entry
// update parent directory = 1 entry
// free inode = 1 entry (data allocation set on the inode bitmap)
// free data blocks = ceil(inode.block_count / 32) entries
let res = schedule_count_assertion(&sb, bd, (2 + ((inode.block_count as usize + 31) / 32)) as u32);
why(&mut sb, bd, res)?;
// update the parent directory
parent_dir.remove_entry(name);
let buf = parent_dir.to_bytes();
let res = schedule_multi_block_write(
&sb, bd,
parent_node_idx,
0, (buf.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index,
&buf, None, None);
why(&mut sb, bd, res)?;
// if shred is true, overwrite the data blocks with zeros
if shred {
let mut lb_iter = ListblockIter::new(&mut inode.listblock);
while let Some(block) = lb_iter.next(&sb, bd) {
let buf = vec![0; sb.block_size as usize];
unsafe {
write_datablock(block, &sb, bd, &buf);
}
}
}
// free the inode
let res = schedule_data_allocation_set(&sb, bd, vec![inode_index], true, false);
why(&mut sb, bd, res)?;
// free the data blocks
let mut lb_iter = ListblockIter::new(&mut inode.listblock);
let mut blocks = vec![];
let mut blocks32 = vec![];
while let Some(block) = lb_iter.next(&sb, bd) {
blocks32.push(block);
if blocks32.len() == 32 {
blocks.push(blocks32);
blocks32 = vec![];
}
}
let mut last_index = 0;
for blocks32 in blocks {
let res = schedule_data_allocation_set(&sb, bd, blocks32, false, false);
why(&mut sb, bd, res)?;
last_index = res.unwrap();
}
// if flush_immediately is true, flush the journal entries
#[allow(clippy::collapsible_if)]
if flush_immediately {
if !flush_count_entries(&sb, bd, last_index, true) {
return Err(FilesystemError::UnexpectedError);
}
}
Ok(())
}
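remove_file's shred pass is a plain unjournaled overwrite, so callers that need the zeroing and deallocation on disk right away should pass flush_immediately. A usage sketch (hypothetical root_inode_idx):
// illustrative sketch, not part of this commit: shred-delete a file and flush at once
match remove_file(bd, root_inode_idx, "hello.txt", true, true) {
    Ok(()) => {}
    Err(FilesystemError::FileNotFound) => { /* no such directory entry */ }
    Err(FilesystemError::InvalidFileType) => { /* soft link or corrupt entry */ }
    Err(_) => { /* storage, device, or corruption errors */ }
}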
/// writes data to a file, starting from the given byte offset
/// returns the number of bytes written on success, or an error if the file could not be written to
pub fn write_bytes_to_file(bd: &mut dyn BlockDevice, inode: Index, offset: Index, data: &[u8]) -> Result<usize, FilesystemError> {
// we need to do the following:
// 1. flush all journal entries to get a readable state
// 2. calculate number of new blocks needed to store the data
// 3. if new blocks are needed, allocate them (journaled data allocation set) and update inode with new size and block count
// 4. write the data (un-count-asserted single block journal writes) todo: reimplement multiblock journal write so that we can use it instead
// flush all journal entries
let sb = get_superblock(bd);
if sb.is_none() {
return Err(FilesystemError::UnexpectedError);
}
let mut sb = sb.unwrap();
flush_count_entries(&sb, bd, sb.journal_position as u64, false);
// read the inode
let mut inode = read_inode(inode, &sb, bd).ok_or(FilesystemError::InvalidInode)?;
// calculate the number of new blocks needed to store the data
let current_block_count = inode.block_count;
let blocks_to_write = (data.len() as Index + (sb.block_size as Index - 1)) / sb.block_size as Index;
let offset_blocks = (offset + (sb.block_size as Index - 1)) / sb.block_size as Index;
let new_block_count = if offset_blocks + blocks_to_write > current_block_count {
offset_blocks + blocks_to_write
} else {
current_block_count
};
if new_block_count > current_block_count {
// allocate new blocks and place them at the end of the listblock
}
todo!()
}

View file

@ -163,6 +163,7 @@ pub enum Filetype {
/// # Inode
/// Usually represents a file or directory, used to store metadata and locations of data blocks.
#[derive(Copy, Clone)]
#[repr(C)]
pub struct Inode {
/// UNIX permissions / mode and filetype
@ -241,9 +242,11 @@ impl Inode {
/// returns true if the crc32 checksum is currently valid
/// returns false otherwise
pub fn is_checksum_valid(&self) -> bool {
let mut clone = *self;
clone.convert_big_endian_to_native();
let mut buf = [0; core::mem::size_of::<Self>() - 4]; // don't hash the checksum
unsafe {
core::ptr::copy(self as *const Self as *const u8, buf.as_mut_ptr(), buf.len());
core::ptr::copy(&clone as *const Self as *const u8, buf.as_mut_ptr(), buf.len());
}
let checksum = crc32::crc32(&buf);
@ -252,11 +255,13 @@ impl Inode {
/// updates the crc32 checksum
pub fn recalculate_checksum(&mut self) {
self.convert_native_to_big_endian();
let mut buf = [0; core::mem::size_of::<Self>() - 4]; // don't hash the checksum
unsafe {
core::ptr::copy(self as *const Self as *const u8, buf.as_mut_ptr(), buf.len());
}
let checksum = crc32::crc32(&buf);
self.convert_big_endian_to_native();
self.checksum = checksum;
}
@ -342,12 +347,10 @@ pub enum JournalOperation {
SingleBlockWrite = 0,
/// A multi-block write, described by a `JournalMultiblockWrite`
MultiblockWrite = 1,
/// A file creation, described by a `JournalFileCreate`
FileCreate = 2,
/// A file deletion, described by a `JournalFileDelete`
FileDelete = 3,
/// A file truncation, described by a `JournalFileTruncate`
FileTruncate = 4,
/// A "Count Assertion", requires that the next N journal entries can be flushed without error, described by a `JournalCountAssertion`
CountAssertion = 2,
/// A data block allocation set, described by a `JournalDataAllocationSet`
DataAllocationSet = 3,
}
/// # JournalBlockWrite
@ -488,6 +491,10 @@ pub struct JournalMultiblockWrite {
pub old_list_block: Index,
/// crc32 hash of the list block
pub list_block_crc32: u32,
/// if != 0, don't deallocate the old list block
pub keep_old_list_block: u8,
/// number of extra empty data blocks to allocate and append
pub extra_data_blocks: Index,
}
impl JournalMultiblockWrite {
@ -496,10 +503,14 @@ impl JournalMultiblockWrite {
#[cfg(target_endian = "little")]
{
self.flags = u32::from_be(self.flags);
self.target_inode = u64::from_be(self.target_inode);
self.target_block = u64::from_be(self.target_block);
self.target_block_count = u64::from_be(self.target_block_count);
self.list_block = u64::from_be(self.list_block);
self.old_list_block = u64::from_be(self.old_list_block);
self.list_block_crc32 = u32::from_be(self.list_block_crc32);
self.keep_old_list_block = u8::from_be(self.keep_old_list_block);
self.extra_data_blocks = u64::from_be(self.extra_data_blocks);
}
}
@ -508,10 +519,14 @@ impl JournalMultiblockWrite {
#[cfg(target_endian = "little")]
{
self.flags = u32::to_be(self.flags);
self.target_inode = u64::to_be(self.target_inode);
self.target_block = u64::to_be(self.target_block);
self.target_block_count = u64::to_be(self.target_block_count);
self.list_block = u64::to_be(self.list_block);
self.old_list_block = u64::to_be(self.old_list_block);
self.list_block_crc32 = u32::to_be(self.list_block_crc32);
self.keep_old_list_block = u8::to_be(self.keep_old_list_block);
self.extra_data_blocks = u64::to_be(self.extra_data_blocks);
}
}
}
@ -543,7 +558,7 @@ pub enum JMWFlags {
#[repr(C)]
#[derive(Copy, Clone)]
pub struct ListBlock {
/// Count of blocks used
/// Count of blocks used, does not include non-data (i.e. pointer-containing) blocks
pub count: Index,
/// Direct-Block-Addresses
pub direct_block_addresses: [Index; 32],
@ -601,6 +616,119 @@ impl ListBlock {
}
}
/// # JournalCountAssertion
/// upon reaching this entry during a flush, we will verify that the next n entries
/// are possible to flush (i.e. all source data was written, and the entry can be completed without error).
/// if everything is okay, this entry will be marked as complete, and the next n entries will be flushed.
/// if something is wrong, the following entries will be marked as complete and skipped.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct JournalCountAssertion {
/// JCAFlags, mainly for compatibility
pub flags: u32,
/// number of entries to assert
pub count: u32,
}
impl JournalCountAssertion {
/// in-place conversion from the storage representation (big endian) to the native representation
pub fn convert_big_endian_to_native(&mut self) {
#[cfg(target_endian = "little")]
{
self.flags = u32::from_be(self.flags);
self.count = u32::from_be(self.count);
}
}
/// in-place conversion from the native representation to the storage representation (big endian)
pub fn convert_native_to_big_endian(&mut self) {
#[cfg(target_endian = "little")]
{
self.flags = u32::to_be(self.flags);
self.count = u32::to_be(self.count);
}
}
}
/// # JCAFlags
/// Flags field of a JournalCountAssertion
#[repr(u32)]
#[derive(Copy, Clone)]
pub enum JCAFlags {
/// Journal entry has been written and is waiting to be flushed
Written = 1,
/// Following entries were bad, but head hasn't been moved
Bad = 2,
/// Journal entry has been flushed
Complete = 0,
}
/// # JournalDataAllocationSet
/// performed as follows:
/// 1. create and write the journal entry
/// 2. set the status of the given data blocks to what was chosen
/// 3. set the entry's allocated flag
/// == the following steps will be performed upon a journal flush ==
/// 4. set the entry as complete
/// == done! ==
#[repr(C)]
#[derive(Copy, Clone)]
pub struct JournalDataAllocationSet {
/// JDASFlags
pub flags: u32,
/// if != 0, sets inode allocation status instead of datablock allocation status
pub set_inodes: u8,
/// number of data blocks to set (maximum 32)
pub count: u8,
/// true (!= 0) if they should be set as allocated, false if they should be set as deallocated
pub set_to_allocated: u8,
/// the data blocks to set
pub data_blocks: [Index; 32],
}
impl JournalDataAllocationSet {
/// in-place conversion from the storage representation (big endian) to the native representation
pub fn convert_big_endian_to_native(&mut self) {
#[cfg(target_endian = "little")]
{
self.flags = u32::from_be(self.flags);
self.set_inodes = u8::from_be(self.set_inodes);
self.count = u8::from_be(self.count);
self.set_to_allocated = u8::from_be(self.set_to_allocated);
for i in 0..32 {
self.data_blocks[i] = u64::from_be(self.data_blocks[i]);
}
}
}
/// in-place conversion from the native representation to the storage representation (big endian)
pub fn convert_native_to_big_endian(&mut self) {
#[cfg(target_endian = "little")]
{
self.flags = u32::to_be(self.flags);
self.set_inodes = u8::to_be(self.set_inodes);
self.count = u8::to_be(self.count);
self.set_to_allocated = u8::to_be(self.set_to_allocated);
for i in 0..32 {
self.data_blocks[i] = u64::to_be(self.data_blocks[i]);
}
}
}
}
/// # JDASFlags
/// Flags field of a JournalDataAllocationSet
#[repr(u32)]
#[derive(Copy, Clone)]
pub enum JDASFlags {
/// journal entry was created, but nothing was done
Chosen = 1,
/// datablocks have been set
Set = 2,
/// done
Complete = 0,
}
/// # JournalEntryContents
/// union of all possible journal entries
#[repr(C)]
@ -608,4 +736,6 @@ impl ListBlock {
pub union JournalEntryContents {
pub block_write: JournalBlockWrite,
pub multiblock_write: JournalMultiblockWrite,
pub count_assertion: JournalCountAssertion,
pub data_allocation_set: JournalDataAllocationSet,
}
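On the recurring "writing to a union is safe" comments: assigning to a field of a #[repr(C)] union of Copy types is safe Rust; only reads are unsafe, since the compiler cannot know which variant was last written. A minimal sketch:
// illustrative sketch, not part of this commit
let mut c = JournalEntryContents {
    count_assertion: JournalCountAssertion { flags: 0, count: 0 },
};
c.block_write.flags = 0;                        // safe: a plain overwrite
let flags = unsafe { c.count_assertion.flags }; // unsafe: reinterprets the bytes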