vapfs_ufs/src/lib.rs

539 lines
23 KiB
Rust
Raw Normal View History

#![no_std]
extern crate alloc;
2023-08-07 15:15:39 -07:00
use alloc::vec;
use alloc::vec::Vec;
use vapfs::{BlockDevice, Index};
2023-08-07 15:15:39 -07:00
use crate::structs::{Inode, JBRFlags, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};
pub mod btree;
pub mod structs;
pub mod bitmap;
pub mod crc32;
/// Reads the superblock (located at byte offset 1024 of the block device) and returns it.
/// Returns None if the block device is too small to contain a superblock.
pub fn get_superblock(bd: &mut dyn BlockDevice) -> Option<Superblock> {
let mut buf: [u8; core::mem::size_of::<Superblock>()] = [0; core::mem::size_of::<Superblock>()];
bd.seek(1024);
let read_count = bd.read_blocks(&mut buf);
if read_count < core::mem::size_of::<Superblock>() {
return None;
}
let mut superblock = unsafe { core::ptr::read(buf.as_ptr() as *const Superblock) };
superblock.convert_big_endian_to_native();
Some(superblock)
}
/// Performs a direct write of a superblock to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_superblock(mut sb: Superblock, bd: &mut dyn BlockDevice) -> bool {
sb.convert_native_to_big_endian();
let mut buf: [u8; core::mem::size_of::<Superblock>()] = [0; core::mem::size_of::<Superblock>()];
core::ptr::write(buf.as_mut_ptr() as *mut Superblock, sb);
bd.seek(1024);
let write_count = bd.write_blocks(&buf);
write_count == core::mem::size_of::<Superblock>()
}
/// Reads the inode at the given index and returns it.
pub fn read_inode(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Inode> {
let mut buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
bd.seek((sb.first_inode_block * sb.block_size as u64) + (index * core::mem::size_of::<Inode>() as u64));
let read_count = bd.read_blocks(&mut buf);
if read_count < core::mem::size_of::<Inode>() {
return None;
}
let mut inode = unsafe { core::ptr::read(buf.as_ptr() as *const Inode) };
inode.convert_big_endian_to_native();
Some(inode)
}
/// Performs a direct write of an inode to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_inode(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, mut inode: Inode) -> bool {
inode.convert_native_to_big_endian();
let mut buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
core::ptr::write(buf.as_mut_ptr() as *mut Inode, inode);
bd.seek((sb.first_inode_block * sb.block_size as u64) + (index * core::mem::size_of::<Inode>() as u64));
let write_count = bd.write_blocks(&buf);
write_count == core::mem::size_of::<Inode>()
}
/// Reads a single datablock into memory, may return less than the block size if the end of the block device is reached.
pub fn read_datablock(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Vec<u8> {
let mut buf: Vec<u8> = Vec::new();
buf.resize(sb.block_size as usize, 0);
bd.seek((sb.first_data_block * sb.block_size as u64) + (index * sb.block_size as u64));
bd.read_blocks(&mut buf);
buf
}
/// Performs a direct write of a datablock to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_datablock(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, buf: &[u8]) -> bool {
bd.seek((sb.first_data_block * sb.block_size as u64) + (index * sb.block_size as u64));
let write_count = bd.write_blocks(buf);
write_count == sb.block_size as usize
}
/// Checks if a datablock is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_datablock_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
let bitmap_length = (sb.data_block_count + 7) / 8;
if index >= bitmap_length {
return None;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
Some(bitmap::get_bit(&bitmap_buf, index as usize) == bitmap::SET)
}
/// Checks if an inode is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_inode_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
// inode bitmap is at 1024 + 156 + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + 156 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
if index >= bitmap_length {
return None;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
Some(bitmap::get_bit(&bitmap_buf, index as usize) == bitmap::SET)
}
/// Finds the first unallocated datablock and returns its index.
/// Will return None if no unallocated datablock is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_datablock(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
let bitmap_length = (sb.data_block_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET).map(|index| index as Index)
}
2023-08-07 15:15:39 -07:00
/// Finds a number of unallocated datablocks and returns their indices.
/// Will return None if not enough unallocated datablocks are found, or if the block device cannot fill the buffer.
pub fn find_count_unallocated_datablocks(sb: &Superblock, bd: &mut dyn BlockDevice, count: usize) -> Option<Vec<Index>> {
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
let bitmap_length = (sb.data_block_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
let mut found = Vec::new();
while found.len() < count {
if let Some(i) = bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET) {
found.push(i as Index);
// set the bit so we don't find it again
bitmap::set_bit(&mut bitmap_buf, i, bitmap::SET);
} else {
return None;
}
}
Some(found)
}
/// Finds the first unallocated inode and returns its index.
/// Will return None if no unallocated inode is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_inode(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
// inode bitmap is at 1024 + 156 + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + 156 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return None;
}
bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET).map(|index| index as Index)
}
/// Sets the allocation status of a datablock.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn set_datablock_allocation_status(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, allocated: bool) -> bool {
// todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
// todo! see how much time this saves?
// datablock bitmap is at 1024 + 156 byte offset, length is data_block_count / 8 rounded up
let bitmap_offset = 1024 + 156;
let bitmap_length = (sb.data_block_count + 7) / 8;
if index >= bitmap_length {
return false;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return false;
}
bitmap::set_bit(&mut bitmap_buf, index as usize, allocated);
bd.seek(bitmap_offset);
let write_count = bd.write_blocks(&bitmap_buf);
write_count == bitmap_length as usize
}
/// Sets the allocation status of an inode.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn set_inode_allocation_status(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, allocated: bool) -> bool {
// todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
// todo! see how much time this saves?
// inode bitmap is at 1024 + 156 + datablock_bitmap_length byte offset,
// length is inode_count / 8 rounded up
let bitmap_offset = 1024 + 156 + ((sb.data_block_count + 7) / 8);
let bitmap_length = (sb.inode_count + 7) / 8;
if index >= bitmap_length {
return false;
}
let mut bitmap_buf: Vec<u8> = Vec::new();
bitmap_buf.resize(bitmap_length as usize, 0);
bd.seek(bitmap_offset);
let read_count = bd.read_blocks(&mut bitmap_buf);
if read_count < bitmap_length as usize {
return false;
}
bitmap::set_bit(&mut bitmap_buf, index as usize, allocated);
bd.seek(bitmap_offset);
let write_count = bd.write_blocks(&bitmap_buf);
write_count == bitmap_length as usize
2023-07-31 07:43:49 -07:00
}
/// Reads a journal entry by index
pub fn read_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<JournalEntry> {
let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
let read_count = bd.read_blocks(&mut buf);
if read_count < core::mem::size_of::<JournalEntry>() {
return None;
}
let mut entry = unsafe { core::ptr::read(buf.as_ptr() as *const JournalEntry) };
entry.convert_big_endian_to_native();
Some(entry)
}
/// Performs a direct write of a journal entry to the block device.
/// # Safety
/// unsafe because it assumes that the entry is correctly formatted and that everything has been performed correctly
pub unsafe fn write_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, mut entry: JournalEntry) -> bool {
entry.convert_native_to_big_endian();
let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
core::ptr::write(buf.as_mut_ptr() as *mut JournalEntry, entry);
bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
let write_count = bd.write_blocks(&buf);
write_count == core::mem::size_of::<JournalEntry>()
2023-08-07 15:15:39 -07:00
}
/// Checks if a journal entry has been completed.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer,
/// or if the entry is invalid
pub fn is_journal_entry_complete(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
let entry = read_journal_entry(index, sb, bd)?;
// if flags == 0, the entry is complete
const SINGLEBLOCK: u32 = JournalOperation::SingleBlockWrite as u32;
const MULTIBLOCK: u32 = JournalOperation::MultiblockWrite as u32;
match entry.operation {
SINGLEBLOCK => unsafe { Some(entry.content.block_write.flags == 0) },
MULTIBLOCK => unsafe { Some(entry.content.multiblock_write.flags == 0) },
_ => None,
}
}
/// Returns the index of the next unused journal entry.
/// Will loop around to the beginning of the journal if the end is reached.
pub fn next_journal_position(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
let mut index = sb.journal_position as Index;
let max_index = (sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64;
loop {
let entry = read_journal_entry(index, sb, bd)?;
// if flags == 0, the entry is complete
// flags should always be the same size and at the same offset in the union, so we can just check one
if unsafe { entry.content.block_write.flags == 0 } {
return Some(index);
}
index += 1;
if index >= max_index {
index = 0;
}
if index == sb.journal_position as Index {
// we've looped around to the beginning of the journal
return None;
}
}
}
/// Creates a journal entry for a single block write operation.
/// Should be safe to call at anytime, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, or if the block device cannot be written to.
/// Returns the journal entry index if successful.
pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, write_to_inode: bool, otherwise_datablock_index: Option<Index>, data: &[u8]) -> Option<Index> {
let entry_index = next_journal_position(sb, bd)?;
let entry_content = JournalBlockWrite {
flags: 0,
target_is_inode: write_to_inode,
target_inode: containing_inode_index,
target_block: otherwise_datablock_index.unwrap_or(0),
source_block: 0, // filled in once allocated
source_block_crc32: 0, // filled in once allocated
};
let mut entry = JournalEntry {
operation: JournalOperation::SingleBlockWrite as u32,
zeroed_content_crc32: 0,
content: JournalEntryContents {
block_write: entry_content,
},
};
// write the journal entry
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// find a free data block
let data_block_index = find_first_unallocated_datablock(sb, bd)?;
// set the content and then rewrite the journal entry
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.block_write.source_block = data_block_index;
entry.content.block_write.flags = JBRFlags::Chosen as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// allocate the data block
if !unsafe { set_datablock_allocation_status(data_block_index, sb, bd, true) } {
return None;
}
// set the content and then rewrite the journal entry
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.block_write.flags = JBRFlags::Allocated as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// write the data to the data block
if !unsafe { write_datablock(data_block_index, sb, bd, data) } {
return None;
}
let written_data = read_datablock(data_block_index, sb, bd);
// set the crc32 and stored flag
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.block_write.source_block_crc32 = crc32::crc32(&written_data);
entry.content.block_write.flags = JBRFlags::Stored as u32;
// generate crc32 of the entry
let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
let mut clone = entry.content.clone();
clone.block_write.flags = 0;
unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, clone); }
entry.zeroed_content_crc32 = crc32::crc32(&buf);
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// all further steps will be performed on a journal flush
Some(entry_index)
}
/// Creates a journal entry for a multi block write operation.
/// Should be safe to call at anytime, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, or if the block device cannot be written to.
/// Returns the journal entry index if successful.
pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, datablock_start: Index, datablock_count: Index, data: &[u8]) -> Option<Index> {
let entry_index = next_journal_position(sb, bd)?;
let entry_content = JournalMultiblockWrite {
flags: 0,
target_inode: containing_inode_index,
target_block: datablock_start,
target_block_count: datablock_count,
list_block: 0,
list_block_crc32: 0,
};
let mut entry = JournalEntry {
operation: JournalOperation::MultiblockWrite as u32,
zeroed_content_crc32: 0,
content: JournalEntryContents {
multiblock_write: entry_content,
},
};
// write the journal entry
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// find a free data block for the list block
let list_block_index = find_first_unallocated_datablock(sb, bd)?;
// set the content and then rewrite the journal entry
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.list_block = list_block_index;
entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// allocate the data block
if !unsafe { set_datablock_allocation_status(list_block_index, sb, bd, true) } {
return None;
}
// set the content and then rewrite the journal entry
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::AllocatedList as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// find the data blocks
let allocated_blocks = find_count_unallocated_datablocks(sb, bd, datablock_count as usize)?;
// create a list block
let mut list_block = ListBlock {
using_indirect_blocks: if datablock_count > 12 { true } else { false },
direct_block_addresses: [0; 12],
};
let mut indirect_blocks_waiting_for_allocation_to_be_set = Vec::new();
// if using indirect blocks, only fill out the first (12 - 3) = 9 entries
// otherwise, fill out all 12 entries
if list_block.using_indirect_blocks {
for i in 0..9 {
list_block.direct_block_addresses[i] = allocated_blocks[i];
}
// if using indirect blocks, fit the remaining entries into the indirect blocks
// layout is u64 count followed by u64 addresses
let max_addresses_per_block = (sb.block_size as usize - core::mem::size_of::<u64>()) / core::mem::size_of::<u64>();
let mut indirect_block_count = (datablock_count - 9) / max_addresses_per_block as u64;
// if the count is not a multiple of the max addresses per block, add one
if (datablock_count - 9) % max_addresses_per_block as u64 != 0 {
indirect_block_count += 1;
}
// if the count is over 3, return None
if indirect_block_count > 3 {
return None;
}
// allocate the indirect blocks
let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_block_count as usize)?;
for i in 0..indirect_block_count {
list_block.direct_block_addresses[9 + i as usize] = indirect_blocks[i as usize];
indirect_blocks_waiting_for_allocation_to_be_set.push(indirect_blocks[i as usize]);
}
// write the indirect blocks
let mut indirect_block_data = vec![0; core::mem::size_of::<u64>() * max_addresses_per_block];
for i in 0..indirect_block_count {
// write the count
let count = if i == indirect_block_count - 1 {
(datablock_count - 9) % max_addresses_per_block as u64
} else {
max_addresses_per_block as u64
};
unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count); }
// write the addresses
for j in 0..count {
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).offset(1 + j as isize), allocated_blocks[(9 + i as usize) as usize + j as usize]); }
}
// write the data
if !unsafe { write_datablock(list_block.direct_block_addresses[9 + i as usize], sb, bd, &indirect_block_data) } {
return None;
}
}
} else {
for i in 0..12 {
list_block.direct_block_addresses[i] = allocated_blocks[i];
}
}
// write the list block
let buf = [0; core::mem::size_of::<ListBlock>()];
unsafe { core::ptr::write(buf.as_ptr() as *mut ListBlock, list_block); }
if !unsafe { write_datablock(list_block_index, sb, bd, &buf) } {
return None;
}
// set the content and then rewrite the journal entry
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::ChosenData as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// if we're using indirect blocks, set the allocation status of the indirect blocks
for block in indirect_blocks_waiting_for_allocation_to_be_set {
if !unsafe { set_datablock_allocation_status(block, sb, bd, true) } {
return None;
}
}
// set the allocation status of the data blocks
for block in &allocated_blocks {
if !unsafe { set_datablock_allocation_status(*block, sb, bd, true) } {
return None;
}
}
// update journal entry
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::AllocatedData as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// store the data in the data blocks
for i in 0..datablock_count {
if !unsafe { write_datablock(allocated_blocks[i as usize], sb, bd, &data[i as usize * sb.block_size as usize..(i as usize + 1) * sb.block_size as usize]) } {
return None;
}
}
// update journal entry
// note: cLion incorrectly says that this is unsafe, writing to a union is safe
entry.content.multiblock_write.flags = JMWFlags::Stored as u32;
if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
return None;
}
// return the journal entry index
Some(entry_index)
}