#![no_std]
extern crate alloc;

use alloc::collections::VecDeque;
use alloc::vec;
use alloc::vec::Vec;
use vapfs::{BlockDevice, Index};

use crate::structs::{Inode, InodeFlags, JBRFlags, JBRTargetType, JMWFlags, JournalBlockWrite, JournalEntry, JournalEntryContents, JournalMultiblockWrite, JournalOperation, ListBlock, Superblock};

pub mod btree;
pub mod structs;
pub mod bitmap;
pub mod crc32;
pub mod safe;
pub mod listblock;
pub mod journal;
/// Reads the superblock (located at byte offset 1024 of the block device) and returns it.
/// Returns None if the block device is too small to contain a superblock.
pub fn get_superblock(bd: &mut dyn BlockDevice) -> Option<Superblock> {
    let mut buf: [u8; core::mem::size_of::<Superblock>()] = [0; core::mem::size_of::<Superblock>()];
    bd.seek(1024);
    let read_count = bd.read_blocks(&mut buf);
    if read_count < core::mem::size_of::<Superblock>() {
        return None;
    }
    let mut superblock = unsafe { core::ptr::read(buf.as_ptr() as *const Superblock) };
    superblock.convert_big_endian_to_native();
    Some(superblock)
}
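
// Example (sketch): loading the superblock before any other operation. `bd` stands in for any
// `vapfs::BlockDevice` implementation (for example a ramdisk used in tests); the variable names
// here are illustrative, not part of this crate.
//
//     let sb = match get_superblock(bd) {
//         Some(sb) => sb,
//         None => panic!("device too small to hold a superblock"),
//     };
//     let root_inode = read_inode(0, &sb, bd);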
/// Performs a direct write of a superblock to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_superblock(mut sb: Superblock, bd: &mut dyn BlockDevice) -> bool {
    let block_size = sb.block_size;
    sb.convert_native_to_big_endian();
    let mut buf: [u8; core::mem::size_of::<Superblock>()] = [0; core::mem::size_of::<Superblock>()];
    core::ptr::write(buf.as_mut_ptr() as *mut Superblock, sb);
    bd.seek(block_size as u64);
    let write_count = bd.write_blocks(&buf);
    write_count == core::mem::size_of::<Superblock>()
}
/// Reads the inode at the given index and returns it.
pub fn read_inode(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Inode> {
    let mut buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
    bd.seek((sb.first_inode_block * sb.block_size as u64) + (index * core::mem::size_of::<Inode>() as u64));
    let read_count = bd.read_blocks(&mut buf);
    if read_count < core::mem::size_of::<Inode>() {
        return None;
    }
    let mut inode = unsafe { core::ptr::read(buf.as_ptr() as *const Inode) };
    inode.convert_big_endian_to_native();
    Some(inode)
}
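
// Example (sketch): inodes live in a flat table starting at `first_inode_block`, so reading
// inode `i` is just the offset calculation above plus a struct read. Illustrative only:
//
//     if let Some(inode) = read_inode(i, &sb, bd) {
//         // inode.size, inode.block_count and inode.listblock are now native-endian
//     }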
/// Performs a direct write of an inode to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_inode(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, mut inode: Inode) -> bool {
    inode.convert_native_to_big_endian();
    let mut buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
    core::ptr::write(buf.as_mut_ptr() as *mut Inode, inode);
    bd.seek((sb.first_inode_block * sb.block_size as u64) + (index * core::mem::size_of::<Inode>() as u64));
    let write_count = bd.write_blocks(&buf);
    write_count == core::mem::size_of::<Inode>()
}
/// Reads a single datablock into memory, may return less than the block size if the end of the block device is reached.
pub fn read_datablock(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Vec<u8> {
    let mut buf: Vec<u8> = Vec::new();
    buf.resize(sb.block_size as usize, 0);
    bd.seek((sb.first_data_block * sb.block_size as u64) + (index * sb.block_size as u64));
    bd.read_blocks(&mut buf);
    buf
}
/// Performs a direct write of a datablock to the block device.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn write_datablock(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, buf: &[u8]) -> bool {
    bd.seek((sb.first_data_block * sb.block_size as u64) + (index * sb.block_size as u64));
    let write_count = bd.write_blocks(buf);
    write_count == sb.block_size as usize
}
/// Checks if a datablock is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_datablock_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
    // datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
    let bitmap_offset = 1024 + sb.block_size as u64;
    let bitmap_length = (sb.data_block_count + 7) / 8;
    if index >= bitmap_length {
        return None;
    }
    let mut bitmap_buf: Vec<u8> = Vec::new();
    bitmap_buf.resize(bitmap_length as usize, 0);
    bd.seek(bitmap_offset);
    let read_count = bd.read_blocks(&mut bitmap_buf);
    if read_count < bitmap_length as usize {
        return None;
    }
    Some(bitmap::get_bit(&bitmap_buf, index as usize) == bitmap::SET)
}
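
// Worked example (illustrative): with block_size = 1024 and data_block_count = 4096, the
// datablock bitmap starts at byte 2048 (1024 + block_size) and is (4096 + 7) / 8 = 512 bytes
// long; the inode bitmap then follows it at byte 2560.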
/// Checks if an inode is allocated.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer.
pub fn is_inode_allocated(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
    // inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
    // length is inode_count / 8 rounded up
    let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
    let bitmap_length = (sb.inode_count + 7) / 8;
    if index >= bitmap_length {
        return None;
    }
    let mut bitmap_buf: Vec<u8> = Vec::new();
    bitmap_buf.resize(bitmap_length as usize, 0);
    bd.seek(bitmap_offset);
    let read_count = bd.read_blocks(&mut bitmap_buf);
    if read_count < bitmap_length as usize {
        return None;
    }
    Some(bitmap::get_bit(&bitmap_buf, index as usize) == bitmap::SET)
}
/// Finds the first unallocated datablock and returns its index.
/// Will return None if no unallocated datablock is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_datablock(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
    // datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
    let bitmap_offset = 1024 + sb.block_size as u64;
    let bitmap_length = (sb.data_block_count + 7) / 8;
    let mut bitmap_buf: Vec<u8> = Vec::new();
    bitmap_buf.resize(bitmap_length as usize, 0);
    bd.seek(bitmap_offset);
    let read_count = bd.read_blocks(&mut bitmap_buf);
    if read_count < bitmap_length as usize {
        return None;
    }
    bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET).map(|index| index as Index)
}
/// Finds a number of unallocated datablocks and returns their indices.
/// Will return None if not enough unallocated datablocks are found, or if the block device cannot fill the buffer.
pub fn find_count_unallocated_datablocks(sb: &Superblock, bd: &mut dyn BlockDevice, count: usize) -> Option<Vec<Index>> {
    // datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
    let bitmap_offset = 1024 + sb.block_size as u64;
    let bitmap_length = (sb.data_block_count + 7) / 8;
    let mut bitmap_buf: Vec<u8> = Vec::new();
    bitmap_buf.resize(bitmap_length as usize, 0);
    bd.seek(bitmap_offset);
    let read_count = bd.read_blocks(&mut bitmap_buf);
    if read_count < bitmap_length as usize {
        return None;
    }
    let mut found = Vec::new();
    while found.len() < count {
        if let Some(i) = bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET) {
            found.push(i as Index);
            // set the bit so we don't find it again
            bitmap::set_bit(&mut bitmap_buf, i, bitmap::SET);
        } else {
            return None;
        }
    }
    Some(found)
}
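
// Example (sketch): reserving space before a large write. The returned indices are only
// candidates; nothing is marked allocated until set_datablock_allocation_status is called,
// which is exactly what schedule_multi_block_write below does. Illustrative only:
//
//     let blocks = find_count_unallocated_datablocks(&sb, bd, 4)?;
//     for b in &blocks {
//         unsafe { set_datablock_allocation_status(*b, &sb, bd, true); }
//     }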
/// Finds the first unallocated inode and returns its index.
/// Will return None if no unallocated inode is found, or if the block device cannot fill the buffer.
pub fn find_first_unallocated_inode(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
    // inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
    // length is inode_count / 8 rounded up
    let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
    let bitmap_length = (sb.inode_count + 7) / 8;
    let mut bitmap_buf: Vec<u8> = Vec::new();
    bitmap_buf.resize(bitmap_length as usize, 0);
    bd.seek(bitmap_offset);
    let read_count = bd.read_blocks(&mut bitmap_buf);
    if read_count < bitmap_length as usize {
        return None;
    }
    bitmap::find_first_bit_equal_to(&bitmap_buf, bitmap::UNSET).map(|index| index as Index)
}
/// Sets the allocation status of a datablock.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn set_datablock_allocation_status(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, allocated: bool) -> bool {
    // todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
    // todo! see how much time this saves?
    // datablock bitmap is at 1024 + block size, length is data_block_count / 8 rounded up
    let bitmap_offset = 1024 + sb.block_size as u64;
    let bitmap_length = (sb.data_block_count + 7) / 8;
    if index >= bitmap_length {
        return false;
    }
    let mut bitmap_buf: Vec<u8> = Vec::new();
    bitmap_buf.resize(bitmap_length as usize, 0);
    bd.seek(bitmap_offset);
    let read_count = bd.read_blocks(&mut bitmap_buf);
    if read_count < bitmap_length as usize {
        return false;
    }
    bitmap::set_bit(&mut bitmap_buf, index as usize, allocated);
    bd.seek(bitmap_offset);
    let write_count = bd.write_blocks(&bitmap_buf);
    write_count == bitmap_length as usize
}
/// Sets the allocation status of an inode.
/// # Safety
/// unsafe because it does not journal the write, and does not update any other metadata.
pub unsafe fn set_inode_allocation_status(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, allocated: bool) -> bool {
    // todo! we should maybe optimise this to only write the byte that contains the bit, instead of the whole bitmap
    // todo! see how much time this saves?
    // inode bitmap is at 1024 + block size + datablock_bitmap_length byte offset,
    // length is inode_count / 8 rounded up
    let bitmap_offset = 1024 + sb.block_size as u64 + ((sb.data_block_count + 7) / 8);
    let bitmap_length = (sb.inode_count + 7) / 8;
    if index >= bitmap_length {
        return false;
    }
    let mut bitmap_buf: Vec<u8> = Vec::new();
    bitmap_buf.resize(bitmap_length as usize, 0);
    bd.seek(bitmap_offset);
    let read_count = bd.read_blocks(&mut bitmap_buf);
    if read_count < bitmap_length as usize {
        return false;
    }
    bitmap::set_bit(&mut bitmap_buf, index as usize, allocated);
    bd.seek(bitmap_offset);
    let write_count = bd.write_blocks(&bitmap_buf);
    write_count == bitmap_length as usize
}
/// Reads a journal entry by index
pub fn read_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<JournalEntry> {
    let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
    bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
    let read_count = bd.read_blocks(&mut buf);
    if read_count < core::mem::size_of::<JournalEntry>() {
        return None;
    }
    let mut entry = unsafe { core::ptr::read(buf.as_ptr() as *const JournalEntry) };
    entry.convert_big_endian_to_native();
    Some(entry)
}
/// Performs a direct write of a journal entry to the block device.
/// # Safety
/// unsafe because it assumes that the entry is correctly formatted and that everything has been performed correctly
pub unsafe fn write_journal_entry(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice, mut entry: JournalEntry) -> bool {
    entry.convert_native_to_big_endian();
    let mut buf: [u8; core::mem::size_of::<JournalEntry>()] = [0; core::mem::size_of::<JournalEntry>()];
    core::ptr::write(buf.as_mut_ptr() as *mut JournalEntry, entry);
    bd.seek((sb.first_journal_block * sb.block_size as u64) + (index * core::mem::size_of::<JournalEntry>() as u64));
    let write_count = bd.write_blocks(&buf);
    write_count == core::mem::size_of::<JournalEntry>()
}
/// Checks if a journal entry has been completed.
/// Will return None if the index is out of bounds or if the block device cannot fill the buffer,
/// or if the entry is invalid
pub fn is_journal_entry_complete(index: Index, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<bool> {
    let entry = read_journal_entry(index, sb, bd)?;
    // if flags == 0, the entry is complete
    const SINGLEBLOCK: u32 = JournalOperation::SingleBlockWrite as u32;
    const MULTIBLOCK: u32 = JournalOperation::MultiblockWrite as u32;
    match entry.operation {
        SINGLEBLOCK => unsafe { Some(entry.content.block_write.flags == 0) },
        MULTIBLOCK => unsafe { Some(entry.content.multiblock_write.flags == 0) },
        _ => None,
    }
}
/// Returns the index of the next unused journal entry.
/// Will loop around to the beginning of the journal if the end is reached.
pub fn next_journal_position(sb: &Superblock, bd: &mut dyn BlockDevice) -> Option<Index> {
    let mut index = sb.journal_position as Index + 1;
    let max_index = (sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64;
    loop {
        let entry = read_journal_entry(index, sb, bd)?;
        // if flags == 0, the entry is complete
        // flags should always be the same size and at the same offset in the union, so we can just check one
        if unsafe { entry.content.block_write.flags == 0 } {
            return Some(index);
        }
        index += 1;
        if index >= max_index {
            index = 0;
        }
        if index == sb.journal_position as Index {
            // we've looped around to the beginning of the journal
            return None;
        }
    }
}
/// Returns the index of the containing indirect block for the given dba address and the index in that indirect block, or None if the address is invalid.
pub fn get_indirect_datablock(sb: &Superblock, bd: &mut dyn BlockDevice, list: ListBlock, address: Index) -> Option<(Index, Index)> {
    if address < 32 {
        None
    } else {
        // copy pasted from structs.rs
        // if greater than 32, see the following:
        // let N = (maximum number of pointers in an indirect block) * 32
        // (beginning...count)
        // 32..N: single indirect block
        // 32+N..N^2: double indirect block
        // 32+N^2..N^3: triple indirect block
        // 32+N^3..N^4: quadruple indirect block
        // 32+N^4..N^5: quintuple indirect block
        // 32+N^5..N^6: sextuple indirect block
        // block index is address / (max_per_block ^ (1 if single, 2 if double, etc))
        // after getting your layer's address, repeat the previous layer's process
        let max_per_block = sb.block_size as u64 / 8;
        let N = max_per_block * 32;
        let N2 = N * N;
        let N3 = N2 * N;
        let N4 = N3 * N;
        let N5 = N4 * N;
        let N6 = N5 * N;
        let mut address = address - 32;
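        // worked example (illustrative): with a 1024-byte block size, max_per_block = 128,
        // so N = 4096 and N2 = 16_777_216; an incoming address of 5000 becomes 4968 here,
        // which is >= N and < N2, so it is resolved through the double indirect blocks below.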
        // todo: you could probably rewrite this using like recursion or smth
        match address {
            _ if address < N => {
                let block_index = address / max_per_block;
                let address = address % max_per_block;
                Some((list.single_indirect_block_address[block_index as usize], address))
            }
            _ if address < N2 => {
                let address = address - N;
                let block_index = address / max_per_block.pow(2);
                let layer2_block = read_datablock(list.double_indirect_block_address[block_index as usize], sb, bd);
                let layer2_address = address / max_per_block;
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer2_block[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]);
                let layer1_block = u64::from_be_bytes(buf);
                let layer1_address = address % max_per_block;
                Some((layer1_block, layer1_address))
            }
            _ if address < N3 => {
                let address = address - N2;
                let block_index = address / max_per_block.pow(3);
                let layer3_block = read_datablock(list.triple_indirect_block_address[block_index as usize], sb, bd);
                let layer3_address = address / max_per_block.pow(2);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer3_block[(layer3_address * 8) as usize..(layer3_address * 8 + 8) as usize]);
                let layer2_block = u64::from_be_bytes(buf);
                let layer2_address = (address % max_per_block.pow(2)) / max_per_block;
                let layer2_block = read_datablock(layer2_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer2_block[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]);
                let layer1_block = u64::from_be_bytes(buf);
                let layer1_address = address % max_per_block;
                Some((layer1_block, layer1_address))
            }
            _ if address < N4 => {
                let address = address - N3;
                let block_index = address / max_per_block.pow(4);
                let layer4_block = read_datablock(list.quadruple_indirect_block_address[block_index as usize], sb, bd);
                let layer4_address = address / max_per_block.pow(3);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer4_block[(layer4_address * 8) as usize..(layer4_address * 8 + 8) as usize]);
                let layer3_block = u64::from_be_bytes(buf);
                let layer3_address = (address % max_per_block.pow(3)) / max_per_block.pow(2);
                let layer3_block = read_datablock(layer3_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer3_block[(layer3_address * 8) as usize..(layer3_address * 8 + 8) as usize]);
                let layer2_block = u64::from_be_bytes(buf);
                let layer2_address = (address % max_per_block.pow(2)) / max_per_block;
                let layer2_block = read_datablock(layer2_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer2_block[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]);
                let layer1_block = u64::from_be_bytes(buf);
                let layer1_address = address % max_per_block;
                Some((layer1_block, layer1_address))
            }
            _ if address < N5 => {
                let address = address - N4;
                let block_index = address / max_per_block.pow(5);
                let layer5_block = read_datablock(list.quintuple_indirect_block_address[block_index as usize], sb, bd);
                let layer5_address = address / max_per_block.pow(4);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer5_block[(layer5_address * 8) as usize..(layer5_address * 8 + 8) as usize]);
                let layer4_block = u64::from_be_bytes(buf);
                let layer4_address = (address % max_per_block.pow(4)) / max_per_block.pow(3);
                let layer4_block = read_datablock(layer4_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer4_block[(layer4_address * 8) as usize..(layer4_address * 8 + 8) as usize]);
                let layer3_block = u64::from_be_bytes(buf);
                let layer3_address = (address % max_per_block.pow(3)) / max_per_block.pow(2);
                let layer3_block = read_datablock(layer3_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer3_block[(layer3_address * 8) as usize..(layer3_address * 8 + 8) as usize]);
                let layer2_block = u64::from_be_bytes(buf);
                let layer2_address = (address % max_per_block.pow(2)) / max_per_block;
                let layer2_block = read_datablock(layer2_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer2_block[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]);
                let layer1_block = u64::from_be_bytes(buf);
                let layer1_address = address % max_per_block;
                Some((layer1_block, layer1_address))
            }
            _ if address < N6 => {
                let address = address - N5;
                let block_index = address / max_per_block.pow(6);
                let layer6_block = read_datablock(list.sextuple_indirect_block_address[block_index as usize], sb, bd);
                let layer6_address = address / max_per_block.pow(5);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer6_block[(layer6_address * 8) as usize..(layer6_address * 8 + 8) as usize]);
                let layer5_block = u64::from_be_bytes(buf);
                let layer5_address = (address % max_per_block.pow(5)) / max_per_block.pow(4);
                let layer5_block = read_datablock(layer5_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer5_block[(layer5_address * 8) as usize..(layer5_address * 8 + 8) as usize]);
                let layer4_block = u64::from_be_bytes(buf);
                let layer4_address = (address % max_per_block.pow(4)) / max_per_block.pow(3);
                let layer4_block = read_datablock(layer4_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer4_block[(layer4_address * 8) as usize..(layer4_address * 8 + 8) as usize]);
                let layer3_block = u64::from_be_bytes(buf);
                let layer3_address = (address % max_per_block.pow(3)) / max_per_block.pow(2);
                let layer3_block = read_datablock(layer3_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer3_block[(layer3_address * 8) as usize..(layer3_address * 8 + 8) as usize]);
                let layer2_block = u64::from_be_bytes(buf);
                let layer2_address = (address % max_per_block.pow(2)) / max_per_block;
                let layer2_block = read_datablock(layer2_block, sb, bd);
                let mut buf: [u8; 8] = [0; 8];
                buf.copy_from_slice(&layer2_block[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]);
                let layer1_block = u64::from_be_bytes(buf);
                let layer1_address = address % max_per_block;
                Some((layer1_block, layer1_address))
            }
            _ => None,
        }
    }
}
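
// Example (sketch): resolving an indirect address the same way flush_single_block_write does
// further down. Assumes `sb`, `bd`, `inode`, and `target` are in scope; illustrative only:
//
//     let (indirect_block_addr, slot) = get_indirect_datablock(sb, bd, inode.listblock, target)?;
//     let block = read_datablock(indirect_block_addr, sb, bd);
//     let mut ptr = [0u8; 8];
//     ptr.copy_from_slice(&block[(slot * 8) as usize..(slot * 8 + 8) as usize]);
//     let dba = u64::from_be_bytes(ptr); // the actual datablock address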
/// Creates a journal entry for a single block write operation.
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, or if the block device cannot be written to.
/// Returns the journal entry index if successful.
pub fn schedule_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, target_type: JBRTargetType, otherwise_datablock_index: Option<Index>, data: &[u8]) -> Option<Index> {
    let entry_index = next_journal_position(sb, bd)?;
    let entry_content = JournalBlockWrite {
        flags: 0,
        target_type: target_type as u32,
        target_inode: containing_inode_index,
        target_block: otherwise_datablock_index.unwrap_or(0),
        real_target_block: 0, // filled in once flushed
        source_block: 0, // filled in once allocated
        source_block_crc32: 0, // filled in once allocated
    };
    let mut entry = JournalEntry {
        operation: JournalOperation::SingleBlockWrite as u32,
        zeroed_content_crc32: 0,
        content: JournalEntryContents {
            block_write: entry_content,
        },
    };
    // write the journal entry
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // find a free data block
    let data_block_index = find_first_unallocated_datablock(sb, bd)?;
    // set the content and then rewrite the journal entry
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.block_write.source_block = data_block_index;
    entry.content.block_write.flags = JBRFlags::Chosen as u32;
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // allocate the data block
    if !unsafe { set_datablock_allocation_status(data_block_index, sb, bd, true) } {
        return None;
    }
    // set the content and then rewrite the journal entry
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.block_write.flags = JBRFlags::Allocated as u32;
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // write the data to the data block
    if !unsafe { write_datablock(data_block_index, sb, bd, data) } {
        return None;
    }
    let written_data = read_datablock(data_block_index, sb, bd);
    // set the crc32 and stored flag
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.block_write.source_block_crc32 = crc32::crc32(&written_data);
    entry.content.block_write.flags = JBRFlags::Stored as u32;
    // generate crc32 of the entry
    let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
    let mut clone = entry.content;
    clone.block_write.flags = 0;
    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, clone); }
    entry.zeroed_content_crc32 = crc32::crc32(&buf);
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // all further steps will be performed on a journal flush
    Some(entry_index)
}
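
// Example (sketch): journaling a one-block write to an existing data block of an inode, then
// flushing it. Assumes `sb`, `bd`, `inode_index`, and a block_size-sized `payload` are already
// in scope; the names are illustrative.
//
//     let entry = schedule_single_block_write(
//         &sb, bd, inode_index, JBRTargetType::DataBlock, Some(0), &payload)?;
//     // ... later, when the journal is flushed:
//     let ok = flush_single_block_write(&sb, bd, entry);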
/// Creates a journal entry for a multi block write operation.
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes.
/// Returns None if the journal is full, if the block device cannot be written to, or if you're somehow trying to write over 2105000 terabytes of data.
/// Returns the journal entry index if successful.
pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, containing_inode_index: Index, datablock_start: Index, datablock_count: Index, data: &[u8]) -> Option<Index> {
    let entry_index = next_journal_position(sb, bd)?;
    let entry_content = JournalMultiblockWrite {
        flags: 0,
        target_inode: containing_inode_index,
        target_block: datablock_start,
        target_block_count: datablock_count,
        list_block: 0,
        old_list_block: 0, // filled in once flushed
        list_block_crc32: 0,
    };
    let mut entry = JournalEntry {
        operation: JournalOperation::MultiblockWrite as u32,
        zeroed_content_crc32: 0,
        content: JournalEntryContents {
            multiblock_write: entry_content,
        },
    };
    // write the journal entry
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // find a free data block for the list block
    let list_block_indexs = find_count_unallocated_datablocks(sb, bd, 2)?;
    // set the content and then rewrite the journal entry
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.multiblock_write.list_block = list_block_indexs[0];
    entry.content.multiblock_write.old_list_block = list_block_indexs[1];
    entry.content.multiblock_write.flags = JMWFlags::ChosenList as u32;
    // calculate the crc32
    let mut content_cloned = entry.content;
    content_cloned.multiblock_write.flags = 0;
    let mut buf: [u8; core::mem::size_of::<JournalEntryContents>()] = [0; core::mem::size_of::<JournalEntryContents>()];
    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_cloned); }
    entry.zeroed_content_crc32 = crc32::crc32(&buf);
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // allocate the data block
    if !unsafe { set_datablock_allocation_status(list_block_indexs[0], sb, bd, true) } {
        return None;
    }
    if !unsafe { set_datablock_allocation_status(list_block_indexs[1], sb, bd, true) } {
        return None;
    }
    // set the content and then rewrite the journal entry
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.multiblock_write.flags = JMWFlags::AllocatedList as u32;
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // find the data blocks
    let allocated_blocks = find_count_unallocated_datablocks(sb, bd, datablock_count as usize)?;
    // create a list block
    let mut list_block = ListBlock {
        count: 0,
        direct_block_addresses: [0; 32],
        single_indirect_block_address: [0; 32],
        double_indirect_block_address: [0; 32],
        triple_indirect_block_address: [0; 32],
        quadruple_indirect_block_address: [0; 32],
        quintuple_indirect_block_address: [0; 32],
        sextuple_indirect_block_address: [0; 32],
    };
    let mut old_list_block = ListBlock {
        count: 0,
        direct_block_addresses: [0; 32],
        single_indirect_block_address: [0; 32],
        double_indirect_block_address: [0; 32],
        triple_indirect_block_address: [0; 32],
        quadruple_indirect_block_address: [0; 32],
        quintuple_indirect_block_address: [0; 32],
        sextuple_indirect_block_address: [0; 32],
    };
    let mut indirect_blocks_waiting_for_allocation_to_be_set = Vec::new();

    // if using indirect blocks, only fill out the dba
    // otherwise, have fun!
    if datablock_count > 32 {
        list_block.direct_block_addresses.copy_from_slice(&allocated_blocks[..32]);
        list_block.count = 32;

        // set the indirect blocks
        let max_per_block = sb.block_size as u64 / 8;
        let N = max_per_block * 32;
        let N2 = N * N;
        let N3 = N2 * N;
        let N4 = N3 * N;
        let N5 = N4 * N;
        let N6 = N5 * N;

        // figure out how many blocks we need for all indirect blocks
        let mut indirect_blocks_needed = 0;
        // can we fit them all in the 32 single indirect blocks?
        let dba_count = datablock_count - 32;
        // enclosed in brackets so that you can collapse it in the IDE (:
        {
            if dba_count < N {
                // yes we can
                // how many indirect blocks do we need?
                indirect_blocks_needed = (dba_count / max_per_block) + 1;
            } else if dba_count < N2 {
                // no, but we can fit it in the double indirect blocks
                // first, fill up the single indirect blocks
                indirect_blocks_needed = N / max_per_block;
                let datablocks_left = dba_count - N;
                // how many double indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N) + 1;
                // how many single indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
            } else if dba_count < N3 {
                // this fills up the single and double indirect blocks
                indirect_blocks_needed = N / max_per_block; // 32 single indirect blocks
                indirect_blocks_needed += N2 / N;
                let datablocks_left = dba_count - N2;
                // how many triple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N2) + 1;
                // how many double indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N) + 1;
                // how many single indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
            } else if dba_count < N4 {
                // this fills up the single, double, and triple indirect blocks
                indirect_blocks_needed = N / max_per_block;
                indirect_blocks_needed += N2 / N;
                indirect_blocks_needed += N3 / N2;
                let datablocks_left = dba_count - N3;
                // how many quadruple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N3) + 1;
                // how many triple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N2) + 1;
                // how many double indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N) + 1;
                // how many single indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
            } else if dba_count < N5 {
                // this fills up the single, double, triple, and quadruple indirect blocks
                indirect_blocks_needed = N / max_per_block;
                indirect_blocks_needed += N2 / N;
                indirect_blocks_needed += N3 / N2;
                indirect_blocks_needed += N4 / N3;
                let datablocks_left = dba_count - N4;
                // how many quintuple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N4) + 1;
                // how many quadruple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N3) + 1;
                // how many triple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N2) + 1;
                // how many double indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N) + 1;
                // how many single indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
            } else if dba_count < N6 {
                // this fills up the single, double, triple, quadruple, and quintuple indirect blocks
                indirect_blocks_needed = N / max_per_block;
                indirect_blocks_needed += N2 / N;
                indirect_blocks_needed += N3 / N2;
                indirect_blocks_needed += N4 / N3;
                indirect_blocks_needed += N5 / N4;
                let datablocks_left = dba_count - N5;
                // how many sextuple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N5) + 1;
                // how many quintuple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N4) + 1;
                // how many quadruple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N3) + 1;
                // how many triple indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N2) + 1;
                // how many double indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / N) + 1;
                // how many single indirect blocks do we need?
                indirect_blocks_needed += (datablocks_left / max_per_block) + 1;
            } else {
                // congratulations, you've attempted to write around 2105000 terabytes of data
                return None;
            }
        }
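        // worked example (illustrative): with a 1024-byte block size, max_per_block = 128 and
        // N = 4096. writing 1000 datablocks leaves dba_count = 968 < N, so
        // indirect_blocks_needed = (968 / 128) + 1 = 8 single indirect blocks.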
        // allocate the indirect blocks
        let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_blocks_needed as usize)?;
        // fill with data
        let mut i = 0;
        let mut taken = 0;
        let data = &allocated_blocks[32..];
        // once again enclosed so that you can collapse it in the IDE (:
        {
            fn fillwithdata_1(
                sb: &Superblock, bd: &mut dyn BlockDevice, i: &mut u64, data: &[Index],
                block_size: usize, max_per_block: u64, indirect_blocks: &mut Vec<Index>,
                taken: &mut usize, dbas: &mut [Index], max: usize) -> bool {
                for block1 in 0..max {
                    if *i >= data.len() as u64 {
                        break;
                    }
                    let mut buf = vec![0u8; block_size];
                    let mut j = 0;
                    while j < max_per_block {
                        let index = *i % max_per_block;
                        if *i >= data.len() as u64 {
                            break;
                        }
                        buf[index as usize * 8..(index as usize + 1) * 8].copy_from_slice(&data[*i as usize].to_be_bytes());
                        *i += 1;
                        j += 1;
                    }
                    if !unsafe { write_datablock(indirect_blocks[*taken], sb, bd, &buf) } {
                        return false;
                    }
                    *taken += 1;
                    dbas[block1] = indirect_blocks[*taken - 1];
                }
                true
            }
            // single indirect blocks
            if !fillwithdata_1(sb, bd, &mut i, data, sb.block_size as usize, max_per_block,
                               &mut indirect_blocks_waiting_for_allocation_to_be_set, &mut taken,
                               &mut list_block.single_indirect_block_address, 32) {
                return None;
            }
            // double indirect blocks
            for block2 in 0..32 {
                if i >= data.len() as u64 {
                    break;
                }
                let mut list = vec![0u64; max_per_block as usize];
                if !fillwithdata_1(sb, bd, &mut i, data, sb.block_size as usize, max_per_block,
                                   &mut indirect_blocks_waiting_for_allocation_to_be_set, &mut taken,
                                   &mut list, max_per_block as usize) {
                    return None;
                }
                let buf = list.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
                if !unsafe { write_datablock(indirect_blocks[taken], sb, bd, &buf) } {
                    return None;
                }
                taken += 1;
                list_block.double_indirect_block_address[block2 as usize] = indirect_blocks[taken - 1];
            }

            // triple indirect blocks
            fn fillwithdata_2(
                sb: &Superblock, bd: &mut dyn BlockDevice, i: &mut u64, data: &[Index],
                block_size: usize, max_per_block: u64, indirect_blocks: &mut Vec<Index>,
                taken: &mut usize, dbas: &mut [Index], max: usize) -> bool {
                for block3 in 0..32 {
                    if *i >= data.len() as u64 {
                        break;
                    }
                    let mut buf = vec![0u8; sb.block_size as usize];
                    for block2 in 0..max_per_block {
                        if *i >= data.len() as u64 {
                            break;
                        }
                        let mut buf2 = vec![0u64; max_per_block as usize];
                        fillwithdata_1(sb, bd, i, data, block_size, max_per_block,
                                       indirect_blocks, taken,
                                       &mut buf2, max_per_block as usize);
                        let buf2 = buf2.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
                        if !unsafe { write_datablock(indirect_blocks[*taken], sb, bd, &buf2) } {
                            return false;
                        }
                        *taken += 1;
                        buf[block2 as usize * 8..(block2 as usize + 1) * 8].copy_from_slice(&indirect_blocks[*taken - 1].to_be_bytes());
                    }
                    if !unsafe { write_datablock(indirect_blocks[*taken], sb, bd, &buf) } {
                        return false;
                    }
                    *taken += 1;
                    dbas[block3 as usize] = indirect_blocks[*taken - 1];
                }
                true
            }
            fillwithdata_2(sb, bd, &mut i, data, sb.block_size as usize, max_per_block,
                           &mut indirect_blocks_waiting_for_allocation_to_be_set, &mut taken,
                           &mut list_block.triple_indirect_block_address, 32);
            // quadruple indirect blocks
            for block4 in 0..32 {
                if i >= data.len() as u64 {
                    break;
                }
                let mut list = vec![0u64; max_per_block as usize];
                fillwithdata_2(sb, bd, &mut i, data, sb.block_size as usize, max_per_block,
                               &mut indirect_blocks_waiting_for_allocation_to_be_set, &mut taken,
                               &mut list, max_per_block as usize);
                let buf = list.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
                if !unsafe { write_datablock(indirect_blocks[taken], sb, bd, &buf) } {
                    return None;
                }
                taken += 1;
                list_block.quadruple_indirect_block_address[block4 as usize] = indirect_blocks[taken - 1];
            }

            // quintuple indirect blocks
            for block5 in 0..32 {
                if i >= data.len() as u64 {
                    break;
                }
                let mut list = vec![0u8; sb.block_size as usize];
                for block4 in 0..max_per_block {
                    if i >= data.len() as u64 {
                        break;
                    }
                    let mut buf = vec![0u64; max_per_block as usize];
                    fillwithdata_2(sb, bd, &mut i, data, sb.block_size as usize, max_per_block,
                                   &mut indirect_blocks_waiting_for_allocation_to_be_set, &mut taken,
                                   &mut buf, max_per_block as usize);
                    let buf = buf.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
                    if !unsafe { write_datablock(indirect_blocks[taken], sb, bd, &buf) } {
                        return None;
                    }
                    taken += 1;
                    list[block4 as usize * 8..(block4 as usize + 1) * 8].copy_from_slice(&indirect_blocks[taken - 1].to_be_bytes());
                }
                if !unsafe { write_datablock(indirect_blocks[taken], sb, bd, &list) } {
                    return None;
                }
                taken += 1;
                list_block.quintuple_indirect_block_address[block5 as usize] = indirect_blocks[taken - 1];
            }

            // sextuple indirect blocks
            for block6 in 0..32 {
                if i >= data.len() as u64 {
                    break;
                }
                let mut list = vec![0u8; sb.block_size as usize];
                for block5 in 0..max_per_block {
                    if i >= data.len() as u64 {
                        break;
                    }
                    let mut buf = vec![0u8; sb.block_size as usize];
                    for block4 in 0..max_per_block {
                        if i >= data.len() as u64 {
                            break;
                        }
                        let mut buf2 = vec![0u64; max_per_block as usize];
                        fillwithdata_2(sb, bd, &mut i, data, sb.block_size as usize, max_per_block,
                                       &mut indirect_blocks_waiting_for_allocation_to_be_set, &mut taken,
                                       &mut buf2, max_per_block as usize);
                        let buf2 = buf2.iter().map(|x| x.to_be_bytes()).flatten().collect::<Vec<u8>>();
                        if !unsafe { write_datablock(indirect_blocks[taken], sb, bd, &buf2) } {
                            return None;
                        }
                        taken += 1;
                        buf[block4 as usize * 8..(block4 as usize + 1) * 8].copy_from_slice(&indirect_blocks[taken - 1].to_be_bytes());
                    }
                    if !unsafe { write_datablock(indirect_blocks[taken], sb, bd, &buf) } {
                        return None;
                    }
                    taken += 1;
                    list[block5 as usize * 8..(block5 as usize + 1) * 8].copy_from_slice(&indirect_blocks[taken - 1].to_be_bytes());
                }
                if !unsafe { write_datablock(indirect_blocks[taken], sb, bd, &list) } {
                    return None;
                }
                taken += 1;
                list_block.sextuple_indirect_block_address[block6 as usize] = indirect_blocks[taken - 1];
            }
        }
    } else {
        list_block.direct_block_addresses[..datablock_count as usize].copy_from_slice(&allocated_blocks[..datablock_count as usize]);
        list_block.count = datablock_count;
    }

    // read target inode, and write the old list block
    let target_inode = read_inode(containing_inode_index, sb, bd)?;
    old_list_block = target_inode.listblock;

    // write the list blocks
    let mut buf = [0u8; core::mem::size_of::<ListBlock>()];
    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut ListBlock, list_block); }
    if !unsafe { write_datablock(list_block_indexs[0], sb, bd, &buf) } {
        return None;
    }
    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut ListBlock, old_list_block); }
    if !unsafe { write_datablock(list_block_indexs[1], sb, bd, &buf) } {
        return None;
    }
    // set the content and then rewrite the journal entry
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.multiblock_write.flags = JMWFlags::ChosenData as u32;
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // if we're using indirect blocks, set the allocation status of the indirect blocks
    for block in indirect_blocks_waiting_for_allocation_to_be_set {
        if !unsafe { set_datablock_allocation_status(block, sb, bd, true) } {
            return None;
        }
    }
    // set the allocation status of the data blocks
    for block in &allocated_blocks {
        if !unsafe { set_datablock_allocation_status(*block, sb, bd, true) } {
            return None;
        }
    }
    // update journal entry
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.multiblock_write.flags = JMWFlags::AllocatedData as u32;
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // store the data in the data blocks
    for i in 0..datablock_count {
        if !unsafe { write_datablock(allocated_blocks[i as usize], sb, bd, &data[i as usize * sb.block_size as usize..(i as usize + 1) * sb.block_size as usize]) } {
            return None;
        }
    }
    // update journal entry
    // note: CLion incorrectly says that this is unsafe, writing to a union is safe
    entry.content.multiblock_write.flags = JMWFlags::Stored as u32;
    if !unsafe { write_journal_entry(entry_index, sb, bd, entry) } {
        return None;
    }
    // return the journal entry index
    Some(entry_index)
}
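
// Example (sketch): journaling a write that spans several datablocks, then flushing it.
// Assumes `sb`, `bd`, `inode_index`, and a `payload` of datablock_count * block_size bytes are
// in scope; the names are illustrative.
//
//     let count = payload.len() as u64 / sb.block_size as u64;
//     let entry = schedule_multi_block_write(&sb, bd, inode_index, 0, count, &payload)?;
//     // ... later, when the journal is flushed:
//     let ok = flush_multi_block_write(&sb, bd, entry);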
/// Checks the integrity of a single block write journal entry
/// Returns true if the journal entry is valid, false otherwise
pub fn verify_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journal_entry: &JournalEntry) -> bool {
    if journal_entry.operation != JournalOperation::SingleBlockWrite as u32 {
        return false;
    }
    let content = unsafe { journal_entry.content.block_write };
    if content.flags > 4 {
        return false;
    }
    let mut content_clone = journal_entry.content;
    content_clone.block_write.flags = 0;
    let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
    let hash = crc32::crc32(&buf);
    if hash != journal_entry.zeroed_content_crc32 {
        return false;
    }
    // check the source data block
    let buf = read_datablock(content.source_block, sb, bd);
    let crc32 = crc32::crc32(&buf);
    if crc32 != content.source_block_crc32 {
        return false;
    }
    // should be all good! (:
    true
}
/// Flushes a single block write journal entry
/// Should be safe to call at any time, and shouldn't corrupt anything if the system crashes
/// or if the journal entry is corrupt
/// Returns false if the journal entry is corrupt, the block device is full, or if the block device is read only
/// Otherwise, returns true
pub fn flush_single_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_index: Index) -> bool {
    // read the journal entry
    let journal_entry = read_journal_entry(entry_index, sb, bd);
    if journal_entry.is_none() {
        return false;
    }
    let mut journal_entry = journal_entry.unwrap();
    // verify the journal entry
    if !verify_single_block_write(sb, bd, &journal_entry) {
        return false;
    }
    // because everything is verified, we should be able to execute steps 6 through 9 and
    // not have to worry about crashes; since the journal entry is good, we can repeat these steps
    // until they succeed
    let content = unsafe { journal_entry.content.block_write };
    if content.flags < 3 && content.flags > 0 {
        // source block wasn't written, this entry is corrupt
        // set the flags to 0 so that we don't try to flush this entry again
        journal_entry.content.block_write.flags = 0;
        unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) };
        return true;
    }
    let content = unsafe { journal_entry.content.block_write };
    // if flag is 0, we don't need to do anything; return
    // we will check again later to see if the flags have changed from our work
    if content.flags == 0 {
        return true;
    }
    // if flag is 3, either update inode metadata or copy the data to the destination block
    if content.flags == 3 {
        if content.target_type == JBRTargetType::Inode as u32 {
            // copy the data directly to the target inode's block
            let buf = read_datablock(content.source_block, sb, bd);
            let mut inode_buf: [u8; core::mem::size_of::<Inode>()] = [0; core::mem::size_of::<Inode>()];
            inode_buf[0..core::mem::size_of::<Inode>()].clone_from_slice(&buf[0..core::mem::size_of::<Inode>()]);
            let inode = unsafe { core::ptr::read(inode_buf.as_ptr() as *const Inode) };
            if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
                return false;
            }
        } else if content.target_type == JBRTargetType::DataBlock as u32 {
            // update inode metadata
            let inode = read_inode(content.target_inode, sb, bd);
            if inode.is_none() {
                return false;
            }
            let mut inode = inode.unwrap();
            // target block is either an index into the direct blocks or an indirect block (if greater than 31)
            if content.target_block < 32 {
                let previous_block = inode.listblock.direct_block_addresses[content.target_block as usize];
                // update the journal entry
                journal_entry.content.block_write.real_target_block = previous_block;
                if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
                    return false;
                }
                inode.listblock.direct_block_addresses[content.target_block as usize] = content.source_block;
                // if target_block is greater than block_count, we need to update block_count and the inode's byte count
                if content.target_block >= inode.block_count {
                    inode.block_count = content.target_block + 1;
                    inode.size = inode.block_count * sb.block_size as u64;
                    inode.listblock.count = inode.block_count;
                    // note: we are assuming here that we are only increasing the size by 1,
                    // greater size increases may result in weird corruption
                    // (undefined dbas will be listed as part of the file, which will either result
                    // in garbage data or leakage from another file)
                    // fixme: in the future, we should check for this case and allocate blocks if needed
                }
                // update the inode
                if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
                    return false;
                }
            } else {
                let res = get_indirect_datablock(
                    sb, bd, inode.listblock, content.target_block);
                if res.is_none() {
                    return false;
                }
                let (indirect_block_addr, indirect_block_index) = res.unwrap();
                let mut indirect_block = read_datablock(indirect_block_addr, sb, bd);
                let mut buf = [0u8; 8];
                buf.copy_from_slice(&indirect_block[(indirect_block_index * 8) as usize..(indirect_block_index * 8 + 8) as usize]);
                let previous_block = u64::from_be_bytes(buf);
                // update the journal entry
                journal_entry.content.block_write.real_target_block = previous_block;
                if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
                    return false;
                }
                // update the indirect block
                buf.copy_from_slice(&content.source_block.to_be_bytes());
                indirect_block[(indirect_block_index * 8) as usize..(indirect_block_index * 8 + 8) as usize].copy_from_slice(&buf);
                if !unsafe { write_datablock(indirect_block_addr, sb, bd, &indirect_block) } {
                    return false;
                }
                // if target_block is greater than block_count, we need to update block_count and the inode's byte count
                if content.target_block >= inode.block_count {
                    inode.block_count = content.target_block + 1;
                    inode.size = inode.block_count * sb.block_size as u64;
                    inode.listblock.count = inode.block_count;
                    // note: see note above
                }
                // update the inode
                if !unsafe { write_inode(content.target_inode, sb, bd, inode) } {
                    return false;
                }
            }
        } else if content.target_type == JBRTargetType::Disk as u32 {
            // copy the data directly to the offset on the disk
            let buf = read_datablock(content.source_block, sb, bd);
            bd.seek(content.target_block * sb.block_size as u64);
            bd.write_blocks(&buf);
        }
        // update journal entry
        // note: CLion incorrectly says that this is unsafe, writing to a union is safe
        journal_entry.content.block_write.flags = JMWFlags::Written as u32;
        if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
            return false;
        }
    }
    let content = unsafe { journal_entry.content.block_write };
    // if flag is 4, deallocate the source block
    if content.flags == 4 {
        if content.target_type == JBRTargetType::Inode as u32 {
            let block_to_deallocate = content.source_block; // data was copied
            if !unsafe { set_datablock_allocation_status(block_to_deallocate, sb, bd, false) } {
                return false;
            }
        } else {
            let block_to_deallocate = content.real_target_block; // data was moved, this should contain the old block
            if !unsafe { set_datablock_allocation_status(block_to_deallocate, sb, bd, false) } {
                return false;
            }
        }
        // update journal entry
        // note: CLion incorrectly says that this is unsafe, writing to a union is safe
        journal_entry.content.block_write.flags = JMWFlags::CompleteAndDeallocated as u32;
        if !unsafe { write_journal_entry(entry_index, sb, bd, journal_entry) } {
            return false;
        }
    }
    let content = unsafe { journal_entry.content.block_write };
    // if flag is 0, move the journal head to the next entry
    if content.flags == 0 {
        // superblock may have changed, read it again
        let sb = get_superblock(bd);
        if sb.is_none() {
            return false;
        }
        let sb = sb.as_ref().unwrap();
        let head = sb.journal_position;
        let mut next = head + 1;
        let max_index = ((sb.journal_block_count * sb.block_size as u64) / core::mem::size_of::<JournalEntry>() as u64) as u32;
        if next >= max_index {
            next = 0;
        }
        // write superblock
        let mut sb = *sb;
        sb.journal_position = next;
        if !unsafe { write_superblock(sb, bd) } {
            return false;
        }
    }

    true
}
/// Checks the integrity of a multi block write journal entry
/// Returns true if the journal entry is valid, false otherwise
pub fn verify_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, journal_entry: &JournalEntry) -> bool {
    if journal_entry.operation != JournalOperation::MultiblockWrite as u32 {
        return false;
    }
    let content = unsafe { journal_entry.content.multiblock_write };
    if content.flags > 6 {
        return false;
    }
    let mut content_clone = journal_entry.content;
    content_clone.multiblock_write.flags = 0;
    let mut buf = [0; core::mem::size_of::<JournalEntryContents>()];
    unsafe { core::ptr::write(buf.as_mut_ptr() as *mut JournalEntryContents, content_clone); }
    let hash = crc32::crc32(&buf);
    if hash != journal_entry.zeroed_content_crc32 {
        return false;
    }
    // check the source data block
    let buf = read_datablock(content.list_block, sb, bd);
    let crc32 = crc32::crc32(&buf);
    if crc32 != content.list_block_crc32 {
        return false;
    }
    // should be all good! (:
    true
}
/// Flushes a multi block write journal entry
/// Should be safe to call at anytime, and shouldn't corrupt anything if the system crashes
/// or if the journal entry is corrupt
/// Returns false if the journal entry is corrupt, the block device is full, or if the block device is read only
/// Otherwise, returns true
pub fn flush_multi_block_write ( sb : & Superblock , bd : & mut dyn BlockDevice , entry_index : Index ) -> bool {
// read the journal entry
2023-08-12 16:38:37 -07:00
let journal_entry = read_journal_entry ( entry_index , sb , bd ) ;
2023-08-11 15:59:26 -07:00
if journal_entry . is_none ( ) {
return false ;
}
let mut journal_entry = journal_entry . unwrap ( ) ;
// verify the journal entry
if ! verify_multi_block_write ( sb , bd , & journal_entry ) {
return false ;
}
// because everything is verified, we should be able to execute steps 8 through 11 and
// not have to worry about crashes; since the journal entry is good, we can repeat these steps
// until they succeed
let content = unsafe { journal_entry . content . multiblock_write } ;
if content . flags < 5 & & content . flags > 0 {
// source block wasn't written, this entry is corrupt
2023-08-12 16:38:37 -07:00
// set the flags to 0 so that we don't try to flush this entry again
journal_entry . content . multiblock_write . flags = 0 ;
unsafe { write_journal_entry ( entry_index , sb , bd , journal_entry ) } ;
return true ;
}
let content = unsafe { journal_entry . content . multiblock_write } ;
// if flag is 0, we don't need to do anything; return
// we will check again later to see if the flags have changed from our work
if content . flags = = 0 {
return true ;
}
// if flag is 5, copy current data to old list block and then overwrite with new data
if content . flags = = 5 {
let inode = read_inode ( content . target_inode , sb , bd ) ;
if inode . is_none ( ) {
return false ;
}
let inode = inode . unwrap ( ) ;
// get dbas of new list block
let buf = read_datablock ( content . list_block , sb , bd ) ;
let list_block = unsafe { core ::ptr ::read ( buf . as_ptr ( ) as * const ListBlock ) } ;
// update inode
let mut inode = inode ;
inode . listblock = list_block ;
if ! unsafe { write_inode ( content . target_inode , sb , bd , inode ) } {
return false ;
}
// update journal entry
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
journal_entry . content . multiblock_write . flags = JMWFlags ::Written as u32 ;
if ! unsafe { write_journal_entry ( entry_index , sb , bd , journal_entry ) } {
return false ;
}
}
let content = unsafe { journal_entry . content . multiblock_write } ;
// if flag is 6, we will deallocate all blocks in the old list block (using stupid method so that it's faster)
if content . flags = = 6 {
let mut unused_datablocks : Vec < Index > = Vec ::new ( ) ;
let list_block = read_datablock ( content . list_block , sb , bd ) ;
let list_block = unsafe { core ::ptr ::read ( list_block . as_ptr ( ) as * const ListBlock ) } ;
let old_list_block = read_datablock ( content . old_list_block , sb , bd ) ;
let old_list_block = unsafe { core ::ptr ::read ( old_list_block . as_ptr ( ) as * const ListBlock ) } ;
let mut taken = 0 ;
for i in 0 .. 32 {
if old_list_block . count > i {
unused_datablocks . push ( old_list_block . direct_block_addresses [ i as usize ] ) ;
taken + = 1 ;
}
}
let max_per_block = sb . block_size as u64 / 8 ;
// enclosed to make collapsible
// todo! we should refactor this code eventually so that we don't have
// todo! multi-hundred line long things cluttering everything like this
{
fn pushunused1 (
sb : & Superblock , bd : & mut dyn BlockDevice , list_block : & [ Index ] , old_count : usize ,
unused_datablocks : & mut Vec < Index > , taken : & mut usize , max1 : usize , max2 : usize ,
max_per_block : u64 ) -> bool {
for block2 in 0 .. max2 {
if * taken > = old_count {
break ;
}
let buf = read_datablock ( list_block [ block2 ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if * taken + j as usize > = old_count {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
for block1 in 0 .. max1 {
if unused_datablocks . len ( ) > = * taken {
break ;
}
let buf = read_datablock ( list [ block1 ] , sb , bd ) ;
let mut j = 0 ;
while j < max_per_block {
if unused_datablocks . len ( ) > = * taken {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
unused_datablocks . push ( dba ) ;
* taken + = 1 ;
}
j + = 1 ;
}
}
unused_datablocks . push ( list_block [ block2 ] ) ;
* taken + = 1 ;
}
true
}
// double indirect blocks
if ! pushunused1 (
sb , bd , & old_list_block . double_indirect_block_address , old_list_block . count as usize ,
& mut unused_datablocks , & mut taken , max_per_block as usize , 32 , max_per_block ) {
return false ;
}
// triple indirect blocks
fn pushunused2 (
sb : & Superblock , bd : & mut dyn BlockDevice , list_block : & [ Index ] , old_count : usize ,
unused_datablocks : & mut Vec < Index > , taken : & mut usize , max1 : usize , max2 : usize ,
max3 : usize , max_per_block : u64 ) -> bool {
for block3 in 0 .. max3 {
if * taken > = old_count {
break ;
}
let buf = read_datablock ( list_block [ block3 ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if * taken + j as usize > = old_count {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
if ! pushunused1 (
sb , bd , & list , old_count ,
unused_datablocks , taken , max1 , max2 , max_per_block ) {
return false ;
}
unused_datablocks . push ( list_block [ block3 ] ) ;
* taken + = 1 ;
}
true
}
if ! pushunused2 (
sb , bd , & old_list_block . triple_indirect_block_address , old_list_block . count as usize ,
& mut unused_datablocks , & mut taken , max_per_block as usize , max_per_block as usize , 32 , max_per_block ) {
return false ;
}
// quadruple indirect blocks
for block4 in 0 .. 32 {
if taken > = old_list_block . count as usize {
break ;
}
let buf = read_datablock ( old_list_block . quadruple_indirect_block_address [ block4 as usize ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
if ! pushunused2 (
sb , bd , & list , old_list_block . count as usize ,
& mut unused_datablocks , & mut taken , max_per_block as usize , max_per_block as usize , max_per_block as usize , max_per_block ) {
return false ;
}
unused_datablocks . push ( old_list_block . quadruple_indirect_block_address [ block4 as usize ] ) ;
taken + = 1 ;
}
// quintuple indirect blocks
for block5 in 0 .. 32 {
if taken > = old_list_block . count as usize {
break ;
}
let buf = read_datablock ( old_list_block . quintuple_indirect_block_address [ block5 as usize ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
for block4 in 0 .. max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let buf = read_datablock ( list [ block4 as usize ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
if ! pushunused2 (
sb , bd , & list , old_list_block . count as usize ,
& mut unused_datablocks , & mut taken , max_per_block as usize , max_per_block as usize , max_per_block as usize , max_per_block ) {
return false ;
}
unused_datablocks . push ( list [ block4 as usize ] ) ;
taken + = 1 ;
}
unused_datablocks . push ( old_list_block . quintuple_indirect_block_address [ block5 as usize ] ) ;
taken + = 1 ;
}
// sextuple indirect blocks
for block6 in 0 .. 32 {
if taken > = old_list_block . count as usize {
break ;
}
let buf = read_datablock ( old_list_block . sextuple_indirect_block_address [ block6 as usize ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
for block5 in 0 .. max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let buf = read_datablock ( list [ block5 as usize ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
for block4 in 0 .. max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let buf = read_datablock ( list [ block4 as usize ] , sb , bd ) ;
let mut j = 0 ;
let mut list = vec! [ 0 u64 ; max_per_block as usize ] ;
while j < max_per_block {
if taken > = old_list_block . count as usize {
break ;
}
let mut buf2 = [ 0 u8 ; 8 ] ;
buf2 . copy_from_slice ( & buf [ j as usize * 8 .. ( j * 8 + 8 ) as usize ] ) ;
let dba = u64 ::from_be_bytes ( buf2 ) ;
if dba ! = 0 {
list [ j as usize ] = dba ;
}
j + = 1 ;
}
if ! pushunused2 (
sb , bd , & list , old_list_block . count as usize ,
& mut unused_datablocks , & mut taken , max_per_block as usize , max_per_block as usize , max_per_block as usize , max_per_block ) {
return false ;
}
unused_datablocks . push ( list [ block4 as usize ] ) ;
taken + = 1 ;
}
unused_datablocks . push ( list [ block5 as usize ] ) ;
taken + = 1 ;
}
unused_datablocks . push ( old_list_block . sextuple_indirect_block_address [ block6 as usize ] ) ;
taken + = 1 ;
}
}
// deallocate unused blocks
for dba in unused_datablocks {
if ! unsafe { set_datablock_allocation_status ( dba , sb , bd , false ) } {
return false ;
}
}
// deallocate old list block
if ! unsafe { set_datablock_allocation_status ( content . old_list_block , sb , bd , false ) } {
return false ;
}
// deallocate list block
if ! unsafe { set_datablock_allocation_status ( content . list_block , sb , bd , false ) } {
return false ;
}
// update journal entry
// note: CLion incorrectly says that this is unsafe; writing to a union field is safe
journal_entry . content . multiblock_write . flags = JMWFlags ::CompleteAndDeallocated as u32 ;
if ! unsafe { write_journal_entry ( entry_index , sb , bd , journal_entry ) } {
return false ;
}
}
let content = unsafe { journal_entry . content . multiblock_write } ;
// if flag is 0, move the journal head to the next entry
if content . flags = = 0 {
let head = sb . journal_position ;
let mut next = head + 1 ;
let max_index = ( ( sb . journal_block_count * sb . block_size as u64 ) / core ::mem ::size_of ::< JournalEntry > ( ) as u64 ) as u32 ;
if next > = max_index {
next = 0 ;
}
// write superblock
let mut sb = * sb ;
sb . journal_position = next ;
if ! unsafe { write_superblock ( sb , bd ) } {
return false ;
}
}
true
}
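// Illustrative sketch (hypothetical helper, not part of the original API): check the entry
// at the current journal head and flush it as a multi-block write only if it verifies,
// mirroring the verification that `flush_multi_block_write` performs internally.
pub fn verify_then_flush_head ( sb : & Superblock , bd : & mut dyn BlockDevice ) -> bool {
    let head = sb . journal_position as Index ;
    let entry = match read_journal_entry ( head , sb , bd ) {
        Some ( entry ) => entry ,
        None => return false ,
    } ;
    if ! verify_multi_block_write ( sb , bd , & entry ) {
        return false ;
    }
    flush_multi_block_write ( sb , bd , head )
}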
#[ derive(Copy, Clone, Debug, PartialEq, Eq) ]
pub enum JournaledWriteResult {
Success ,
OutOfDiskSpace ,
UnderlyingBlockDeviceError ,
PotentialFilesystemCorruption ,
}
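// A small usage sketch (illustrative, not part of the original API): callers are expected
// to match exhaustively on the result so each failure mode is handled deliberately.
pub fn write_succeeded ( result : JournaledWriteResult ) -> bool {
    match result {
        JournaledWriteResult ::Success => true ,
        JournaledWriteResult ::OutOfDiskSpace
        | JournaledWriteResult ::UnderlyingBlockDeviceError
        | JournaledWriteResult ::PotentialFilesystemCorruption => false ,
    }
}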
/// flushes all pending journal entries until the index is reached
/// if plus_one is true, then the index is inclusive, otherwise it is exclusive
/// returns true if the index was reached, false if the index was not reached
pub fn flush_count_entries ( sb : & Superblock , bd : & mut dyn BlockDevice , mut to : Index , plus_one : bool ) -> bool {
let mut sb = * sb ;
let mut head = sb . journal_position as Index ;
let max_index = ( ( sb . journal_block_count * sb . block_size as u64 ) / core ::mem ::size_of ::< JournalEntry > ( ) as u64 ) as Index ;
if head > = max_index {
head = 0 ;
}
if plus_one {
to + = 1 ;
}
if to > = max_index {
if plus_one {
while to > = max_index {
to - = max_index ;
}
} else {
return false ; // please no infinite loops (:
}
}
while head ! = to {
let entry = read_journal_entry ( head , & sb , bd ) ;
if entry . is_none ( ) {
head + = 1 ;
if head > = max_index {
head = 0 ;
}
continue ;
}
let entry = entry . unwrap ( ) ;
const SINGLE_BLOCK_WRITE : u32 = JournalOperation ::SingleBlockWrite as u32 ;
const MULTI_BLOCK_WRITE : u32 = JournalOperation ::MultiblockWrite as u32 ;
match entry . operation {
SINGLE_BLOCK_WRITE = > {
flush_single_block_write ( & sb , bd , head ) ;
}
MULTI_BLOCK_WRITE = > {
flush_multi_block_write ( & sb , bd , head ) ;
}
_ = > { }
}
// reread superblock
let sb_opt = get_superblock ( bd ) ;
if sb_opt . is_none ( ) {
return false ;
}
sb = sb_opt . unwrap ( ) ;
head + = 1 ;
if head > = max_index {
head = 0 ;
}
}
true
}
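// Usage sketch (hypothetical helper): after scheduling a write that returned `entry_index`,
// flush every pending entry up to and including it, then re-read the superblock, since
// flushing advances the journal head stored on disk.
pub fn flush_through ( sb : & Superblock , bd : & mut dyn BlockDevice , entry_index : Index ) -> Option < Superblock > {
    if ! flush_count_entries ( sb , bd , entry_index , true ) {
        return None ;
    }
    get_superblock ( bd )
}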
/// attempts to figure out why we couldn't create a journal entry, and returns success if it was able to resolve the issue
pub fn why_cant_make_journal_entry ( sb : & Superblock , bd : & mut dyn BlockDevice ) -> JournaledWriteResult {
if find_first_unallocated_datablock ( sb , bd ) . is_none ( ) {
return JournaledWriteResult ::OutOfDiskSpace ;
} else {
// the journal is probably full, flush the current entry
let current_entry = read_journal_entry ( sb . journal_position as Index , sb , bd ) ;
if current_entry . is_none ( ) {
return JournaledWriteResult ::UnderlyingBlockDeviceError ;
}
let current_entry = current_entry . unwrap ( ) ;
const SINGLE_BLOCK_WRITE : u32 = JournalOperation ::SingleBlockWrite as u32 ;
const MULTI_BLOCK_WRITE : u32 = JournalOperation ::MultiblockWrite as u32 ;
match current_entry . operation {
SINGLE_BLOCK_WRITE = > {
if ! flush_single_block_write ( sb , bd , sb . journal_position as Index ) {
return JournaledWriteResult ::PotentialFilesystemCorruption ;
}
}
MULTI_BLOCK_WRITE = > {
if ! flush_multi_block_write ( sb , bd , sb . journal_position as Index ) {
return JournaledWriteResult ::PotentialFilesystemCorruption ;
}
}
_ = > {
return JournaledWriteResult ::PotentialFilesystemCorruption ;
}
}
}
JournaledWriteResult ::Success
}
/// "safely" overwrites the contents of the superblock with the given superblock struct
/// # Safety
/// this function is unsafe because it writes to the superblock, which is a critical part of the filesystem
/// the writes will be journaled, but if the superblock becomes corrupted then that will not matter
pub unsafe fn journaled_write_superblock ( current_superblock : & Superblock , bd : & mut dyn BlockDevice , new_superblock : Superblock , flush_immediately : bool ) -> JournaledWriteResult {
// convert superblock to buffer
let mut buf = [ 0 u8 ; core ::mem ::size_of ::< Superblock > ( ) ] ;
unsafe { core ::ptr ::write ( buf . as_mut_ptr ( ) as * mut Superblock , new_superblock ) } ;
// create journal entry
let mut journal_entry = schedule_single_block_write (
current_superblock , bd , 0 ,
JBRTargetType ::Disk , Some ( 1024 ) ,
& buf ,
) ;
// if none...
if journal_entry . is_none ( ) {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry ( current_superblock , bd ) ;
if why ! = JournaledWriteResult ::Success {
return why ;
}
// try again
journal_entry = schedule_single_block_write (
current_superblock , bd , 0 ,
JBRTargetType ::Disk , Some ( 1024 ) ,
& buf ,
) ;
if journal_entry . is_none ( ) {
return JournaledWriteResult ::UnderlyingBlockDeviceError ;
}
}
let journal_entry = journal_entry . unwrap ( ) ;
// if flush_immediately is true, flush all writes until the journal entry is complete
#[ allow(clippy::collapsible_if) ] // this is more readable
if flush_immediately {
if ! flush_count_entries ( current_superblock , bd , journal_entry , true ) {
return JournaledWriteResult ::PotentialFilesystemCorruption ;
}
}
JournaledWriteResult ::Success
}
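// Usage sketch (illustrative; the closure-based shape is an assumption, not part of the
// original API): read the current superblock, apply a caller-supplied modification to a
// copy, and persist it through the journal with an immediate flush.
pub fn update_superblock ( bd : & mut dyn BlockDevice , f : impl FnOnce ( & mut Superblock ) ) -> JournaledWriteResult {
    let current = match get_superblock ( bd ) {
        Some ( sb ) => sb ,
        None => return JournaledWriteResult ::UnderlyingBlockDeviceError ,
    } ;
    let mut new_sb = current ;
    f ( & mut new_sb ) ;
    // the write itself is journaled; see the safety note on journaled_write_superblock
    unsafe { journaled_write_superblock ( & current , bd , new_sb , true ) }
}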
/// overwrites the contents of an inode with the given inode struct
/// if you want to update the contents of an inode, this is the function you want
pub fn journaled_write_inode ( sb : & Superblock , bd : & mut dyn BlockDevice , old_inode : Index , new_inode : Inode , flush_immediately : bool ) -> JournaledWriteResult {
// convert inode to buffer
let mut buf = [ 0 u8 ; core ::mem ::size_of ::< Inode > ( ) ] ;
unsafe { core ::ptr ::write ( buf . as_mut_ptr ( ) as * mut Inode , new_inode ) } ;
// create journal entry
let mut journal_entry = schedule_single_block_write (
sb , bd , old_inode ,
JBRTargetType ::Inode , None ,
& buf ,
) ;
// if none...
if journal_entry . is_none ( ) {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry ( sb , bd ) ;
if why ! = JournaledWriteResult ::Success {
return why ;
}
// try again
journal_entry = schedule_single_block_write (
sb , bd , old_inode ,
JBRTargetType ::Inode , None ,
& buf ,
) ;
if journal_entry . is_none ( ) {
return JournaledWriteResult ::UnderlyingBlockDeviceError ;
}
}
let journal_entry = journal_entry . unwrap ( ) ;
// if flush_immediately is true, flush all writes until the journal entry is complete
#[ allow(clippy::collapsible_if) ] // this is more readable
if flush_immediately {
if ! flush_count_entries ( sb , bd , journal_entry , true ) {
return JournaledWriteResult ::PotentialFilesystemCorruption ;
}
}
JournaledWriteResult ::Success
}
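// Usage sketch (illustrative, not part of the original API): schedule an inode update
// without flushing immediately; a caller can batch several such writes and flush them
// later with flush_count_entries.
pub fn write_inode_deferred ( sb : & Superblock , bd : & mut dyn BlockDevice , index : Index , new_inode : Inode ) -> bool {
    journaled_write_inode ( sb , bd , index , new_inode , false ) = = JournaledWriteResult ::Success
}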
/// writes the data blocks of an inode to the disk, using single block writes
/// if you want to write data to the disk, this is likely the function you want
/// # Important Note
/// if data.len() is not a multiple of the block size, undefined behavior may occur
pub fn journaled_write_data ( sb : & Superblock , bd : & mut dyn BlockDevice , inode : Index , from_block : Index , data : & [ u8 ] , flush_immediately : bool ) -> JournaledWriteResult {
// create journal entry
let mut journal_entries = {
let mut journal_entries = Vec ::new ( ) ;
for i in 0 .. ( data . len ( ) / sb . block_size as usize ) {
journal_entries . push ( schedule_single_block_write (
sb , bd , inode ,
JBRTargetType ::DataBlock , Some ( from_block ) ,
data ,
) ) ;
}
journal_entries
} ;
while let Some ( mut journal_entry ) = journal_entries . pop ( ) {
// if none...
if journal_entry . is_none ( ) {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry ( sb , bd ) ;
if why ! = JournaledWriteResult ::Success {
return why ;
}
// try again
journal_entry = schedule_single_block_write (
sb , bd , inode ,
JBRTargetType ::DataBlock , Some ( from_block ) ,
data ,
) ;
if journal_entry . is_none ( ) {
return JournaledWriteResult ::UnderlyingBlockDeviceError ;
}
}
let journal_entry = journal_entry . unwrap ( ) ;
// if flush_immediately is true, flush all writes until the journal entry is complete
#[ allow(clippy::collapsible_if) ] // this is more readable
if flush_immediately {
if ! flush_count_entries ( sb , bd , journal_entry , true ) {
return JournaledWriteResult ::PotentialFilesystemCorruption ;
}
}
}
JournaledWriteResult ::Success
}
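// Usage sketch (illustrative, not part of the original API): the note above warns that
// data.len() must be a multiple of the block size, so a caller can zero-pad the final
// partial block before handing the buffer over. The zero-padding policy here is an
// assumption for the example.
pub fn journaled_write_data_padded ( sb : & Superblock , bd : & mut dyn BlockDevice , inode : Index , from_block : Index , data : & [ u8 ] , flush_immediately : bool ) -> JournaledWriteResult {
    let block_size = sb . block_size as usize ;
    let mut padded = data . to_vec ( ) ;
    let remainder = padded . len ( ) % block_size ;
    if remainder != 0 {
        // extend with zeroes so the length is an exact multiple of the block size
        padded . resize ( padded . len ( ) + ( block_size - remainder ) , 0 ) ;
    }
    journaled_write_data ( sb , bd , inode , from_block , & padded , flush_immediately )
}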