diff --git a/src/lib.rs b/src/lib.rs index 0c7b23e..52a4506 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,6 +2,7 @@ extern crate alloc; +use alloc::collections::VecDeque; use alloc::vec; use alloc::vec::Vec; use vapfs::{BlockDevice, Index}; @@ -295,19 +296,19 @@ pub fn next_journal_position(sb: &Superblock, bd: &mut dyn BlockDevice) -> Optio } } -/// Reads an indirect datablock from a list of 3 indirect datablocks. -pub fn read_indirect_datablock(sb: &Superblock, bd: &mut dyn BlockDevice, dbas: [Index; 12], address: Index) -> Vec<u8> { +/// Returns the index of an indirectly indexed datablock, or 0 if it does not exist. +pub fn get_indirect_datablock(sb: &Superblock, bd: &mut dyn BlockDevice, dbas: [Index; 12], address: Index) -> Index { if address < 12 { - return read_datablock(dbas[address as usize], sb, bd); + dbas[address as usize] } else { - let mut n = address - 12; + let n = address - 12; let mut blocks_left = n / (sb.block_size as u64 - 8); let mut indexes_left = n % (sb.block_size as u64 - 8); let mut current_block = dbas[9]; // first indirect block let mut visited = vec![]; loop { if visited.contains(&current_block) { - return vec![]; // we've looped around + return 0; } visited.push(current_block); let mut head = 0; @@ -329,7 +330,7 @@ pub fn read_indirect_datablock(sb: &Superblock, bd: &mut dyn BlockDevice, dbas: } else { // if indexes_left == 0, we found the correct index if indexes_left == 0 { - return read_datablock(ptr, sb, bd); + return ptr; } else { indexes_left -= 1; } @@ -358,7 +359,7 @@ pub fn read_indirect_datablock(sb: &Superblock, bd: &mut dyn BlockDevice, dbas: } else { // if indexes_left == 0, we found the correct index if indexes_left == 0 { - return read_datablock(ptr, sb, bd); + return ptr; } else { indexes_left -= 1; } @@ -531,43 +532,119 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con // if using indirect blocks, fit the remaining entries into the indirect blocks // layout is u64 count followed by u64 
addresses - let max_addresses_per_block = (sb.block_size as usize - core::mem::size_of::<u64>()) / core::mem::size_of::<u64>(); + let max_addresses_per_block = (sb.block_size as usize - core::mem::size_of::<u64>()) / (core::mem::size_of::<u64>() * 2); let mut indirect_block_count = (datablock_count - 9) / max_addresses_per_block as u64; // if the count is not a multiple of the max addresses per block, add one if (datablock_count - 9) % max_addresses_per_block as u64 != 0 { indirect_block_count += 1; } - // if the count is over 3, return None - if indirect_block_count > 3 { - return None; - } + // if the count is over 3, we'll need to use nested indirect blocks + // calculate how many layers of indirect blocks we'll need, + // filling max_addresses per block until we have less than max_addresses_per_block left + // this will be the amount of layers required to store the data + let depth = { + let mut depth = 0; + let mut remaining = indirect_block_count; + while remaining > max_addresses_per_block as u64 { + remaining -= max_addresses_per_block as u64; + depth += 1; + } + depth + }; // allocate the indirect blocks - let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_block_count as usize)?; - for i in 0..indirect_block_count { - list_block.direct_block_addresses[9 + i as usize] = indirect_blocks[i as usize]; - indirect_blocks_waiting_for_allocation_to_be_set.push(indirect_blocks[i as usize]); + let indirect_blocks = find_count_unallocated_datablocks(sb, bd, indirect_block_count as usize + (depth * max_addresses_per_block))?; + for i in 0..(indirect_block_count as usize + (depth * max_addresses_per_block)) { + list_block.direct_block_addresses[9 + i] = indirect_blocks[i]; + indirect_blocks_waiting_for_allocation_to_be_set.push(indirect_blocks[i]); } // write the indirect blocks let mut indirect_block_data = vec![0; core::mem::size_of::<u64>() * max_addresses_per_block]; - for i in 0..indirect_block_count { - // write the count - let count = if i == indirect_block_count - 
1 { - (datablock_count - 9) % max_addresses_per_block as u64 + let mut indirect_blocks_from_previous_layer = VecDeque::new(); + let mut indirect_blocks_from_previous_layer_alt = Vec::new(); + let mut using_alt = false; + let count_per_layer = 16 / (max_addresses_per_block - 8); + for i in 0..(indirect_block_count as usize + (depth * max_addresses_per_block)) { + // we will write the indirect blocks that contain the data blocks first + // then we will write the indirect blocks that contain the indirect blocks + + // are we writing the indirect blocks that contain the data blocks? + let writing_data_blocks = i < indirect_block_count as usize; + if writing_data_blocks { + let count = if i == (indirect_block_count - 1) as usize { // if we're at the last block, not all of the addresses will be used + (datablock_count - 9) % max_addresses_per_block as u64 + } else { + max_addresses_per_block as u64 // otherwise, all of the addresses will be used + }; + // add count + unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count); } + // add addresses + for j in 0..count { + unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + j as usize), allocated_blocks[9 + i * max_addresses_per_block + j as usize]); } + } + + // write the indirect block + if !unsafe { write_datablock(indirect_blocks[i], sb, bd, &indirect_block_data) } { + return None; + } + + indirect_blocks_from_previous_layer.push_back(indirect_blocks[i]); } else { - max_addresses_per_block as u64 - }; - unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count); } + // we're writing the indirect blocks that contain the indirect blocks + if !using_alt { + // write addresses from front of indirect_blocks_from_previous_layer + let count = if indirect_blocks_from_previous_layer.len() > max_addresses_per_block - 8 { + max_addresses_per_block - 8 + } else { + indirect_blocks_from_previous_layer.len() + }; + // add count + unsafe { 
core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count as u64); } + // add addresses + for j in 0..count { + unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + j), indirect_blocks_from_previous_layer.pop_front().unwrap_or(0)); } + } - // write the addresses - for j in 0..count { - unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).offset(1 + j as isize), allocated_blocks[(9 + i) as usize + j as usize]); } - } + // write the indirect block + if !unsafe { write_datablock(indirect_blocks[i], sb, bd, &indirect_block_data) } { + return None; + } - // write the data - if !unsafe { write_datablock(list_block.direct_block_addresses[9 + i as usize], sb, bd, &indirect_block_data) } { - return None; + // add the indirect block to the back of indirect_blocks_from_previous_layer_alt + indirect_blocks_from_previous_layer_alt.push(indirect_blocks[i]); + + // if indirect_blocks_from_previous_layer is empty, switch to using_alt + if indirect_blocks_from_previous_layer.is_empty() { + using_alt = true; + } + } else { + // write addresses from front of indirect_blocks_from_previous_layer_alt + let count = if indirect_blocks_from_previous_layer_alt.len() > max_addresses_per_block - 8 { + max_addresses_per_block - 8 + } else { + indirect_blocks_from_previous_layer_alt.len() + }; + // add count + unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count as u64); } + // add addresses + for j in 0..count { + unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + j), indirect_blocks_from_previous_layer_alt.pop().unwrap_or(0)); } + } + + // write the indirect block + if !unsafe { write_datablock(indirect_blocks[i], sb, bd, &indirect_block_data) } { + return None; + } + + // add the indirect block to the back of indirect_blocks_from_previous_layer + indirect_blocks_from_previous_layer.push_back(indirect_blocks[i]); + + // if indirect_blocks_from_previous_layer_alt is empty, switch 
to using_alt + if indirect_blocks_from_previous_layer_alt.is_empty() { + using_alt = false; + } + } } } } else {