more stuff for infinite dbas

husky 2023-08-21 22:04:10 -07:00
parent 4214816e3f
commit e7d5daa623
2 changed files with 103 additions and 114 deletions

View file

@@ -562,9 +562,10 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
// write the indirect blocks
let mut indirect_block_data = vec![0; core::mem::size_of::<u64>() * max_addresses_per_block];
let mut indirect_blocks_from_previous_layer = VecDeque::new();
let mut indirect_blocks_from_previous_layer_alt = Vec::new();
let mut indirect_blocks_from_previous_layer_alt = VecDeque::new();
let mut using_alt = false;
let count_per_layer = 16 / (max_addresses_per_block - 8);
let mut acc: VecDeque<u64> = VecDeque::new(); // how much each previous layer has had
let mut acc_alt: VecDeque<u64> = VecDeque::new(); // how much each previous layer has had
for i in 0..(indirect_block_count as usize + (depth * max_addresses_per_block)) {
// we will write the indirect blocks that contain the data blocks first
// then we will write the indirect blocks that contain the indirect blocks
@@ -590,9 +591,11 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
}
indirect_blocks_from_previous_layer.push_back(indirect_blocks[i]);
acc.push_back(count);
} else {
// we're writing the indirect blocks that contain the indirect blocks
if !using_alt {
// write addresses from front of indirect_blocks_from_previous_layer
let count = if indirect_blocks_from_previous_layer.len() > max_addresses_per_block - 8 {
max_addresses_per_block - 8
@@ -603,7 +606,10 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count as u64); }
// add addresses
for j in 0..count {
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + j), indirect_blocks_from_previous_layer.pop_front().unwrap_or(0)); }
// get acc value
let acc_val = acc.pop_front().unwrap_or(0);
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + (j * 16)), acc_val); }
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + 8 + (j * 16)), indirect_blocks_from_previous_layer.pop_front().unwrap_or(0)); }
}
// write the indirect block
@@ -612,7 +618,8 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
}
// add the indirect block to the back of indirect_blocks_from_previous_layer_alt
indirect_blocks_from_previous_layer_alt.push(indirect_blocks[i]);
indirect_blocks_from_previous_layer_alt.push_back(indirect_blocks[i]);
acc_alt.push_back(count as u64);
// if indirect_blocks_from_previous_layer is empty, switch to using_alt
if indirect_blocks_from_previous_layer.is_empty() {
@@ -629,7 +636,10 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
unsafe { core::ptr::write(indirect_block_data.as_mut_ptr() as *mut u64, count as u64); }
// add addresses
for j in 0..count {
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + j), indirect_blocks_from_previous_layer_alt.pop().unwrap_or(0)); }
// get acc value
let acc_val = acc_alt.pop_front().unwrap_or(0);
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + (j * 16)), acc_val); }
unsafe { core::ptr::write((indirect_block_data.as_mut_ptr() as *mut u64).add(8 + 8 + (j * 16)), indirect_blocks_from_previous_layer_alt.pop_front().unwrap_or(0)); }
}
// write the indirect block
@@ -639,6 +649,7 @@ pub fn schedule_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, con
// add the indirect block to the back of indirect_blocks_from_previous_layer
indirect_blocks_from_previous_layer.push_back(indirect_blocks[i]);
acc.push_back(count as u64);
// if indirect_blocks_from_previous_layer_alt is empty, switch to using_alt
if indirect_blocks_from_previous_layer_alt.is_empty() {
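
For orientation, the two-queue ping-pong used above can be sketched on its own. Everything in this sketch (the FANOUT constant, the in-memory (acc, address) pairs, the fake block ids) is an illustrative assumption, not this crate's API or on-disk layout:

// Minimal sketch of the layering scheme: one queue holds blocks produced by the
// previous layer, the other collects blocks produced by the current layer, and
// the roles swap once the previous layer is drained, like toggling `using_alt`.
// Each parent entry pairs a child address with how many leaves it covers (`acc`).
use std::collections::VecDeque;

const FANOUT: usize = 4; // stand-in for the per-block entry capacity

fn build_indirection_layers(leaves: Vec<u64>) -> Vec<Vec<(u64, u64)>> {
    let mut prev: VecDeque<(u64, u64)> = leaves.into_iter().map(|a| (1, a)).collect();
    let mut next: VecDeque<(u64, u64)> = VecDeque::new();
    let mut written: Vec<Vec<(u64, u64)>> = Vec::new(); // "indirect blocks" in write order

    while prev.len() > 1 {
        let mut block: Vec<(u64, u64)> = Vec::new();
        let mut covered: u64 = 0;
        while let Some((acc, addr)) = prev.pop_front() {
            covered += acc;
            block.push((acc, addr)); // (acc, address) pair, as in the new entry format
            if block.len() == FANOUT || prev.is_empty() {
                written.push(block.clone());
                next.push_back((covered, written.len() as u64 - 1)); // fake id for the new parent
                block.clear();
                covered = 0;
            }
        }
        core::mem::swap(&mut prev, &mut next); // swap roles, like flipping `using_alt`
    }
    written
}
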
@@ -1019,79 +1030,54 @@ pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_
let content = unsafe { journal_entry.content.multiblock_write };
// if flag is 6, we have to find the differences between the old and new list blocks and deallocate the unused blocks
// if flag is 6, we will deallocate all blocks in the old list block (using a stupid method so that it's faster)
if content.flags == 6 {
let mut unused_datablocks: Vec<Index>;
let mut unused_datablocks: Vec<Index> = Vec::new();
let list_block = read_datablock(content.list_block, sb, bd);
let list_block = unsafe { core::ptr::read(list_block.as_ptr() as *const ListBlock) };
let old_list_block = read_datablock(content.old_list_block, sb, bd);
let old_list_block = unsafe { core::ptr::read(old_list_block.as_ptr() as *const ListBlock) };
if !list_block.using_indirect_blocks {
if !old_list_block.using_indirect_blocks {
// simplest case, just check what blocks are different
unused_datablocks = old_list_block.direct_block_addresses.iter()
.filter(|&x| !list_block.direct_block_addresses.contains(x))
.copied().collect();
} else {
// old list block uses indirect blocks, new one doesn't
unused_datablocks = old_list_block.direct_block_addresses[0..9].iter()
.filter(|&x| !list_block.direct_block_addresses.contains(x))
.copied().collect();
// compare indirect blocks
for i in 9..12 {
let indirect_block = read_datablock(old_list_block.direct_block_addresses[i], sb, bd);
let count = u64::from_be_bytes(indirect_block[0..8].try_into().unwrap());
for j in 0..count as usize {
let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap());
if !list_block.direct_block_addresses.contains(&dba) {
unused_datablocks.push(dba);
for i in 0..9 {
if old_list_block.direct_block_addresses[i] != 0 {
unused_datablocks.push(old_list_block.direct_block_addresses[i]);
}
}
for x in 0..3 {
if !old_list_block.using_indirect_blocks {
unused_datablocks.push(old_list_block.direct_block_addresses[x + 9]);
} else {
// read indirect block
let mut deallocation_queue: Vec<Index> = Vec::new();
let mut buf = vec![];
let mut ptr = old_list_block.direct_block_addresses[x + 9];
deallocation_queue.push(ptr);
while !deallocation_queue.is_empty() {
// read indirect block
buf = read_datablock(ptr, sb, bd);
let mut head = 0;
let count = u64::from_be_bytes(buf[head..head + 8].try_into().unwrap()) as usize;
head += 8;
for i in 0..count {
let is_data: bool = buf[head] != 0;
let mut depth_no_data = buf[head..head + 8].to_vec();
depth_no_data[0] = 0;
let depth = u64::from_be_bytes(depth_no_data.try_into().unwrap()) as usize;
head += 8;
let new_ptr = u64::from_be_bytes(buf[head..head + 8].try_into().unwrap());
if !is_data {
deallocation_queue.push(new_ptr);
} else {
unused_datablocks.push(new_ptr);
}
}
}
}
} else if !old_list_block.using_indirect_blocks {
// new list block uses indirect blocks, old one doesn't
unused_datablocks = old_list_block.direct_block_addresses.iter()
.filter(|&x| !list_block.direct_block_addresses[0..9].contains(x))
.copied().collect();
// compare indirect blocks
for i in 9..12 {
let indirect_block = read_datablock(list_block.direct_block_addresses[i], sb, bd);
let count = u64::from_be_bytes(indirect_block[0..8].try_into().unwrap());
for j in 0..count as usize {
let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap());
if unused_datablocks.contains(&dba) {
unused_datablocks.retain(|&x| x != dba);
}
}
}
} else {
// both use indirect blocks
// most complicated case, compare all blocks
unused_datablocks = old_list_block.direct_block_addresses[0..9].iter()
.filter(|&x| !list_block.direct_block_addresses[0..9].contains(x))
.copied().collect();
let mut new_indirect_blocks: Vec<Index> = Vec::new();
for i in 9..12 {
let indirect_block = read_datablock(list_block.direct_block_addresses[i], sb, bd);
let count = u64::from_be_bytes(indirect_block[0..8].try_into().unwrap());
for j in 0..count as usize {
let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap());
new_indirect_blocks.push(dba);
}
}
// compare indirect blocks
for i in 9..12 {
let indirect_block = read_datablock(old_list_block.direct_block_addresses[i], sb, bd);
let count = u64::from_be_bytes(indirect_block[0..8].try_into().unwrap());
for j in 0..count as usize {
let dba = u64::from_be_bytes(indirect_block[(8 + j * 8)..(16 + j * 8)].try_into().unwrap());
if !new_indirect_blocks.contains(&dba) {
unused_datablocks.push(dba);
// deallocate this block
unused_datablocks.push(ptr);
// get next block
if let Some(next_ptr) = deallocation_queue.pop() {
ptr = next_ptr;
} else {
break;
}
}
}
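
For orientation, the walk the new flag-6 path performs over the old indirection tree can be sketched in isolation. The Entry and Node types and the read_node callback below are assumptions made for illustration; the real code reads raw datablocks and decodes the count and flag bytes inline:

// Illustrative sketch: iterative traversal of an indirection tree with an explicit
// work queue, collecting data-block addresses (and the indirect blocks themselves)
// for deallocation instead of diffing the old and new trees entry by entry.
struct Entry { is_data: bool, addr: u64 }
struct Node { entries: Vec<Entry> }

fn collect_unused(root: u64, read_node: impl Fn(u64) -> Node) -> Vec<u64> {
    let mut queue = vec![root];      // indirect blocks still to visit
    let mut unused = Vec::new();     // everything that can be freed
    while let Some(ptr) = queue.pop() {
        let node = read_node(ptr);
        for e in node.entries {
            if e.is_data {
                unused.push(e.addr); // leaf: a data block to free
            } else {
                queue.push(e.addr);  // interior: another indirect block to visit
            }
        }
        unused.push(ptr);            // the indirect block itself is freed too
    }
    unused
}
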
@@ -1341,62 +1327,63 @@ pub fn journaled_write_inode(sb: &Superblock, bd: &mut dyn BlockDevice, old_inod
JournaledWriteResult::Success
}
/// writes data blocks of an inode to the disk, automatically decides whether to use single or multi block writes
/// writes data blocks of an inode to the disk, uses single block writes
/// if you want to write data to the disk, this is likely the function you want
/// # Important Note
/// if data.len() is not a multiple of the block size, undefined behavior may occur
pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: Index, from_block: Index, data: &[u8], flush_immediately: bool) -> JournaledWriteResult {
// create journal entry
let mut journal_entry = if data.len() > sb.block_size as _ {
schedule_multi_block_write(
sb, bd, inode,
from_block, (data.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index,
data,
)
} else {
schedule_single_block_write(
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block),
data,
)
};
// if none...
if journal_entry.is_none() {
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry(sb, bd);
if why != JournaledWriteResult::Success {
return why;
}
// try again
journal_entry = if data.len() > sb.block_size as _ {
schedule_multi_block_write(
sb, bd, inode,
from_block, (data.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index,
data,
)
} else {
schedule_single_block_write(
let mut journal_entries = {
let mut journal_entries = Vec::new();
for i in 0..(data.len() / sb.block_size as usize) {
journal_entries.push(schedule_single_block_write(
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block),
data,
)
};
));
}
journal_entries
};
while let Some(mut journal_entry) = journal_entries.pop() {
// if none...
if journal_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
}
let journal_entry = journal_entry.unwrap();
// are there any unallocated datablocks left?
let why = why_cant_make_journal_entry(sb, bd);
if why != JournaledWriteResult::Success {
return why;
}
// if flush_immediately is true, flush all writes until the journal entry is complete
#[allow(clippy::collapsible_if)] // this is more readable
if flush_immediately {
if !flush_count_entries(sb, bd, journal_entry, true) {
return JournaledWriteResult::PotentialFilesystemCorruption;
// try again
journal_entry = if data.len() > sb.block_size as _ {
schedule_multi_block_write(
sb, bd, inode,
from_block, (data.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index,
data,
)
} else {
schedule_single_block_write(
sb, bd, inode,
JBRTargetType::DataBlock, Some(from_block),
data,
)
};
if journal_entry.is_none() {
return JournaledWriteResult::UnderlyingBlockDeviceError;
}
}
let journal_entry = journal_entry.unwrap();
// if flush_immediately is true, flush all writes until the journal entry is complete
#[allow(clippy::collapsible_if)] // this is more readable
if flush_immediately {
if !flush_count_entries(sb, bd, journal_entry, true) {
return JournaledWriteResult::PotentialFilesystemCorruption;
}
}
}
JournaledWriteResult::Success
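
A hedged usage sketch of journaled_write_data as documented above; the wrapper function, its name, and the zero-padding policy are illustrative assumptions, not part of the crate:

// Illustrative only: pad the payload to a whole number of blocks before calling
// journaled_write_data (per the "Important Note"), then report success or failure.
fn write_padded(sb: &Superblock, bd: &mut dyn BlockDevice, inode: Index, from_block: Index, bytes: &[u8]) -> bool {
    let bs = sb.block_size as usize;
    let mut padded = bytes.to_vec();
    padded.resize(((bytes.len() + bs - 1) / bs) * bs, 0); // round up to a block multiple
    matches!(
        journaled_write_data(sb, bd, inode, from_block, &padded, true),
        JournaledWriteResult::Success
    )
}
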

View file

@@ -469,7 +469,9 @@ pub enum JBRTargetType {
/// # JournalMultiblockWrite
/// a special entry for writing to multiple blocks at once,
/// used for circumstances where it is very important that all blocks are either
/// written successfully, or not written at all (e.g. directory blocks)
/// written successfully, or not written at all (e.g. directory blocks).
/// all data stored in an inode must be written at once using this operation,
/// so it may not be suitable for large files.
/// writes are performed as follows:
/// 1. create and write the journal entry
/// 2. allocate a data block to store a MultiblockWriteList