diff --git a/src/btree.rs b/src/btree.rs
index afcf6bb..c8f7dcd 100644
--- a/src/btree.rs
+++ b/src/btree.rs
@@ -2,20 +2,33 @@
 use alloc::string::{String, ToString};
 use alloc::vec;
 use alloc::vec::Vec;
 use ondisk_btree::{BTree, FromBytes, SizeOf, ToBytes};
-use vapfs::{BlockDevice, Index};
+use vapfs::{Index};
 use crate::crc32;
-use crate::structs::Superblock;
-/// # BTd
+/// # BTD
 /// min-degree of all BTrees
-pub const BTd: u32 = 3;
+pub const BTD: u32 = 3;
+
+/// # EntryType
+/// the type of an entry in a directory listing
+#[derive(Debug, Clone)]
+pub enum EntryType {
+    /// a file or directory, stored in an inode
+    Inode(Index),
+    /// a hard link (i.e. pointer to inode, cannot be directory)
+    HardLink(Index),
+    /// a soft link (i.e. path string, can be directory)
+    SoftLink(String),
+    /// this entry is corrupt
+    Corrupt,
+}
 
 /// # DirectoryListing
 /// an entry in a list of all files and directories sharing a common crc32 hash collision (unlikely to happen but just in case!)
 #[derive(Debug, Clone)]
 pub struct DirectoryListing {
     pub name: String,
-    pub inode: Index,
+    pub data: EntryType,
 }
 
 /// # BTreeEntry
@@ -38,7 +51,20 @@ pub struct Directory {
     pub btree: BTree,
 }
 
+impl Default for Directory {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl Directory {
+    /// creates a new, empty directory
+    pub fn new() -> Self {
+        let mut new = Self { backup_entries: Vec::new(), crc32: 0, btree: BTree::new(BTD) };
+        // calculate crc32 hash of empty b-tree
+        new.crc32 = crc32::crc32(&new.btree.to_bytes());
+        new
+    }
     /// Reads the directory into memory from the given bytes
     pub fn open(bytes: &[u8]) -> Self {
         Self::from_bytes(bytes)
@@ -50,29 +76,29 @@ impl Directory {
     }
 
     /// Adds a new entry to the directory listing
-    pub fn new_entry(&mut self, name: &str, inode: Index) {
+    pub fn new_entry(&mut self, name: &str, data: EntryType) {
         let crc32 = crc32::crc32(name.as_bytes());
         // check if this crc32 hash already exists
         let index = self.btree.search(crc32);
         if let Some(node) = index {
             if let Some(key) = node.keys.iter_mut().find(|key| key.0 == crc32) {
                 // if it does, add the new entry to the existing list
-                key.1.entries.push(DirectoryListing { name: name.to_string(), inode });
+                key.1.entries.push(DirectoryListing { name: name.to_string(), data });
             }
         } else {
             // if it doesn't, create a new list
-            let entries = vec![DirectoryListing { name: name.to_string(), inode }];
+            let entries = vec![DirectoryListing { name: name.to_string(), data }];
             self.btree.insert(crc32, BTreeEntry { crc32, entries });
         }
     }
 
     /// Returns an inode for the given entry name
-    pub fn find(&mut self, name: &str) -> Option<Index> {
+    pub fn find(&mut self, name: &str) -> Option<EntryType> {
         let crc32 = crc32::crc32(name.as_bytes());
         let index = self.btree.search(crc32);
         if let Some(node) = index {
             if let Some(key) = node.keys.iter().find(|key| key.0 == crc32) {
-                return Some(key.1.entries.iter().find(|entry| entry.name == name).unwrap().inode);
+                return Some(key.1.entries.iter().find(|entry| entry.name == name).unwrap().data.clone());
             }
         }
         None
@@ -86,6 +112,7 @@ impl Directory {
             if let Some(key) = node.keys.iter_mut().find(|key| key.0 == crc32) {
                 key.1.entries.retain(|entry| entry.name != name);
                 if key.1.entries.is_empty() {
+                    self.btree.remove(crc32);
                 }
             }
         }
@@ -104,7 +131,7 @@ impl Directory {
 
     /// Rebuilds the b-tree from the backup entries
     pub fn rebuild(&mut self) {
-        self.btree = BTree::new(BTd);
+        self.btree = BTree::new(BTD);
         for entry in &self.backup_entries {
             let crc32 = crc32::crc32(entry.name.as_bytes());
             let index = self.btree.search(crc32);
@@ -156,7 +183,29 @@ impl ToBytes for DirectoryListing {
         let name_len: u32 = self.name.len() as u32;
         bytes.extend_from_slice(&name_len.to_be_bytes());
         bytes.extend_from_slice(self.name.as_bytes());
-        bytes.extend_from_slice(&self.inode.to_be_bytes());
+        match &self.data {
+            EntryType::Inode(index) => {
+                // 0 for inode
+                bytes.push(0);
+                bytes.extend_from_slice(&index.to_be_bytes());
+            }
+            EntryType::HardLink(index) => {
+                // 1 for hard link
+                bytes.push(1);
+                bytes.extend_from_slice(&index.to_be_bytes());
+            }
+            EntryType::SoftLink(str) => {
+                // 2 for soft link
+                bytes.push(2);
+                let str_len: u32 = str.len() as u32;
+                bytes.extend_from_slice(&str_len.to_be_bytes());
+                bytes.extend_from_slice(str.as_bytes());
+            }
+            EntryType::Corrupt => {
+                // 3 for corrupt
+                bytes.push(3);
+            }
+        }
         bytes
     }
 }
@@ -165,8 +214,24 @@ impl FromBytes for DirectoryListing {
     fn from_bytes(bytes: &[u8]) -> Self {
         let name_len = u32::from_be_bytes(bytes[0..4].try_into().unwrap()) as usize;
         let name = String::from_utf8(bytes[4..4 + name_len].to_vec()).unwrap();
-        let inode = Index::from_be_bytes(bytes[4 + name_len..12 + name_len].try_into().unwrap());
-        Self { name, inode }
+        match bytes[4 + name_len] {
+            0 => {
+                let index = u64::from_be_bytes(bytes[5 + name_len..13 + name_len].try_into().unwrap());
+                Self { name, data: EntryType::Inode(index) }
+            }
+            1 => {
+                let index = u64::from_be_bytes(bytes[5 + name_len..13 + name_len].try_into().unwrap());
+                Self { name, data: EntryType::HardLink(index) }
+            }
+            2 => {
+                let str_len = u32::from_be_bytes(bytes[5 + name_len..9 + name_len].try_into().unwrap()) as usize;
+                let str = String::from_utf8(bytes[9 + name_len..9 + name_len + str_len].to_vec()).unwrap();
+                Self { name, data: EntryType::SoftLink(str) }
+            }
+            _ => {
+                Self { name, data: EntryType::Corrupt }
+            }
+        }
     }
 }
diff --git a/src/journal/mod.rs b/src/journal/mod.rs
new file mode 100644
index 0000000..d26ba84
--- /dev/null
+++ b/src/journal/mod.rs
@@ -0,0 +1 @@
+pub mod file_create;
\ No newline at end of file
diff --git a/src/lib.rs b/src/lib.rs
index b0aa940..f0ec39c 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -13,6 +13,8 @@ pub mod structs;
 pub mod bitmap;
 pub mod crc32;
 pub mod safe;
+pub mod listblock;
+pub mod journal;
 
 /// Reads the superblock (located at byte offset 1024 of the block device) and returns it.
 /// Returns None if the block device is too small to contain a superblock.
@@ -32,10 +34,11 @@ pub fn get_superblock(bd: &mut dyn BlockDevice) -> Option<Superblock> {
 /// # Safety
 /// unsafe because it does not journal the write, and does not update any other metadata.
 pub unsafe fn write_superblock(mut sb: Superblock, bd: &mut dyn BlockDevice) -> bool {
+    let block_size = sb.block_size;
     sb.convert_native_to_big_endian();
     let mut buf: [u8; core::mem::size_of::<Superblock>()] = [0; core::mem::size_of::<Superblock>()];
     core::ptr::write(buf.as_mut_ptr() as *mut Superblock, sb);
-    bd.seek(1024);
+    bd.seek(block_size as u64);
     let write_count = bd.write_blocks(&buf);
     write_count == core::mem::size_of::<Superblock>()
 }
@@ -444,10 +447,6 @@ pub fn get_indirect_datablock(sb: &Superblock, bd: &mut dyn BlockDevice, list: L
     }
 }
 
-/// Resizes an inode to the given size.
-/// # Safety
-/// unsafe because it does not journal the write, and does not update any other metadata.
-
 /// Creates a journal entry for a single block write operation.
 /// Should be safe to call at anytime, and shouldn't corrupt anything if the system crashes.
 /// Returns None if the journal is full, or if the block device cannot be written to.
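// --- illustrative sketch (not part of the patch) ---
// A minimal usage sketch of the EntryType-based directory API introduced in
// src/btree.rs above. The function name and the inode index 2 are placeholders
// chosen for the example; everything else follows the signatures shown in the diff.
use alloc::string::String;
use ondisk_btree::ToBytes;
use crate::btree::{Directory, EntryType};

fn directory_usage_sketch() {
    let mut dir = Directory::new();
    // a regular file or subdirectory stores its inode index directly
    dir.new_entry("notes.txt", EntryType::Inode(2));
    // a soft link stores the target path instead of an inode
    dir.new_entry("latest", EntryType::SoftLink(String::from("notes.txt")));

    // lookups now return the kind of entry rather than a bare inode index
    match dir.find("notes.txt") {
        Some(EntryType::Inode(_idx)) => { /* open the inode */ }
        Some(EntryType::HardLink(_idx)) => { /* treat like an inode reference */ }
        Some(EntryType::SoftLink(_path)) => { /* resolve the path */ }
        Some(EntryType::Corrupt) | None => { /* missing or unreadable entry */ }
    }

    // each DirectoryListing serializes as: 4-byte big-endian name length, the name
    // bytes, a 1-byte tag (0 inode, 1 hard link, 2 soft link, 3 corrupt), then the payload
    let _bytes = dir.to_bytes();
}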
@@ -1314,7 +1313,7 @@ pub fn flush_multi_block_write(sb: &Superblock, bd: &mut dyn BlockDevice, entry_ // enclosed to make collapsable // todo! we should refactor this code eventually so that we don't have - // multi-hundred line long things cluttering everything like this + // todo! multi-hundred line long things cluttering everything like this { fn pushunused1( sb: &Superblock, bd: &mut dyn BlockDevice, list_block: &[Index], old_count: usize, @@ -1840,19 +1839,11 @@ pub fn journaled_write_data(sb: &Superblock, bd: &mut dyn BlockDevice, inode: In } // try again - journal_entry = if data.len() > sb.block_size as _ { - schedule_multi_block_write( - sb, bd, inode, - from_block, (data.len() as Index + sb.block_size as Index - 1) / sb.block_size as Index, - data, - ) - } else { - schedule_single_block_write( + journal_entry = schedule_single_block_write( sb, bd, inode, JBRTargetType::DataBlock, Some(from_block), data, - ) - }; + ); if journal_entry.is_none() { return JournaledWriteResult::UnderlyingBlockDeviceError; diff --git a/src/listblock.rs b/src/listblock.rs new file mode 100644 index 0000000..9b7b747 --- /dev/null +++ b/src/listblock.rs @@ -0,0 +1,267 @@ +use alloc::vec; +use alloc::vec::Vec; +use vapfs::{BlockDevice, Index}; +use crate::read_datablock; +use crate::structs::{ListBlock, Superblock}; + +pub struct ListblockIter<'a> { + listblock: &'a ListBlock, + index: usize, + buf1: (Index, Vec), + buf2: (Index, Vec), + buf3: (Index, Vec), + buf4: (Index, Vec), + buf5: (Index, Vec), + buf6: (Index, Vec), +} + +impl<'a> ListblockIter<'a> { + pub fn new(listblock: &'a ListBlock) -> Self { + Self { listblock, index: 0, buf1: (0, vec![]), buf2: (0, vec![]), buf3: (0, vec![]), buf4: (0, vec![]), buf5: (0, vec![]), buf6: (0, vec![]) } + } + + pub fn next(&mut self, sb: &Superblock, bd: &mut dyn BlockDevice) -> Option { + if self.index as Index >= self.listblock.count { + return None; + } + + if self.index < 32 { + let index = self.listblock.direct_block_addresses[self.index]; + self.index += 1; + return Some(index); + } else { + // copy pasted from structs.rs + // if greater than 32, see the following: + // let N = (maximum number of pointers in an indirect block) * 32 + // (beginning...count) + // 32..N: single indirect block + // 32+N..N^2: double indirect block + // 32+N^2..N^3: triple indirect block + // 32+N^3..N^4: quadruple indirect block + // 32+N^4..N^5: quintuple indirect block + // 32+N^5..N^6: sextuple indirect block + + // block index is address / (max_per_block ^ (1 if single, 2 if double, etc)) + // after getting your layer's address, repeat the previous layer's process + + let max_per_block = sb.block_size as u64 / 8; + let N = max_per_block * 32; + let N2 = N * N; + let N3 = N2 * N; + let N4 = N3 * N; + let N5 = N4 * N; + let N6 = N5 * N; + + let mut address = self.index as u64 - 32; + + // todo: you could probably rewrite this using like recursion or smth + + match address { + _ if address < N => { + let block_index = address / max_per_block; + let address = address % max_per_block; + if self.buf1.1.len() == 0 || self.buf1.0 != self.listblock.single_indirect_block_address[block_index as usize] { + self.buf1.1 = read_datablock(self.listblock.single_indirect_block_address[block_index as usize], sb, bd); + self.buf1.0 = self.listblock.single_indirect_block_address[block_index as usize]; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf1.1[(address * 8) as usize..(address * 8 + 8) as usize]); + let block = u64::from_be_bytes(buf); + self.index += 1; + 
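                    // --- illustrative note (not part of the patch) ---
                    // worked example, assuming the 2048-byte minimum block size enforced in safe.rs:
                    // max_per_block = 2048 / 8 = 256 pointers per block, so N = 256 * 32 = 8192.
                    // a logical index of 40 resolves as: address = 40 - 32 = 8,
                    // block_index = 8 / 256 = 0, offset = 8 % 256 = 8, so the pointer is read
                    // from bytes 64..72 of the data block named by single_indirect_block_address[0].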
Some(block) + } + _ if address < N2 => { + let address = address - N; + let block_index = (address) / (max_per_block ^ 2); + if self.buf2.1.len() == 0 || self.buf2.0 != self.listblock.double_indirect_block_address[block_index as usize] { + self.buf2.1 = read_datablock(self.listblock.double_indirect_block_address[block_index as usize], sb, bd); + self.buf2.0 = self.listblock.double_indirect_block_address[block_index as usize]; + } + let layer2_address = address / max_per_block; + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf2.1[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]); + let layer1_block = u64::from_be_bytes(buf); + let layer1_address = address % max_per_block; + if self.buf1.1.len() == 0 || self.buf1.0 != layer1_block { + self.buf1.1 = read_datablock(layer1_block, sb, bd); + self.buf1.0 = layer1_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf1.1[(layer1_address * 8) as usize..(layer1_address * 8 + 8) as usize]); + let block = u64::from_be_bytes(buf); + self.index += 1; + Some(block) + } + _ if address < N3 => { + let address = address - N2; + let block_index = (address) / (max_per_block ^ 3); + if self.buf3.1.len() == 0 || self.buf3.0 != self.listblock.triple_indirect_block_address[block_index as usize] { + self.buf3.1 = read_datablock(self.listblock.triple_indirect_block_address[block_index as usize], sb, bd); + self.buf3.0 = self.listblock.triple_indirect_block_address[block_index as usize]; + } + let layer3_address = address / (max_per_block ^ 2); + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf3.1[(layer3_address * 8) as usize..(layer3_address * 8 + 8) as usize]); + let layer2_block = u64::from_be_bytes(buf); + let layer2_address = (address % (max_per_block ^ 2)) / max_per_block; + if self.buf2.1.len() == 0 || self.buf2.0 != layer2_block { + self.buf2.1 = read_datablock(layer2_block, sb, bd); + self.buf2.0 = layer2_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf2.1[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]); + let layer1_block = u64::from_be_bytes(buf); + let layer1_address = address % max_per_block; + if self.buf1.1.len() == 0 || self.buf1.0 != layer1_block { + self.buf1.1 = read_datablock(layer1_block, sb, bd); + self.buf1.0 = layer1_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf1.1[(layer1_address * 8) as usize..(layer1_address * 8 + 8) as usize]); + let block = u64::from_be_bytes(buf); + self.index += 1; + Some(block) + } + _ if address < N4 => { + let address = address - N3; + let block_index = (address) / (max_per_block ^ 4); + if self.buf4.1.len() == 0 || self.buf4.0 != self.listblock.quadruple_indirect_block_address[block_index as usize] { + self.buf4.1 = read_datablock(self.listblock.quadruple_indirect_block_address[block_index as usize], sb, bd); + self.buf4.0 = self.listblock.quadruple_indirect_block_address[block_index as usize]; + } + let layer4_address = address / (max_per_block ^ 3); + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf4.1[(layer4_address * 8) as usize..(layer4_address * 8 + 8) as usize]); + let layer3_block = u64::from_be_bytes(buf); + let layer3_address = (address % (max_per_block ^ 3)) / (max_per_block ^ 2); + if self.buf3.1.len() == 0 || self.buf3.0 != layer3_block { + self.buf3.1 = read_datablock(layer3_block, sb, bd); + self.buf3.0 = layer3_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf3.1[(layer3_address * 8) as usize..(layer3_address * 8 + 8) 
as usize]); + let layer2_block = u64::from_be_bytes(buf); + let layer2_address = (address % (max_per_block ^ 2)) / max_per_block; + if self.buf2.1.len() == 0 || self.buf2.0 != layer2_block { + self.buf2.1 = read_datablock(layer2_block, sb, bd); + self.buf2.0 = layer2_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf2.1[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]); + let layer1_block = u64::from_be_bytes(buf); + let layer1_address = address % max_per_block; + if self.buf1.1.len() == 0 || self.buf1.0 != layer1_block { + self.buf1.1 = read_datablock(layer1_block, sb, bd); + self.buf1.0 = layer1_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf1.1[(layer1_address * 8) as usize..(layer1_address * 8 + 8) as usize]); + let block = u64::from_be_bytes(buf); + self.index += 1; + Some(block) + } + _ if address < N5 => { + let address = address - N4; + let block_index = (address) / (max_per_block ^ 5); + if self.buf5.1.len() == 0 || self.buf5.0 != self.listblock.quintuple_indirect_block_address[block_index as usize] { + self.buf5.1 = read_datablock(self.listblock.quintuple_indirect_block_address[block_index as usize], sb, bd); + self.buf5.0 = self.listblock.quintuple_indirect_block_address[block_index as usize]; + } + let layer5_address = address / (max_per_block ^ 4); + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf5.1[(layer5_address * 8) as usize..(layer5_address * 8 + 8) as usize]); + let layer4_block = u64::from_be_bytes(buf); + let layer4_address = (address % (max_per_block ^ 4)) / (max_per_block ^ 3); + if self.buf4.1.len() == 0 || self.buf4.0 != layer4_block { + self.buf4.1 = read_datablock(layer4_block, sb, bd); + self.buf4.0 = layer4_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf4.1[(layer4_address * 8) as usize..(layer4_address * 8 + 8) as usize]); + let layer3_block = u64::from_be_bytes(buf); + let layer3_address = (address % (max_per_block ^ 3)) / (max_per_block ^ 2); + if self.buf3.1.len() == 0 || self.buf3.0 != layer3_block { + self.buf3.1 = read_datablock(layer3_block, sb, bd); + self.buf3.0 = layer3_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf3.1[(layer3_address * 8) as usize..(layer3_address * 8 + 8) as usize]); + let layer2_block = u64::from_be_bytes(buf); + let layer2_address = (address % (max_per_block ^ 2)) / max_per_block; + if self.buf2.1.len() == 0 || self.buf2.0 != layer2_block { + self.buf2.1 = read_datablock(layer2_block, sb, bd); + self.buf2.0 = layer2_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf2.1[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]); + let layer1_block = u64::from_be_bytes(buf); + let layer1_address = address % max_per_block; + if self.buf1.1.len() == 0 || self.buf1.0 != layer1_block { + self.buf1.1 = read_datablock(layer1_block, sb, bd); + self.buf1.0 = layer1_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf1.1[(layer1_address * 8) as usize..(layer1_address * 8 + 8) as usize]); + let block = u64::from_be_bytes(buf); + self.index += 1; + Some(block) + } + _ if address < N6 => { + let address = address - N5; + let block_index = (address) / (max_per_block ^ 6); + if self.buf6.1.len() == 0 || self.buf6.0 != self.listblock.sextuple_indirect_block_address[block_index as usize] { + self.buf6.1 = read_datablock(self.listblock.sextuple_indirect_block_address[block_index as usize], sb, bd); + self.buf6.0 = 
self.listblock.sextuple_indirect_block_address[block_index as usize]; + } + let layer6_address = address / (max_per_block ^ 5); + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf6.1[(layer6_address * 8) as usize..(layer6_address * 8 + 8) as usize]); + let layer5_block = u64::from_be_bytes(buf); + let layer5_address = (address % (max_per_block ^ 5)) / (max_per_block ^ 4); + if self.buf5.1.len() == 0 || self.buf5.0 != layer5_block { + self.buf5.1 = read_datablock(layer5_block, sb, bd); + self.buf5.0 = layer5_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf5.1[(layer5_address * 8) as usize..(layer5_address * 8 + 8) as usize]); + let layer4_block = u64::from_be_bytes(buf); + let layer4_address = (address % (max_per_block ^ 4)) / (max_per_block ^ 3); + if self.buf4.1.len() == 0 || self.buf4.0 != layer4_block { + self.buf4.1 = read_datablock(layer4_block, sb, bd); + self.buf4.0 = layer4_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf4.1[(layer4_address * 8) as usize..(layer4_address * 8 + 8) as usize]); + let layer3_block = u64::from_be_bytes(buf); + let layer3_address = (address % (max_per_block ^ 3)) / (max_per_block ^ 2); + if self.buf3.1.len() == 0 || self.buf3.0 != layer3_block { + self.buf3.1 = read_datablock(layer3_block, sb, bd); + self.buf3.0 = layer3_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf3.1[(layer3_address * 8) as usize..(layer3_address * 8 + 8) as usize]); + let layer2_block = u64::from_be_bytes(buf); + let layer2_address = (address % (max_per_block ^ 2)) / max_per_block; + if self.buf2.1.len() == 0 || self.buf2.0 != layer2_block { + self.buf2.1 = read_datablock(layer2_block, sb, bd); + self.buf2.0 = layer2_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf2.1[(layer2_address * 8) as usize..(layer2_address * 8 + 8) as usize]); + let layer1_block = u64::from_be_bytes(buf); + let layer1_address = address % max_per_block; + if self.buf1.1.len() == 0 || self.buf1.0 != layer1_block { + self.buf1.1 = read_datablock(layer1_block, sb, bd); + self.buf1.0 = layer1_block; + } + let mut buf: [u8; 8] = [0; 8]; + buf.copy_from_slice(&self.buf1.1[(layer1_address * 8) as usize..(layer1_address * 8 + 8) as usize]); + let block = u64::from_be_bytes(buf); + self.index += 1; + Some(block) + } + _ => None, + } + } + } +} \ No newline at end of file diff --git a/src/safe.rs b/src/safe.rs index e69de29..82a2c8c 100644 --- a/src/safe.rs +++ b/src/safe.rs @@ -0,0 +1,219 @@ +use alloc::vec; +use ondisk_btree::ToBytes; +use vapfs::{BlockDevice, Timestamp}; +use crate::btree::Directory; +use crate::{bitmap, structs, write_datablock, write_inode, write_journal_entry, write_superblock}; +use crate::structs::{Filetype, Inode, JournalBlockWrite, JournalEntry, JournalEntryContents, ListBlock, Superblock, UNIXMode}; + +pub enum FilesystemError { + /// Returned when an error relating to a lack of storage space occurs + NotEnoughStorageSpace, + /// Returned during filesystem initialisation if the block size is too small + BlockSizeTooSmall, + /// Returned when an error occurs that was not expected + UnexpectedError, +} + +/// initialises a filesystem with the given details +/// block_size is in bytes, and must be at least 2048 bytes +/// bd_size is in blocks, and should not exceed the size of the device that `bd` represents +/// current_time is the current time, in seconds since the unix epoch +/// inode_count is the number of inodes to create, or None to use the default (1/8 of the blocks 
available) +/// journal_count is the number of journal blocks to create, or None to use the default (128 blocks) +pub fn init_filesystem(bd: &mut dyn BlockDevice, block_size: u32, bd_size: u64, current_time: Timestamp, inode_count: Option, journal_count: Option) -> Result<(), FilesystemError> { + if block_size < 2048 { + return Err(FilesystemError::BlockSizeTooSmall); + } + let mut blocks_available = bd_size as i64; + // if we have less than 5 blocks (probably around the minimum size), return not enough storage space + if blocks_available < 5 { + return Err(FilesystemError::NotEnoughStorageSpace); + } + // minus one for the superblock + blocks_available -= 1; + // subtract journal count from blocks available + blocks_available -= journal_count.unwrap_or(128) as i64; + if blocks_available < 0 { + return Err(FilesystemError::NotEnoughStorageSpace); + } + // inode count is the number of blocks available divided by 8 + let inode_count = inode_count.unwrap_or(blocks_available as u64 / 8); + // each inode takes up one block, subtract that from the blocks available + blocks_available -= inode_count as i64; + if blocks_available < 0 { + return Err(FilesystemError::NotEnoughStorageSpace); + } + // divide inode count by 8 (rounded up) to get the size of the inode bitmap + let inode_bitmap_size = (inode_count + 7) / 8; + // divide the bitmap size by the block size to get the number of blocks needed for the inode bitmap + let inode_bitmap_blocks = (inode_bitmap_size + (block_size as u64 - 1)) / block_size as u64; + // subtract the number of blocks needed for the inode bitmap from the blocks available + blocks_available -= inode_bitmap_blocks as i64; + if blocks_available < 0 { + return Err(FilesystemError::NotEnoughStorageSpace); + } + // divide the number of blocks available by 8 to get size of the data bitmap + let data_bitmap_blocks = (blocks_available as u64 + 7) / 8; + // divide the data bitmap size by the block size to get the number of blocks needed for the data bitmap + let data_bitmap_blocks = (data_bitmap_blocks + (block_size as u64 - 1)) / block_size as u64; + // subtract the number of blocks needed for the data bitmap from the blocks available + blocks_available -= data_bitmap_blocks as i64; + if blocks_available < 0 { + return Err(FilesystemError::NotEnoughStorageSpace); + } + + // layout of disk is roughly + // superblock + // data block bitmap + // inode block bitmap + // inode blocks + // journal blocks + // data blocks + let sb_block = 1; + let data_bitmap_block = sb_block + 1; + let inode_bitmap_block = data_bitmap_block + data_bitmap_blocks; + let inode_blocks = inode_bitmap_block + inode_bitmap_blocks; + let journal_blocks = inode_blocks + inode_count; + let data_blocks = journal_blocks + journal_count.unwrap_or(128); + + // create the superblock + let mut sb = Superblock { + magic: structs::MAGIC, + block_size, + first_data_block: data_blocks, + first_inode_block: inode_blocks, + first_journal_block: journal_blocks, + inode_count, + data_block_count: blocks_available as u64, + journal_block_count: journal_count.unwrap_or(128), + creation_time: current_time, + last_modification_time: current_time, + checksum: 0, + journal_position: 0, + reserved: [0; 7], + }; + + // calculate the checksum + sb.convert_native_to_big_endian(); + sb.recalculate_checksum(); + sb.convert_big_endian_to_native(); + + // create the first inode, which is the root directory + let root_perms = UNIXMode::ReadOwner as u16 | UNIXMode::WriteOwner as u16 | + UNIXMode::ExecuteOwner as u16 | UNIXMode::ReadGroup as 
u16 | + UNIXMode::ExecuteGroup as u16 | UNIXMode::Read as u16 | UNIXMode::Execute as u16 | + Filetype::Directory as u16; + let mut root_inode = Inode { + mode: root_perms, + link_count: 0, + uid: 0, + gid: 0, + size: 0, + block_count: 0, + creation_time: current_time, + last_access_time: current_time, + last_modification_time: current_time, + last_inode_modification_time: current_time, + deletion_time: 0, + flags: 0, + listblock: ListBlock { + count: 0, + direct_block_addresses: [0; 32], + single_indirect_block_address: [0; 32], + double_indirect_block_address: [0; 32], + triple_indirect_block_address: [0; 32], + quadruple_indirect_block_address: [0; 32], + quintuple_indirect_block_address: [0; 32], + sextuple_indirect_block_address: [0; 32], + }, + checksum: 0, + }; + + let root_data = Directory::new(); + let root_bytes = root_data.to_bytes(); + // how many blocks are needed to store the root directory + let root_block_count = (root_bytes.len() + (block_size as usize - 1)) / block_size as usize; + // if its over 32 blocks, return unexpected error + // (unless my b-tree implementation is awful, an empty directory should not take up 64KB) + if root_block_count > 32 { + return Err(FilesystemError::UnexpectedError); + } + let mut left = root_block_count; + let mut buffer = vec![0; block_size as usize]; + let mut blocki = 0; + let mut bufi = 0; + let mut datai = 0; + while left > 0 { + buffer.clear(); + while bufi < block_size && datai < root_bytes.len() { + buffer.push(root_bytes[datai]); + bufi += 1; + datai += 1; + } + unsafe { write_datablock(blocki, &sb, bd, &buffer) }; + blocki += 1; + bufi = 0; + left -= 1; + } + root_inode.size = root_bytes.len() as u64; + root_inode.block_count = root_block_count as u64; + for i in 0..root_block_count { + root_inode.listblock.direct_block_addresses[i] = i as u64; + } + root_inode.listblock.count = root_block_count as u64; + + // recalculate the inode checksum + root_inode.convert_native_to_big_endian(); + root_inode.recalculate_checksum(); + root_inode.convert_big_endian_to_native(); + + // write the root inode to the first inode block + unsafe { + write_inode(0, &sb, bd, root_inode); + } + + // create the inode bitmap + let mut inode_bitmap = vec![0u8; inode_bitmap_size as usize]; + // set the first bit to 1, as the root inode is allocated + inode_bitmap[0] |= 1; + // write the inode bitmap to the inode bitmap block + bd.seek(inode_bitmap_block * block_size as u64); + bd.write_blocks(&inode_bitmap); + + // create the data bitmap + let mut data_bitmap = vec![0u8; data_bitmap_blocks as usize]; + for i in 0..root_block_count { + bitmap::set_bit(&mut data_bitmap, i, true); + } + // write the data bitmap to the data bitmap block + bd.seek(data_bitmap_block * block_size as u64); + bd.write_blocks(&data_bitmap); + + // zero out the journal blocks + for i in 0..journal_blocks { + unsafe { + write_journal_entry(i, &sb, bd, JournalEntry { + operation: 0, + zeroed_content_crc32: 0, + content: JournalEntryContents { + block_write: JournalBlockWrite { + flags: 0, + target_type: 0, + target_inode: 0, + target_block: 0, + real_target_block: 0, + source_block: 0, + source_block_crc32: 0, + }, + }, + }); + } + } + + // write the superblock to the superblock block + unsafe { + write_superblock(sb, bd); + } + + Ok(()) +} \ No newline at end of file diff --git a/src/structs.rs b/src/structs.rs index 0b160c2..60ac4a4 100644 --- a/src/structs.rs +++ b/src/structs.rs @@ -7,7 +7,7 @@ pub const MAGIC: u64 = 0x766170554653; /// # Superblock /// The primary struct of a VapUFS 
filesystem, contains metadata about the filesystem. -/// Located at byte offset 2048 of the block device. +/// Located at block 1 of the block device. /// All values are big-endian unless otherwise specified. /// Directly after (i.e. the next block after) the Superblock are the free data blocks bitmap and the free inodes bitmap. /// Free data blocks bitmap is data_block_count / 8 bytes long (rounded up). @@ -31,10 +31,6 @@ pub struct Superblock { pub data_block_count: Index, /// total count of blocks dedicated to journal pub journal_block_count: Index, - /// total count of inodes in use - pub allocated_inode_count: Index, - /// total count of data blocks in use - pub allocated_data_block_count: Index, /// timestamp of creation pub creation_time: Timestamp, /// timestamp of last modification @@ -63,8 +59,6 @@ impl Superblock { self.inode_count = u64::from_be(self.inode_count); self.data_block_count = u64::from_be(self.data_block_count); self.journal_block_count = u64::from_be(self.journal_block_count); - self.allocated_inode_count = u64::from_be(self.allocated_inode_count); - self.allocated_data_block_count = u64::from_be(self.allocated_data_block_count); self.creation_time = u64::from_be(self.creation_time); self.last_modification_time = u64::from_be(self.last_modification_time); for i in 0..8 { @@ -85,8 +79,6 @@ impl Superblock { self.inode_count = u64::to_be(self.inode_count); self.data_block_count = u64::to_be(self.data_block_count); self.journal_block_count = u64::to_be(self.journal_block_count); - self.allocated_inode_count = u64::to_be(self.allocated_inode_count); - self.allocated_data_block_count = u64::to_be(self.allocated_data_block_count); self.creation_time = u64::to_be(self.creation_time); self.last_modification_time = u64::to_be(self.last_modification_time); for i in 0..8 { @@ -350,6 +342,12 @@ pub enum JournalOperation { SingleBlockWrite = 0, /// A multi-block write, described by a `JournalMultiblockWrite` MultiblockWrite = 1, + /// A file creation, described by a `JournalFileCreate` + FileCreate = 2, + /// A file deletion, described by a `JournalFileDelete` + FileDelete = 3, + /// A file truncation, described by a `JournalFileTruncate` + FileTruncate = 4, } /// # JournalBlockWrite
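// --- illustrative sketch (not part of the patch) ---
// Worked example of the on-disk layout arithmetic performed by init_filesystem in
// src/safe.rs above, for a hypothetical 65536-block device with 2048-byte blocks and
// the default 128 journal blocks. The numbers follow the integer math shown in the diff.
fn layout_example() {
    let bd_size: u64 = 65536;      // hypothetical device size in blocks
    let block_size: u64 = 2048;    // minimum allowed block size
    let journal_count: u64 = 128;  // default journal size

    let mut available = bd_size - 1 - journal_count;                               // 65407 after superblock + journal
    let inode_count = available / 8;                                               // 8175 inodes, one block each
    available -= inode_count;                                                      // 57232
    let inode_bitmap_blocks = ((inode_count + 7) / 8 + block_size - 1) / block_size; // 1
    available -= inode_bitmap_blocks;                                              // 57231
    let data_bitmap_blocks = ((available + 7) / 8 + block_size - 1) / block_size;  // 4
    available -= data_bitmap_blocks;                                               // 57227 data blocks remain

    // resulting block offsets, mirroring the "layout of disk" comment in init_filesystem
    let sb_block = 1u64;
    let data_bitmap_block = sb_block + 1;                             // 2
    let inode_bitmap_block = data_bitmap_block + data_bitmap_blocks;  // 6
    let inode_blocks = inode_bitmap_block + inode_bitmap_blocks;      // 7
    let journal_blocks = inode_blocks + inode_count;                  // 8182
    let data_blocks = journal_blocks + journal_count;                 // 8310
    assert_eq!((available, data_blocks), (57227, 8310));
}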