fix a ton of memory-related bugs

husky 2025-09-11 16:00:04 -07:00
parent af901cdfb3
commit f13cb2eb12
11 changed files with 215 additions and 230 deletions

View file

@ -2,4 +2,4 @@
storage_device="${1}" storage_device="${1}"
qemu-system-riscv32 -machine virt -bios none -drive if=none,format=raw,file="${storage_device}",id=disk1 -device virtio-blk-device,drive=disk1 -display sdl -device virtio-gpu-device -serial stdio -m 5M -device loader,cpu-num=0,file=target/riscv32imac-unknown-none-elf/release/lbos -monitor telnet:127.0.0.1:1235,server,nowait -d guest_errors,unimp qemu-system-riscv32 -machine virt -bios none -drive if=none,format=raw,file="${storage_device}",id=disk1 -device virtio-blk-device,drive=disk1 -display sdl -device virtio-gpu-device -serial stdio -m 10M -device loader,cpu-num=0,file=target/riscv32imac-unknown-none-elf/release/lbos -monitor telnet:127.0.0.1:1235,server,nowait -d guest_errors,unimp

View file

@ -4,10 +4,11 @@ ENTRY( _start )
MEMORY MEMORY
{ {
rom (wxa) : ORIGIN = 0x80000000, LENGTH = 64 * 1024 rom (wxa) : ORIGIN = 0x80000000, LENGTH = 0x10000
ram (wxa) : ORIGIN = ORIGIN(rom) + LENGTH(rom), LENGTH = 64 * 1024
virtqueues (wxa) : ORIGIN = ORIGIN(ram) + LENGTH(ram), LENGTH = 16 * 1024 * 2 ram (wxa) : ORIGIN = 0x80010000, LENGTH = 0x10000
framebuffer (wxa) : ORIGIN = ORIGIN(virtqueues) + LENGTH(virtqueues), LENGTH = 320 * 240 * 3
virtqueues (wxa) : ORIGIN = 0x80020000, LENGTH = 0x20000
} }
PHDRS PHDRS
@ -38,7 +39,7 @@ SECTIONS {
PROVIDE(_bss_start = .); PROVIDE(_bss_start = .);
*(.sbss .sbss.*) *(.sbss .sbss.*)
*(.bss .bss.*) *(.bss .bss.*)
. = ALIGN(4096); . = ALIGN(512);
PROVIDE(_bss_end = .); PROVIDE(_bss_end = .);
} >ram AT>ram :bss } >ram AT>ram :bss
@ -55,9 +56,9 @@ SECTIONS {
PROVIDE(_tstack_end = _tstack_start + 16384); PROVIDE(_tstack_end = _tstack_start + 16384);
PROVIDE(_heap_start = _tstack_end); PROVIDE(_heap_start = _tstack_end);
PROVIDE(_heap_size = _MEM_END - _heap_start); PROVIDE(_heap_size = _MEM_END - _heap_start);
PROVIDE(_virtio_queue_1_start = ORIGIN(virtqueues)); PROVIDE(_virtio_queue_1_start = ORIGIN(virtqueues));
PROVIDE(_virtio_queue_1_end = _virtio_queue_1_start + 16384); PROVIDE(_virtio_queue_1_end = _virtio_queue_1_start + 0x10000);
PROVIDE(_virtio_queue_2_start = _virtio_queue_1_end); PROVIDE(_virtio_queue_2_start = _virtio_queue_1_end);
PROVIDE(_virtio_queue_2_end = _virtio_queue_2_start + 16384); PROVIDE(_virtio_queue_2_end = _virtio_queue_2_start + 0x10000);
PROVIDE(_framebuffer_start = ORIGIN(framebuffer));
} }

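The repartitioned layout above hard-codes three back-to-back regions: 64 KiB of rom at 0x80000000, 64 KiB of ram at 0x80010000, and a 128 KiB virtqueues window at 0x80020000 (one 0x10000 range per queue); the dedicated framebuffer region is gone. Below is a minimal host-side sketch that mirrors those numbers and checks that they line up; the constants copy the script above, and the assertions are illustrative, not part of the kernel.

```rust
// Sketch: mirror the new linker-script regions and check they are contiguous
// and non-overlapping. Constants copy the values above; the checks are only
// illustrative and do not exist in the kernel.
const ROM_ORIGIN: usize = 0x8000_0000;
const ROM_LEN: usize = 0x1_0000; // 64 KiB
const RAM_ORIGIN: usize = 0x8001_0000;
const RAM_LEN: usize = 0x1_0000; // 64 KiB
const VIRTQ_ORIGIN: usize = 0x8002_0000;
const VIRTQ_LEN: usize = 0x2_0000; // two 0x10000 queue windows

fn main() {
    assert_eq!(ROM_ORIGIN + ROM_LEN, RAM_ORIGIN);
    assert_eq!(RAM_ORIGIN + RAM_LEN, VIRTQ_ORIGIN);
    // each queue gets 0x10000 bytes, matching _virtio_queue_*_end above
    assert_eq!(VIRTQ_LEN, 2 * 0x1_0000);
    // total footprint stays well under the 10M -m setting in the run script
    assert!(VIRTQ_ORIGIN + VIRTQ_LEN - ROM_ORIGIN <= 10 * 1024 * 1024);
    println!("memory map ok");
}
```
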
View file

@ -1,4 +1,3 @@
use crate::dev::{framebuffer_update, FRAMEBUFFER_HEIGHT, FRAMEBUFFER_WIDTH};
use crate::dev::framebuffer::{fb_clearscreen, fb_write_char_array}; use crate::dev::framebuffer::{fb_clearscreen, fb_write_char_array};
use crate::spinlock::Spinlock; use crate::spinlock::Spinlock;
use crate::trafficcontrol::TrafficControl; use crate::trafficcontrol::TrafficControl;
@ -38,6 +37,7 @@ impl FramebufferConsole {
// DOES send a framebuffer update! // DOES send a framebuffer update!
pub fn printstr(&mut self, tc: &mut TrafficControl, str: &str) { pub fn printstr(&mut self, tc: &mut TrafficControl, str: &str) {
return;
for c in str.chars() { for c in str.chars() {
let mut was_special_char = false; let mut was_special_char = false;
if c == '\n' || c == '\r' { if c == '\n' || c == '\r' {

View file

@ -1,6 +1,4 @@
use crate::dev::{ use crate::dev::{linebuffer_push, FRAMEBUFFER_HEIGHT, FRAMEBUFFER_WIDTH, LINEBUFFER_ADDR, LINEBUFFER_BPP};
FRAMEBUFFER_ADDR, FRAMEBUFFER_BPP, FRAMEBUFFER_HEIGHT, FRAMEBUFFER_WIDTH, framebuffer_update,
};
use crate::trafficcontrol::TrafficControl; use crate::trafficcontrol::TrafficControl;
use core::sync::atomic::Ordering; use core::sync::atomic::Ordering;
@ -37,69 +35,61 @@ impl FBColor {
pub fn fb_write_char_array(tc: &mut TrafficControl, mut x: usize, mut y: usize, chars: &[char]) { pub fn fb_write_char_array(tc: &mut TrafficControl, mut x: usize, mut y: usize, chars: &[char]) {
const BYTES: [u8; 3] = FB_FG_COLOR.to_bytes(); const BYTES: [u8; 3] = FB_FG_COLOR.to_bytes();
let ogx = x; let fbaddr = LINEBUFFER_ADDR.load(Ordering::Relaxed);
let fbaddr = FRAMEBUFFER_ADDR.load(Ordering::Relaxed);
if fbaddr == 0 { if fbaddr == 0 {
return; return;
} }
let fbstride = FRAMEBUFFER_BPP.load(Ordering::Relaxed) * FRAMEBUFFER_WIDTH;
const CHAR_SIZE: usize = 16; const CHAR_SIZE: usize = 16;
let mut drew_anything = false; let mut drew_anything = false;
for c in chars { for line in 0..FRAMEBUFFER_HEIGHT {
let c = *c; for c in chars {
//if c == '\n' { let c = *c;
// y += CHAR_SIZE; if c == ' ' {
// x = ogx; x += CHAR_SIZE;
// if y * CHAR_SIZE > FRAMEBUFFER_HEIGHT { } else if c as u8 > 32 {
// break; let c = c as u8 - 32;
// } let cx = (c % 16) as usize * CHAR_SIZE;
//} else if c == ' ' { let cy = (c / 16) as usize * CHAR_SIZE;
if c == ' ' { for row in 0..CHAR_SIZE {
x += CHAR_SIZE; if (y+row) != line {
//if x * CHAR_SIZE > FRAMEBUFFER_WIDTH { continue;
// x = ogx; }
// y += CHAR_SIZE; for col in 0..CHAR_SIZE {
// if y * CHAR_SIZE > FRAMEBUFFER_HEIGHT { let coff = (VAPFONT_W * (cy + row)) + (cx + col);
// break; let draw = VAPFONT[coff / 8] & (0x80 >> (coff % 8));
// } if draw != 0 {
//} unsafe {
} else if c as u8 > 32 { //let fb = (fbaddr as *mut u8).add(((y + row) * fbstride) + ((x + col) * 4)) as *mut u32;
let c = c as u8 - 32; let fb = (fbaddr as *mut u8).add((x + col) * 4) as *mut u32;
let cx = (c % 16) as usize * CHAR_SIZE; fb.write_volatile(u32::from_ne_bytes([0, BYTES[0], BYTES[1], BYTES[2]]));
let cy = (c / 16) as usize * CHAR_SIZE; drew_anything = true;
for row in 0..CHAR_SIZE { }
for col in 0..CHAR_SIZE {
let coff = (VAPFONT_W * (cy + row)) + (cx + col);
let draw = VAPFONT[coff / 8] & (0x80 >> (coff % 8));
if draw != 0 {
unsafe {
let fb = (fbaddr as *mut u8).add(((y+row)*fbstride) + ((x+col)*4)) as *mut u32;
fb.write_volatile(u32::from_ne_bytes([0, BYTES[0], BYTES[1], BYTES[2]]));
drew_anything = true;
} }
} }
} }
x += CHAR_SIZE;
} }
x += CHAR_SIZE; }
if drew_anything {
linebuffer_push(tc, line as u32);
} }
} }
framebuffer_update(tc, 0, 0, FRAMEBUFFER_WIDTH as u32, FRAMEBUFFER_HEIGHT as u32);
} }
pub fn fb_clearscreen(tc: &mut TrafficControl) { pub fn fb_clearscreen(tc: &mut TrafficControl) {
const BYTES: [u8; 3] = FB_BG_COLOR.to_bytes(); const BYTES: [u8; 3] = FB_BG_COLOR.to_bytes();
let fbaddr = FRAMEBUFFER_ADDR.load(Ordering::Relaxed); let fbaddr = LINEBUFFER_ADDR.load(Ordering::Relaxed);
if fbaddr == 0 { if fbaddr == 0 {
return; return;
} }
let fbstride = FRAMEBUFFER_BPP.load(Ordering::Relaxed) * FRAMEBUFFER_WIDTH;
for y in 0..FRAMEBUFFER_HEIGHT { for y in 0..FRAMEBUFFER_HEIGHT {
for x in 0..FRAMEBUFFER_WIDTH { for x in 0..FRAMEBUFFER_WIDTH {
unsafe { unsafe {
let fb = (fbaddr as *mut u8).add(((y)*fbstride) + ((x)*4)) as *mut u32; //let fb = (fbaddr as *mut u8).add(((y)*fbstride) + ((x)*4)) as *mut u32;
let fb = (fbaddr as *mut u8).add(x*4) as *mut u32;
fb.write_volatile(u32::from_ne_bytes([0, BYTES[0], BYTES[1], BYTES[2]])); fb.write_volatile(u32::from_ne_bytes([0, BYTES[0], BYTES[1], BYTES[2]]));
} }
} }
linebuffer_push(tc, y as u32);
} }
framebuffer_update(tc, 0, 0, FRAMEBUFFER_WIDTH as u32, FRAMEBUFFER_HEIGHT as u32);
} }

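fb_write_char_array and fb_clearscreen now render against LINEBUFFER_ADDR, a buffer one scanline wide: only the glyph rows that fall on the current line are drawn (at their x offsets, with y folded away), and each finished line is handed to linebuffer_push. Here is a standalone sketch of that per-scanline idea, assuming a made-up 8x8 box glyph and a hypothetical flush_line in place of the kernel's 16x16 VAPFONT and linebuffer_push.

```rust
// Sketch of per-scanline text rendering. The font and flush_line() are made
// up for illustration; the kernel uses VAPFONT and linebuffer_push instead.
const WIDTH: usize = 320;
const CHAR: usize = 8;

// one bit per pixel, row-major: a solid box glyph for every character
fn glyph_row(_c: char, _row: usize) -> u8 {
    0xFF
}

fn flush_line(line: usize, pixels: &[u32; WIDTH]) {
    // the kernel would call linebuffer_push(tc, line as u32) here;
    // this stand-in just counts lit pixels
    let lit = pixels.iter().filter(|p| **p != 0).count();
    println!("line {line}: {lit} pixels");
}

fn draw(text: &str, y: usize) {
    let mut scanline = [0u32; WIDTH];
    // walk every output line; only glyph rows that land on this line are drawn
    for line in y..y + CHAR {
        scanline.fill(0);
        let row = line - y;
        let mut x = 0;
        for c in text.chars() {
            let bits = glyph_row(c, row);
            for col in 0..CHAR {
                if bits & (0x80 >> col) != 0 && x + col < WIDTH {
                    scanline[x + col] = 0x00FF_FFFF; // white, XRGB
                }
            }
            x += CHAR;
        }
        flush_line(line, &scanline);
    }
}

fn main() {
    draw("hi", 16);
}
```
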
View file

@ -4,11 +4,14 @@ use crate::trafficcontrol::TrafficControl;
pub const FRAMEBUFFER_WIDTH: usize = 320; pub const FRAMEBUFFER_WIDTH: usize = 320;
pub const FRAMEBUFFER_HEIGHT: usize = 240; pub const FRAMEBUFFER_HEIGHT: usize = 240;
pub const LINEBUFFER_BLOCKS: usize = 3;
pub const LINEBUFFER_HEIGHT: usize = 1;
// NOTE: // NOTE:
// FRAMEBUFFER_ADDR should always be 0 if no framebuffer exists // LINEBUFFER_ADDR should always be 0 if no framebuffer exists
// if FRAMEBUFFER_ADDR is NOT 0, then FRAMEBUFFER_STRIDE should also be NOT 0 // if LINEBUFFER_ADDR is NOT 0, then LINEBUFFER_BPP should also be NOT 0
pub static FRAMEBUFFER_ADDR: AtomicUsize = AtomicUsize::new(0); pub static LINEBUFFER_ADDR: AtomicUsize = AtomicUsize::new(0);
pub static FRAMEBUFFER_BPP: AtomicUsize = AtomicUsize::new(0); pub static LINEBUFFER_BPP: AtomicUsize = AtomicUsize::new(0);
#[cfg(feature = "dev_virtio")] #[cfg(feature = "dev_virtio")]
pub mod virtio; pub mod virtio;
@ -26,16 +29,16 @@ pub fn read_sector(tc: &mut TrafficControl, buffer: usize, size: u32, sector: u6
false false
} }
pub fn framebuffer_address() -> usize { pub fn linebuffer_address() -> usize {
FRAMEBUFFER_ADDR.load(Ordering::Relaxed) LINEBUFFER_ADDR.load(Ordering::Relaxed)
} }
pub fn framebuffer_stride() -> usize { pub fn linebuffer_bpp() -> usize {
FRAMEBUFFER_BPP.load(Ordering::Relaxed) LINEBUFFER_BPP.load(Ordering::Relaxed)
} }
pub fn framebuffer_update(tc: &mut TrafficControl, x: u32, y: u32, width: u32, height: u32) { pub fn linebuffer_push(tc: &mut TrafficControl, line: u32) {
#[cfg(feature = "dev_virtio")] #[cfg(feature = "dev_virtio")]
virtio::framebuffer_update(tc, x, y, width, height); virtio::framebuffer_update(tc, line);
} }

View file

@ -1,4 +1,12 @@
use crate::dev::virtio::{Descriptor, VIRTIO_DESC_F_NEXT, VIRTIO_DESC_F_WRITE, VIRTIO_MMIO_GUEST_FEATURES, VIRTIO_MMIO_GUEST_PAGE_SIZE, VIRTIO_MMIO_HOST_FEATURES, VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_NUM_MAX, VIRTIO_MMIO_QUEUE_PFN, VIRTIO_MMIO_QUEUE_SEL, VIRTIO_MMIO_STATUS, VIRTIO_MMIO_STATUS_ACKNOWLEDGE, VIRTIO_MMIO_STATUS_DRIVER, VIRTIO_MMIO_STATUS_DRIVER_OK, VIRTIO_MMIO_STATUS_FAILED, VIRTIO_MMIO_STATUS_FEATURES_OK, VIRTIO_QUEUE_SIZE, VirtQueue, Used}; use crate::dev::virtio::{
Descriptor, Used, VIRTIO_DESC_F_NEXT, VIRTIO_DESC_F_WRITE, VIRTIO_MMIO_GUEST_FEATURES,
VIRTIO_MMIO_GUEST_PAGE_SIZE, VIRTIO_MMIO_HOST_FEATURES, VIRTIO_MMIO_QUEUE_NOTIFY,
VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_NUM_MAX, VIRTIO_MMIO_QUEUE_PFN, VIRTIO_MMIO_QUEUE_SEL,
VIRTIO_MMIO_STATUS, VIRTIO_MMIO_STATUS_ACKNOWLEDGE, VIRTIO_MMIO_STATUS_DRIVER,
VIRTIO_MMIO_STATUS_DRIVER_OK, VIRTIO_MMIO_STATUS_FAILED, VIRTIO_MMIO_STATUS_FEATURES_OK,
VIRTIO_QUEUE_SIZE, VirtQueue,
};
use crate::strprint::u32_hex;
use crate::trafficcontrol::{TaskWait, TrafficControl}; use crate::trafficcontrol::{TaskWait, TrafficControl};
unsafe extern "C" { unsafe extern "C" {
@ -58,7 +66,7 @@ impl VirtIoBlockDevice {
// read host features // read host features
let host_features = let host_features =
unsafe { ((addr + VIRTIO_MMIO_HOST_FEATURES) as *const u32).read_volatile() }; unsafe { ((addr + VIRTIO_MMIO_HOST_FEATURES) as *const u32).read_volatile() };
let guest_features = host_features & !(1 << 5); let guest_features = 0;
let read_only = host_features & (1 << 5) != 0; let read_only = host_features & (1 << 5) != 0;
unsafe { unsafe {
@ -159,10 +167,9 @@ impl VirtIoBlockDevice {
return; return;
} }
let blk_request = { let blk_request =
let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked() }.alloc_one_block(); { unsafe { tc.memory_manager.as_mut().unwrap_unchecked() }.alloc_one_block() }
unsafe { tc.memory_manager.as_mut().unwrap_unchecked().block_to_addr(block) } as *mut Request;
} as *mut Request;
let desc = Descriptor { let desc = Descriptor {
addr: unsafe { &(*blk_request) as *const _ as u64 }, addr: unsafe { &(*blk_request) as *const _ as u64 },
len: size_of::<Request>() as u32, len: size_of::<Request>() as u32,
@ -223,8 +230,18 @@ impl VirtIoBlockDevice {
let tid = unsafe { (*rq).tid }; let tid = unsafe { (*rq).tid };
let rq_block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked() }.addr_to_block(rq as usize); unsafe {
unsafe { tc.memory_manager.as_mut().unwrap_unchecked().free_one_block(rq_block); } tc.memory_manager
.as_mut()
.unwrap_unchecked()
.free_n_blocks(rq as usize, 1);
}
let actually_freed = unsafe {
tc.memory_manager
.as_mut()
.unwrap_unchecked()
.is_addr_free(rq as usize)
};
// awaken // awaken
if let Some(Some(task)) = tc.tasks.get_mut(tid as usize) { if let Some(Some(task)) = tc.tasks.get_mut(tid as usize) {

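The block device now offers nothing back during feature negotiation (guest_features = 0) and only inspects host feature bit 5 to learn whether the disk is read-only (VIRTIO_BLK_F_RO in the virtio-blk feature space). A compile-only sketch of that step follows, assuming the standard legacy virtio-mmio offsets for the two feature registers (the file defines its own constants for these).

```rust
// Sketch of the feature-negotiation step as done above: read host features,
// note the read-only bit, and offer nothing back. Offsets assume the legacy
// virtio-mmio register layout; this fragment needs real MMIO to actually run.
const VIRTIO_MMIO_HOST_FEATURES: usize = 0x010;
const VIRTIO_MMIO_GUEST_FEATURES: usize = 0x020;
const VIRTIO_BLK_F_RO: u32 = 1 << 5;

/// `addr` is the device's MMIO base; returns whether the disk is read-only.
unsafe fn negotiate(addr: usize) -> bool {
    unsafe {
        let host = ((addr + VIRTIO_MMIO_HOST_FEATURES) as *const u32).read_volatile();
        let read_only = host & VIRTIO_BLK_F_RO != 0;
        // accept no optional features at all, matching guest_features = 0 above
        ((addr + VIRTIO_MMIO_GUEST_FEATURES) as *mut u32).write_volatile(0);
        read_only
    }
}
```
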
View file

@ -1,12 +1,11 @@
use crate::arch::serial_port; use crate::arch::serial_port;
use crate::dev::{FRAMEBUFFER_HEIGHT, FRAMEBUFFER_WIDTH}; use crate::dev::{FRAMEBUFFER_HEIGHT, FRAMEBUFFER_WIDTH, LINEBUFFER_BLOCKS};
use crate::dev::virtio::{heap_allocate_type, Descriptor, VirtQueue, VIRTIO_DESC_F_NEXT, VIRTIO_DESC_F_WRITE, VIRTIO_MMIO_GUEST_FEATURES, VIRTIO_MMIO_GUEST_PAGE_SIZE, VIRTIO_MMIO_HOST_FEATURES, VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_NUM_MAX, VIRTIO_MMIO_QUEUE_PFN, VIRTIO_MMIO_QUEUE_SEL, VIRTIO_MMIO_STATUS, VIRTIO_MMIO_STATUS_ACKNOWLEDGE, VIRTIO_MMIO_STATUS_DRIVER, VIRTIO_MMIO_STATUS_DRIVER_OK, VIRTIO_MMIO_STATUS_FAILED, VIRTIO_MMIO_STATUS_FEATURES_OK, VIRTIO_QUEUE_SIZE}; use crate::dev::virtio::{heap_allocate_type, Descriptor, VirtQueue, VIRTIO_DESC_F_NEXT, VIRTIO_DESC_F_WRITE, VIRTIO_MMIO_GUEST_FEATURES, VIRTIO_MMIO_GUEST_PAGE_SIZE, VIRTIO_MMIO_HOST_FEATURES, VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_NUM_MAX, VIRTIO_MMIO_QUEUE_PFN, VIRTIO_MMIO_QUEUE_SEL, VIRTIO_MMIO_STATUS, VIRTIO_MMIO_STATUS_ACKNOWLEDGE, VIRTIO_MMIO_STATUS_DRIVER, VIRTIO_MMIO_STATUS_DRIVER_OK, VIRTIO_MMIO_STATUS_FAILED, VIRTIO_MMIO_STATUS_FEATURES_OK, VIRTIO_QUEUE_SIZE};
use crate::strprint::u32_hex; use crate::strprint::u32_hex;
use crate::trafficcontrol::TrafficControl; use crate::trafficcontrol::TrafficControl;
unsafe extern "C" { unsafe extern "C" {
fn _virtio_queue_2_start(); fn _virtio_queue_2_start();
fn _framebuffer_start();
} }
pub const VIRTIO_GPU_CMD_GET_DISPLAY_INFO: u32 = 0x0100; pub const VIRTIO_GPU_CMD_GET_DISPLAY_INFO: u32 = 0x0100;
@ -181,7 +180,9 @@ impl VirtIoGpuDevice {
} }
// allocate memory for framebuffer // allocate memory for linebuffer
let linebuffer_ptr = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(LINEBUFFER_BLOCKS) };
let latest_response = heap_allocate_type::<CtrlHeader>(tc) as *const _ as usize; let latest_response = heap_allocate_type::<CtrlHeader>(tc) as *const _ as usize;
let mut gpu = Self { let mut gpu = Self {
@ -190,7 +191,7 @@ impl VirtIoGpuDevice {
queue: queue_ptr, queue: queue_ptr,
idx: 0, idx: 0,
ack_used_idx: 0, ack_used_idx: 0,
framebuffer: _framebuffer_start as usize, framebuffer: linebuffer_ptr as usize,
width: FRAMEBUFFER_WIDTH, width: FRAMEBUFFER_WIDTH,
height: FRAMEBUFFER_HEIGHT, height: FRAMEBUFFER_HEIGHT,
}; };
@ -251,7 +252,7 @@ impl VirtIoGpuDevice {
free_me[1] = unsafe { &(*cmd) as *const _ as usize }; free_me[1] = unsafe { &(*cmd) as *const _ as usize };
let mem_entry = heap_allocate_type::<GPUMemEntry>(tc); let mem_entry = heap_allocate_type::<GPUMemEntry>(tc);
mem_entry.addr = gpu.framebuffer as u64; mem_entry.addr = gpu.framebuffer as u64;
mem_entry.length = (gpu.height * gpu.width * 4) as u32; mem_entry.length = (gpu.width * 4) as u32;
mem_entry.padding = 0; mem_entry.padding = 0;
free_me[2] = unsafe { &(*mem_entry) as *const _ as usize }; free_me[2] = unsafe { &(*mem_entry) as *const _ as usize };
@ -321,14 +322,11 @@ impl VirtIoGpuDevice {
//gpu.pending(tc); //gpu.pending(tc);
for free in free_me { for free in free_me {
let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().addr_to_block(free) };
unsafe { unsafe {
tc.memory_manager.as_mut().unwrap_unchecked().free_n_blocks(block, 1); tc.memory_manager.as_mut().unwrap_unchecked().free_n_blocks(free, 1);
} }
} }
gpu.transfer(tc, 0, 0, gpu.width as u32, gpu.height as u32);
Ok(gpu) Ok(gpu)
} }
@ -353,8 +351,12 @@ impl VirtIoGpuDevice {
} }
unsafe { unsafe {
(*(self.queue as *mut VirtQueue)).avail.ring[(*(self.queue as *mut VirtQueue)).avail.idx as usize % VIRTIO_QUEUE_SIZE] = head; (*(self.queue as *mut VirtQueue)).avail.ring
(*(self.queue as *mut VirtQueue)).avail.idx = (*(self.queue as *mut VirtQueue)).avail.idx.wrapping_add(1); [(*(self.queue as *mut VirtQueue)).avail.idx as usize % VIRTIO_QUEUE_SIZE] = head;
}
unsafe {
(*(self.queue as *mut VirtQueue)).avail.idx =
(*(self.queue as *mut VirtQueue)).avail.idx.wrapping_add(1);
} }
// notify // notify
@ -384,8 +386,12 @@ impl VirtIoGpuDevice {
} }
unsafe { unsafe {
(*(self.queue as *mut VirtQueue)).avail.ring[(*(self.queue as *mut VirtQueue)).avail.idx as usize % VIRTIO_QUEUE_SIZE] = head; (*(self.queue as *mut VirtQueue)).avail.ring
(*(self.queue as *mut VirtQueue)).avail.idx = (*(self.queue as *mut VirtQueue)).avail.idx.wrapping_add(1); [(*(self.queue as *mut VirtQueue)).avail.idx as usize % VIRTIO_QUEUE_SIZE] = head;
}
unsafe {
(*(self.queue as *mut VirtQueue)).avail.idx =
(*(self.queue as *mut VirtQueue)).avail.idx.wrapping_add(1);
} }
// notify // notify
@ -397,42 +403,12 @@ impl VirtIoGpuDevice {
// WARNING: THIS VERSION OF PENDING DOES NOT FREE ANYTHING // WARNING: THIS VERSION OF PENDING DOES NOT FREE ANYTHING
pub fn pending(&mut self, tc: &mut TrafficControl) { pub fn pending(&mut self, tc: &mut TrafficControl) {
let queue = unsafe { &(*(self.queue as *mut VirtQueue)) }; let queue = unsafe { &(*(self.queue as *mut VirtQueue)) };
let mut pended = false;
while self.ack_used_idx != queue.used.idx { while self.ack_used_idx != queue.used.idx {
pended = true;
let uart = serial_port();
if let Some(uart) = uart {
//uart.putstr("pended\n");
}
let elem = &queue.used.ring[self.ack_used_idx as usize % VIRTIO_QUEUE_SIZE];
let desc = &queue.desc[elem.id as usize];
let addr = desc.addr as usize;
//let num_blocks = (desc.len as usize).div_ceil(512);
//let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().addr_to_block(addr) };
//// free
//unsafe {
// tc.memory_manager.as_mut().unwrap_unchecked().free_n_blocks(block, num_blocks);
//}
self.ack_used_idx = self.ack_used_idx.wrapping_add(1); self.ack_used_idx = self.ack_used_idx.wrapping_add(1);
if self.ack_used_idx >= VIRTIO_QUEUE_SIZE as u16 {
self.ack_used_idx = 0;
}
}
if !pended {
let uart = serial_port();
if let Some(uart) = uart {
uart.putstr("no pended\n");
}
} }
} }
pub fn transfer(&mut self, tc: &mut TrafficControl, x: u32, y: u32, width: u32, height: u32) { pub fn transfer(&mut self, tc: &mut TrafficControl, y: u32) {
let uart = serial_port();
if let Some(uart) = uart {
//uart.putstr("transfer\n");
}
let mut free_me = [0; 4]; let mut free_me = [0; 4];
let cmd = heap_allocate_type::<TransferToHost2D>(tc); let cmd = heap_allocate_type::<TransferToHost2D>(tc);
@ -442,10 +418,10 @@ impl VirtIoGpuDevice {
cmd.header.ctx_id = 0; cmd.header.ctx_id = 0;
cmd.header.ring_idx = 0; cmd.header.ring_idx = 0;
cmd.header.padding = [0; 3]; cmd.header.padding = [0; 3];
cmd.rect.x = x; cmd.rect.x = 0;
cmd.rect.y = y; cmd.rect.y = y;
cmd.rect.width = width; cmd.rect.width = self.width as u32;
cmd.rect.height = height; cmd.rect.height = 1;
cmd.offset = 0; cmd.offset = 0;
cmd.resource_id = 1; cmd.resource_id = 1;
cmd.padding = 0; cmd.padding = 0;
@ -475,10 +451,10 @@ impl VirtIoGpuDevice {
cmd.header.ctx_id = 0; cmd.header.ctx_id = 0;
cmd.header.ring_idx = 0; cmd.header.ring_idx = 0;
cmd.header.padding = [0; 3]; cmd.header.padding = [0; 3];
cmd.rect.x = x; cmd.rect.x = 0;
cmd.rect.y = y; cmd.rect.y = y;
cmd.rect.width = width; cmd.rect.width = self.width as u32;
cmd.rect.height = height; cmd.rect.height = 1;
cmd.resource_id = 1; cmd.resource_id = 1;
cmd.padding = 0; cmd.padding = 0;
free_me[2] = unsafe { &(*cmd) as *const _ as usize }; free_me[2] = unsafe { &(*cmd) as *const _ as usize };
@ -497,17 +473,9 @@ impl VirtIoGpuDevice {
}; };
self.send_rq_rsp(desc_rq, desc_resp); self.send_rq_rsp(desc_rq, desc_resp);
// awful hack so that we don't overload the queue
while {
self.ack_used_idx == unsafe { (self.queue as *const VirtQueue).read_volatile().used.idx }
} {
}
//self.pending(tc);
for free in free_me.into_iter().skip(1) { for free in free_me.into_iter().skip(1) {
let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().addr_to_block(free) };
unsafe { unsafe {
tc.memory_manager.as_mut().unwrap_unchecked().free_n_blocks(block, 1); tc.memory_manager.as_mut().unwrap_unchecked().free_n_blocks(free, 1);
} }
} }
} }

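transfer now flushes a single scanline per call: both TransferToHost2D and the resource flush use a rect at x = 0, the requested y, the full width, and height 1. The avail-ring update that follows is the usual two-step pattern: write the slot picked by idx % VIRTIO_QUEUE_SIZE, then bump idx with wrapping arithmetic. A tiny standalone sketch of that ring-push pattern, with a plain struct standing in for the shared VirtQueue:

```rust
// Sketch of the avail-ring push used above: store the descriptor head in the
// slot selected by idx % QUEUE_SIZE, then increment idx (wrapping). The real
// driver does this through raw pointers into the shared VirtQueue pages.
const QUEUE_SIZE: usize = 16;

struct Avail {
    idx: u16,
    ring: [u16; QUEUE_SIZE],
}

fn push_head(avail: &mut Avail, head: u16) {
    avail.ring[avail.idx as usize % QUEUE_SIZE] = head;
    // the device consumes entries up to idx, so idx is bumped only after the
    // slot is filled; wrapping_add keeps the free-running u16 counter correct
    avail.idx = avail.idx.wrapping_add(1);
}

fn main() {
    let mut avail = Avail { idx: u16::MAX, ring: [0; QUEUE_SIZE] };
    push_head(&mut avail, 3);
    push_head(&mut avail, 7);
    assert_eq!(avail.idx, 1); // wrapped past u16::MAX
    assert_eq!(avail.ring[u16::MAX as usize % QUEUE_SIZE], 3);
    println!("avail.idx = {}", avail.idx);
}
```
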
View file

@ -2,7 +2,7 @@
//! WARNING: virtio is currently completely broken! don't use it! //! WARNING: virtio is currently completely broken! don't use it!
use core::sync::atomic::Ordering; use core::sync::atomic::Ordering;
use crate::dev::{FRAMEBUFFER_ADDR, FRAMEBUFFER_BPP}; use crate::dev::{LINEBUFFER_ADDR, LINEBUFFER_BPP};
use crate::dev::virtio::block::{VirtIoBlockDevice, VirtIoBlockDeviceError}; use crate::dev::virtio::block::{VirtIoBlockDevice, VirtIoBlockDeviceError};
use crate::dev::virtio::gpu::{VirtIoGpuDevice, VirtIoGpuDeviceError}; use crate::dev::virtio::gpu::{VirtIoGpuDevice, VirtIoGpuDeviceError};
use crate::spinlock::Spinlock; use crate::spinlock::Spinlock;
@ -36,7 +36,7 @@ pub const VIRTIO_MMIO_STATUS_FAILED: u32 = 1 << 7;
pub const VIRTIO_DESC_F_NEXT: u16 = 1 << 0; pub const VIRTIO_DESC_F_NEXT: u16 = 1 << 0;
pub const VIRTIO_DESC_F_WRITE: u16 = 1 << 1; pub const VIRTIO_DESC_F_WRITE: u16 = 1 << 1;
pub const VIRTIO_QUEUE_SIZE: usize = 128; pub const VIRTIO_QUEUE_SIZE: usize = 16;
pub static VIRTIO_DEVICES: Spinlock<[Option<VirtIoDevice>; VIRTIO_MMIO_DEVCOUNT]> = Spinlock::new([const { None }; VIRTIO_MMIO_DEVCOUNT]); pub static VIRTIO_DEVICES: Spinlock<[Option<VirtIoDevice>; VIRTIO_MMIO_DEVCOUNT]> = Spinlock::new([const { None }; VIRTIO_MMIO_DEVCOUNT]);
pub static VIRTIO_HARD_BLOCK_DEVICE: Spinlock<Option<u8>> = Spinlock::new(None); pub static VIRTIO_HARD_BLOCK_DEVICE: Spinlock<Option<u8>> = Spinlock::new(None);
@ -83,8 +83,7 @@ pub struct VirtQueue {
pub fn heap_allocate_type<T: Sized>(tc: &mut TrafficControl) -> &'static mut T { pub fn heap_allocate_type<T: Sized>(tc: &mut TrafficControl) -> &'static mut T {
let num_blocks = size_of::<T>().div_ceil(512); let num_blocks = size_of::<T>().div_ceil(512);
let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(num_blocks) }; let addr = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(num_blocks) };
let addr = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().block_to_addr(block) };
unsafe { &mut (*(addr as *mut T)) } unsafe { &mut (*(addr as *mut T)) }
} }
@ -131,11 +130,12 @@ pub fn probe_virtio_devices(tc: &mut TrafficControl) {
} }
} }
16 => { 16 => {
continue;
// gpu device // gpu device
let gpu_device = VirtIoGpuDevice::new_and_init(tc, addr); let gpu_device = VirtIoGpuDevice::new_and_init(tc, addr);
if let Ok(gpu_device) = gpu_device { if let Ok(gpu_device) = gpu_device {
FRAMEBUFFER_ADDR.store(gpu_device.framebuffer, Ordering::Relaxed); LINEBUFFER_ADDR.store(gpu_device.framebuffer, Ordering::Relaxed);
FRAMEBUFFER_BPP.store(4, Ordering::Relaxed); // virtio always uses 4 byte stride LINEBUFFER_BPP.store(4, Ordering::Relaxed); // virtio always uses 4 byte stride
devices[i] = Some(VirtIoDevice::GPUDevice(gpu_device)); devices[i] = Some(VirtIoDevice::GPUDevice(gpu_device));
*VIRTIO_GPU_DEVICE.lock() = Some(i as u8); *VIRTIO_GPU_DEVICE.lock() = Some(i as u8);
if let Some(serial_port) = &serial_port { if let Some(serial_port) = &serial_port {
@ -183,8 +183,17 @@ pub fn handle_interrupt(interrupt: u32, tc: &mut TrafficControl) {
} }
} }
} }
VirtIoDevice::GPUDevice(_) => { VirtIoDevice::GPUDevice(gpudev) => {
// todo: handle gpu interrupts return;
let gpu = {
let lock = VIRTIO_GPU_DEVICE.lock();
*lock
};
if let Some(gpu) = gpu {
if gpu as usize == idx {
gpudev.pending(tc);
}
}
} }
} }
} }
@ -222,7 +231,8 @@ pub fn read_sector(tc: &mut TrafficControl, buffer: usize, size: u32, sector: u6
false false
} }
pub fn framebuffer_update(tc: &mut TrafficControl, x: u32, y: u32, width: u32, height: u32) -> bool { pub fn framebuffer_update(tc: &mut TrafficControl, y: u32) -> bool {
return true;
let idx = { let idx = {
let lock = VIRTIO_GPU_DEVICE.lock(); let lock = VIRTIO_GPU_DEVICE.lock();
*lock *lock
@ -232,7 +242,7 @@ pub fn framebuffer_update(tc: &mut TrafficControl, x: u32, y: u32, width: u32, h
if let Some(device) = devices[idx as usize].as_mut() { if let Some(device) = devices[idx as usize].as_mut() {
return match device { return match device {
VirtIoDevice::GPUDevice(device) => { VirtIoDevice::GPUDevice(device) => {
device.transfer(tc, x, y, width, height); device.transfer(tc, y);
true true
} }
_ => { _ => {

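VIRTIO_QUEUE_SIZE drops from 128 to 16. For scale, here is a back-of-the-envelope size estimate for a legacy split virtqueue (16-byte descriptors, 2-byte avail entries, 8-byte used entries, used ring starting on the next 4 KiB page, no event-index fields); the numbers are illustrative and assume the legacy QueuePFN layout this driver uses.

```rust
// Rough footprint of a legacy split virtqueue for a given queue size,
// assuming a page-aligned used ring and no VIRTIO_F_EVENT_IDX fields.
fn virtqueue_footprint(qsz: usize, page: usize) -> usize {
    let desc = 16 * qsz;
    let avail = 2 * 2 + 2 * qsz; // flags + idx + ring[qsz]
    let used_off = (desc + avail).div_ceil(page) * page;
    let used = 2 * 2 + 8 * qsz; // flags + idx + ring[qsz]
    used_off + used
}

fn main() {
    for qsz in [16usize, 128] {
        println!("queue size {qsz}: ~{} bytes", virtqueue_footprint(qsz, 4096));
    }
    // both sizes fit the 0x10000-byte windows reserved by the linker script,
    // but 16 leaves far more headroom
    assert!(virtqueue_footprint(16, 4096) <= 0x10000);
}
```
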
View file

@ -1,4 +1,5 @@
use crate::fs::{Record, RecordType}; use crate::fs::{Record, RecordType};
use crate::strprint::u32_hex;
#[repr(packed(1), C)] #[repr(packed(1), C)]
pub struct BPB { pub struct BPB {
@ -286,9 +287,9 @@ pub fn seek_forward_one_sector(bpb: &BPBUseful, reader: &mut Fat32FileReader) ->
} }
let table_value = let table_value =
unsafe { ((fat_addr+(ent_offset)) as *const u32).read_volatile() } & 0x0FFFFFFF; unsafe { ((fat_addr+(ent_offset)) as *const u32).read_volatile() } & 0x0FFFFFFF;
crate::syscalls::free_blocks(fat_addr, 1);
if table_value >= 0x0FFFFFF8 { if table_value >= 0x0FFFFFF8 {
// no more clusters, whole file has been read // no more clusters, whole file has been read
crate::syscalls::free_blocks(fat_addr, 1);
reader.eof = 1; reader.eof = 1;
return false; return false;
} else if table_value >= 0x00000002 { } else if table_value >= 0x00000002 {
@ -298,12 +299,10 @@ pub fn seek_forward_one_sector(bpb: &BPBUseful, reader: &mut Fat32FileReader) ->
((reader.cluster - 2) * bpb.sectors_per_cluster as usize) + bpb.first_data_sector(); ((reader.cluster - 2) * bpb.sectors_per_cluster as usize) + bpb.first_data_sector();
reader.sector_offset = 0; reader.sector_offset = 0;
if table_value == 0x00000000 || table_value == 0x00000001 { if table_value == 0x00000000 || table_value == 0x00000001 {
crate::syscalls::free_blocks(fat_addr, 1);
reader.eof = 1; reader.eof = 1;
return false; return false;
} else if table_value == 0x0FFFFFF7 { } else if table_value == 0x0FFFFFF7 {
// bad cluster, stop reading // bad cluster, stop reading
crate::syscalls::free_blocks(fat_addr, 1);
crate::syscalls::write_terminal(b"badcluster"); crate::syscalls::write_terminal(b"badcluster");
reader.eof = 1; reader.eof = 1;
return false; return false;
@ -338,6 +337,7 @@ pub fn read_file(bpb: &BPBUseful, reader: &mut Fat32FileReader, buffer: &mut [u8
let mut left_in_sector = 512 - reader.sector_offset; let mut left_in_sector = 512 - reader.sector_offset;
if left_in_sector == 0 { if left_in_sector == 0 {
crate::syscalls::write_terminal(b"SECTORNULL?"); crate::syscalls::write_terminal(b"SECTORNULL?");
crate::syscalls::free_blocks(sector_addr, 1);
return false; return false;
} }
while buffer_idx < buffer.len() && left_in_sector > 0 { while buffer_idx < buffer.len() && left_in_sector > 0 {

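The FAT32 change frees the FAT sector once, immediately after the table entry is read, instead of repeating crate::syscalls::free_blocks(fat_addr, 1) on every early-return branch, and it adds the missing free of sector_addr on the "SECTORNULL?" path. Freeing at a single point right after the last use is one way to avoid that class of leak; a drop guard is another. Below is a hedged sketch of the guard variant, with hypothetical alloc_blocks/free_blocks shims standing in for the real syscalls (the kernel itself does not use this pattern).

```rust
// Sketch of an RAII guard that releases a block allocation on every exit
// path. alloc_blocks/free_blocks here are stand-ins, not the real syscalls.
fn alloc_blocks(n: usize) -> usize {
    println!("alloc {n} block(s)");
    0x8003_0000 // pretend address
}
fn free_blocks(addr: usize, n: usize) {
    println!("free {n} block(s) at {addr:#x}");
}

struct BlockGuard {
    addr: usize,
    n: usize,
}
impl Drop for BlockGuard {
    fn drop(&mut self) {
        free_blocks(self.addr, self.n); // runs on every return path
    }
}

fn read_fat_entry(bad_cluster: bool) -> Option<u32> {
    let guard = BlockGuard { addr: alloc_blocks(1), n: 1 };
    let _fat_sector = guard.addr; // ... read the FAT sector into this buffer ...
    if bad_cluster {
        return None; // guard still frees the block here
    }
    Some(0x0FFF_FFF8)
}

fn main() {
    assert!(read_fat_entry(true).is_none());
    assert!(read_fat_entry(false).is_some());
}
```
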
View file

@ -10,16 +10,11 @@ unsafe extern "C" {
} }
pub const BLOCK_SIZE: usize = 512; pub const BLOCK_SIZE: usize = 512;
#[cfg(feature = "arch_virt")]
pub const TOTAL_MEMORY: usize = 1048510;
#[cfg(feature = "arch_ppc32")]
pub const TOTAL_MEMORY: usize = 1024 * 1024; // 1MiB;
pub const MEM_BLOCKS: usize = TOTAL_MEMORY / BLOCK_SIZE;
pub struct MemoryManager { pub struct MemoryManager {
pub heap_start: usize, pub heap_start: usize,
pub heap_size: usize, pub heap_size: usize,
pub blockmap: [u8; (MEM_BLOCKS+7) / 8], pub blockmap: [u8; 128/8]
} }
impl MemoryManager { impl MemoryManager {
@ -28,15 +23,19 @@ impl MemoryManager {
{ {
let uart = serial_port(); let uart = serial_port();
if let Some(uart) = uart { if let Some(uart) = uart {
uart.putstr("heap_start: ");
uart.put_bytes(&u32_hex(_heap_start as u32));
uart.putstr("totalmem: "); uart.putstr("totalmem: ");
uart.put_bytes(&u32_hex(MEM_BLOCKS as u32)); uart.put_bytes(&u32_hex(_heap_size as u32));
uart.putstr("\n"); uart.putstr("\n");
} }
} }
Self { Self {
heap_start: _heap_start as _, heap_start: _heap_start as _,
heap_size: _heap_size as _, heap_size: _heap_size as _,
blockmap: [0; (MEM_BLOCKS+7) / 8],
blockmap: [0; 16],
} }
} }
#[cfg(feature = "arch_ppc32")] #[cfg(feature = "arch_ppc32")]
@ -47,22 +46,7 @@ impl MemoryManager {
blockmap: [0; (MEM_BLOCKS+7) / 8], blockmap: [0; (MEM_BLOCKS+7) / 8],
} }
} }
pub fn is_block_free(&self, block: usize) -> bool {
let block = block % 8;
let block = block / 8;
let val = self.blockmap[block];
(val & (1 << block)) == 0
}
pub fn block_to_addr(&self, block: usize) -> usize {
block * BLOCK_SIZE + self.heap_start
}
pub fn addr_to_block(&self, addr: usize) -> usize {
(addr - self.heap_start) / BLOCK_SIZE
}
pub fn alloc_one_block(&mut self) -> usize { pub fn alloc_one_block(&mut self) -> usize {
/* /*
for (i, v) in self.blockmap.iter_mut().enumerate() { for (i, v) in self.blockmap.iter_mut().enumerate() {
@ -81,11 +65,19 @@ impl MemoryManager {
*/ */
self.alloc_n_blocks(1) self.alloc_n_blocks(1)
} }
pub fn free_one_block(&mut self, block: usize) { pub fn free_one_block(&mut self, addr: usize) {
let block = (addr - self.heap_start) / BLOCK_SIZE;
self.blockmap[block / 8] &= !(1 << (block % 8)); self.blockmap[block / 8] &= !(1 << (block % 8));
} }
pub fn is_addr_free(&self, addr: usize) -> bool {
let block = (addr - self.heap_start) / BLOCK_SIZE;
let val = (1 << (block % 8)) & self.blockmap[block / 8];
val == 0
}
// can easily fail if too many blocks are requested, will return 0 on failure // can easily fail if too many blocks are requested, will return 0 on failure
// RETURNS ADDRESS
pub fn alloc_n_blocks(&mut self, n: usize) -> usize { pub fn alloc_n_blocks(&mut self, n: usize) -> usize {
if n == 0 { if n == 0 {
return 0; return 0;
@ -95,23 +87,29 @@ impl MemoryManager {
for i in 0..self.blockmap.len() { for i in 0..self.blockmap.len() {
for j in 0..8 { for j in 0..8 {
let block = i * 8 + j; let block = i * 8 + j;
let valid = block < (self.heap_size / BLOCK_SIZE);
let val = (1 << j) & self.blockmap[i]; let val = (1 << j) & self.blockmap[i];
if val == 0 { if val == 0 && valid {
// this is free // this is free
self.blockmap[i] |= 1 << j; self.blockmap[i] |= 1 << j;
if first_block.is_none() { if first_block.is_none() {
first_block = Some(block); first_block = Some(block);
} }
found += 1; found += 1;
if found >= n { if found == n {
return first_block.unwrap(); let addr = (first_block.unwrap() * BLOCK_SIZE) + self.heap_start;
return addr;
}
if found > n {
rough_panic(['h', 'm', '?'])
} }
} else { } else {
// used, restart search // used, restart search
let mut i = 0; let mut i = 0;
while found > 0 { while found > 0 {
found -= 1; found -= 1;
self.free_one_block(first_block.unwrap() + i); let addr = ((first_block.unwrap() * BLOCK_SIZE) + self.heap_start) + (i * BLOCK_SIZE);
self.free_one_block(addr);
i += 1; i += 1;
} }
first_block = None; first_block = None;
@ -122,19 +120,24 @@ impl MemoryManager {
rough_panic(['o', 'o', 'm']) rough_panic(['o', 'o', 'm'])
} }
pub fn free_n_blocks(&mut self, block: usize, n: usize) { pub fn free_n_blocks(&mut self, addr: usize, n: usize) {
if n == 0 || block >= MEM_BLOCKS || block + n > MEM_BLOCKS { if n == 0 || addr >= self.heap_start + self.heap_size || addr + n * BLOCK_SIZE > self.heap_start + self.heap_size {
return; rough_panic(['b', 'm', 'f'])
} }
for i in 0..n { for i in 0..n {
self.free_one_block(block + i); self.free_one_block(addr + (i * BLOCK_SIZE));
} }
} }
pub fn used_blocks(&self) -> usize { pub fn used_blocks(&self) -> usize {
let mut used_blocks = 0; let mut used_blocks = 0;
for v in self.blockmap.iter() { for (i, v) in self.blockmap.iter().enumerate() {
for j in 0..8 { for j in 0..8 {
let block = i * 8 + j;
let valid = block < (self.heap_size / BLOCK_SIZE);
if !valid {
continue;
}
let val = (1 << j) & *v; let val = (1 << j) & *v;
if val != 0 { if val != 0 {
// used // used

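MemoryManager now works in addresses rather than block indices: alloc_n_blocks returns an address, free_n_blocks and free_one_block take one, each candidate block is checked against heap_size before being handed out, and a bad free rough_panics instead of silently returning. Below is a standalone, simplified sketch of the same first-fit bitmap scheme (fixed-size map, 512-byte blocks, addresses at the API boundary); it omits the kernel's panic-on-OOM behaviour and is for illustration only.

```rust
// Standalone sketch of a first-fit bitmap allocator: one bit per 512-byte
// block, addresses (not block indices) at the API boundary, and a bounds
// check so blocks past heap_size are never handed out.
const BLOCK_SIZE: usize = 512;
const MAX_BLOCKS: usize = 128;

struct BitmapAlloc {
    heap_start: usize,
    heap_size: usize,
    map: [u8; MAX_BLOCKS / 8],
}

impl BitmapAlloc {
    fn new(heap_start: usize, heap_size: usize) -> Self {
        Self { heap_start, heap_size, map: [0; MAX_BLOCKS / 8] }
    }
    fn is_addr_free(&self, addr: usize) -> bool {
        let block = (addr - self.heap_start) / BLOCK_SIZE;
        self.map[block / 8] & (1 << (block % 8)) == 0
    }
    fn free_n_blocks(&mut self, addr: usize, n: usize) {
        for i in 0..n {
            let block = (addr - self.heap_start) / BLOCK_SIZE + i;
            self.map[block / 8] &= !(1 << (block % 8));
        }
    }
    /// First fit: find n consecutive free, in-bounds blocks; 0 on failure.
    fn alloc_n_blocks(&mut self, n: usize) -> usize {
        if n == 0 {
            return 0;
        }
        let limit = self.heap_size / BLOCK_SIZE;
        let mut run = 0;
        for block in 0..MAX_BLOCKS.min(limit) {
            if self.map[block / 8] & (1 << (block % 8)) == 0 {
                run += 1;
                if run == n {
                    let first = block + 1 - n;
                    for b in first..=block {
                        self.map[b / 8] |= 1 << (b % 8);
                    }
                    return self.heap_start + first * BLOCK_SIZE;
                }
            } else {
                run = 0; // used block breaks the run, restart the search
            }
        }
        0
    }
}

fn main() {
    let mut m = BitmapAlloc::new(0x8001_8000, 16 * BLOCK_SIZE);
    let a = m.alloc_n_blocks(1);
    let b = m.alloc_n_blocks(2);
    assert!(!m.is_addr_free(a) && !m.is_addr_free(b) && !m.is_addr_free(b + BLOCK_SIZE));
    m.free_n_blocks(a, 1);
    m.free_n_blocks(b, 2);
    assert!(m.is_addr_free(a) && m.is_addr_free(b));
    println!("allocator sketch ok");
}
```
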
View file

@ -4,7 +4,8 @@ use crate::arch::serial_port;
use crate::memory::{BLOCK_SIZE, MemoryManager}; use crate::memory::{BLOCK_SIZE, MemoryManager};
use crate::spinlock::Spinlock; use crate::spinlock::Spinlock;
use crate::syscalls::SysCall; use crate::syscalls::SysCall;
use crate::{arch}; use crate::{arch, rough_panic};
use crate::strprint::u32_hex;
#[repr(u32)] #[repr(u32)]
pub enum TaskWait { pub enum TaskWait {
@ -13,7 +14,7 @@ pub enum TaskWait {
WaitForTaskExit = 1 << 2, WaitForTaskExit = 1 << 2,
} }
pub const STACK_SIZE_IN_BLOCKS: usize = 16; pub const STACK_SIZE_IN_BLOCKS: usize = 8;
pub const MAX_TASKS: usize = 8; pub const MAX_TASKS: usize = 8;
@ -69,19 +70,12 @@ pub fn handle_syscall(
let add = a1 as *mut TaskSetup; let add = a1 as *mut TaskSetup;
tc.init_mem_if_not(); tc.init_mem_if_not();
let blockalloc = unsafe {
tc.memory_manager
.as_mut()
.unwrap_unchecked()
.alloc_n_blocks(STACK_SIZE_IN_BLOCKS)
};
let sp = unsafe { let sp = unsafe {
tc.memory_manager tc.memory_manager
.as_mut() .as_mut()
.unwrap_unchecked() .unwrap_unchecked()
.block_to_addr(blockalloc) .alloc_n_blocks(STACK_SIZE_IN_BLOCKS)
+ (BLOCK_SIZE * STACK_SIZE_IN_BLOCKS) } + (BLOCK_SIZE * STACK_SIZE_IN_BLOCKS);
};
#[cfg(feature = "arch_virt")] #[cfg(feature = "arch_virt")]
let t = Some(Task { let t = Some(Task {
@ -93,10 +87,8 @@ pub fn handle_syscall(
incoming_notifications: [const { None }; MAX_TASKS], incoming_notifications: [const { None }; MAX_TASKS],
ackwait: [0; MAX_TASKS], ackwait: [0; MAX_TASKS],
task_wait: 0, task_wait: 0,
ddi_mem_start_block: unsafe { ddi_mem_start: unsafe {
tc.memory_manager (*add).ddi_first_addr
.as_mut().unwrap_unchecked()
.addr_to_block((*add).ddi_first_addr)
}, },
ddi_mem_blocks_count: unsafe { (*add).ddi_size / BLOCK_SIZE }, ddi_mem_blocks_count: unsafe { (*add).ddi_size / BLOCK_SIZE },
trap_frame: arch::virt::tasks::setup_task(sp), trap_frame: arch::virt::tasks::setup_task(sp),
@ -134,7 +126,7 @@ pub fn handle_syscall(
tc.memory_manager tc.memory_manager
.as_mut() .as_mut()
.unwrap_unchecked() .unwrap_unchecked()
.free_one_block(blockalloc); .free_n_blocks(sp, 1);
} }
MAX_TASKS + 1 MAX_TASKS + 1
@ -163,34 +155,22 @@ pub fn handle_syscall(
} }
SysCall::AllocBlocks => { SysCall::AllocBlocks => {
let count = a1; let count = a1;
let block = unsafe {
tc.memory_manager
.as_mut()
.unwrap_unchecked()
.alloc_n_blocks(count)
};
let addr = unsafe { let addr = unsafe {
tc.memory_manager tc.memory_manager
.as_mut() .as_mut()
.unwrap_unchecked() .unwrap_unchecked()
.block_to_addr(block) .alloc_n_blocks(count)
}; };
addr addr
} }
SysCall::FreeBlocks => { SysCall::FreeBlocks => {
let addr = a1; let addr = a1;
let count = a2; let count = a2;
let block = unsafe {
tc.memory_manager
.as_mut()
.unwrap_unchecked()
.addr_to_block(addr)
};
unsafe { unsafe {
tc.memory_manager tc.memory_manager
.as_mut() .as_mut()
.unwrap_unchecked() .unwrap_unchecked()
.free_n_blocks(block, count) .free_n_blocks(addr, count)
}; };
0 0
@ -236,8 +216,34 @@ pub fn handle_syscall(
} }
SysCall::InitKernel => { SysCall::InitKernel => {
tc.init_mem_if_not(); tc.init_mem_if_not();
{
// sanity check
const FAILURE: &str = "memory manager sanity check failed";
let mem = unsafe {
tc.memory_manager.as_mut().unwrap_unchecked()
};
let addr = mem.alloc_one_block();
let addr2 = mem.alloc_n_blocks(2);
if mem.is_addr_free(addr) || mem.is_addr_free(addr2) || mem.is_addr_free(addr2 + 512) {
let uart = serial_port();
if let Some(uart) = uart {
uart.putstr(FAILURE);
}
rough_panic([';', '-', ';'])
}
mem.free_one_block(addr);
mem.free_n_blocks(addr2, 2);
if !(mem.is_addr_free(addr) || mem.is_addr_free(addr2) || mem.is_addr_free(addr2 + 512)) {
let uart = serial_port();
if let Some(uart) = uart {
uart.putstr(FAILURE);
}
rough_panic([';', '-', ';'])
}
}
crate::dev::probe_devices(&mut tc); crate::dev::probe_devices(&mut tc);
if crate::dev::FRAMEBUFFER_ADDR.load(Ordering::Relaxed) != 0 { if crate::dev::LINEBUFFER_ADDR.load(Ordering::Relaxed) != 0 {
tc.use_fb_console = true; tc.use_fb_console = true;
{ {
let uart = serial_port(); let uart = serial_port();
@ -251,7 +257,6 @@ pub fn handle_syscall(
SysCall::SendNotification => { SysCall::SendNotification => {
let taskid = a1; let taskid = a1;
let addr = a2; let addr = a2;
let addr_block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().addr_to_block(addr) };
let ci = tc.current; let ci = tc.current;
if let Some(Some(task)) = tc.tasks.get_mut(taskid).as_mut() { if let Some(Some(task)) = tc.tasks.get_mut(taskid).as_mut() {
let waiting = task.wait & TaskWait::WaitForNotification as u32 != 0; let waiting = task.wait & TaskWait::WaitForNotification as u32 != 0;
@ -268,7 +273,7 @@ pub fn handle_syscall(
current_task.ackwait[taskid] = 1; current_task.ackwait[taskid] = 1;
} }
} else { } else {
task.incoming_notifications[ci] = Some(addr_block); // queue it for them task.incoming_notifications[ci] = Some(addr); // queue it for them
} }
0 0
} else { } else {
@ -281,15 +286,9 @@ pub fn handle_syscall(
suspend = true; suspend = true;
if let Some(task) = tc.tasks[ci].as_mut() { if let Some(task) = tc.tasks[ci].as_mut() {
// is there already a pending notification? // is there already a pending notification?
for (i, block) in task.incoming_notifications.iter_mut().enumerate() { for (i, addr) in task.incoming_notifications.iter_mut().enumerate() {
if let Some(block) = block.take() { if let Some(addr) = addr.take() {
// yes, there is // yes, there is
let addr = unsafe {
tc.memory_manager
.as_mut()
.unwrap_unchecked()
.block_to_addr(block as usize)
};
if let Some(sender_task) = tc.tasks[i].as_mut() { if let Some(sender_task) = tc.tasks[i].as_mut() {
if sender_task.ackwait[ci] == 3 { // they are waiting for us to ack if sender_task.ackwait[ci] == 3 { // they are waiting for us to ack
sender_task.ackwait[ci] = 0; sender_task.ackwait[ci] = 0;
@ -352,7 +351,7 @@ pub fn handle_syscall(
0 0
} }
SysCall::EnableFramebufferConsole => { SysCall::EnableFramebufferConsole => {
if crate::dev::FRAMEBUFFER_ADDR.load(Ordering::Relaxed) != 0 { if crate::dev::LINEBUFFER_ADDR.load(Ordering::Relaxed) != 0 {
tc.use_fb_console = true; tc.use_fb_console = true;
// clear the screen // clear the screen
let mut fbcons = crate::dev::framebuffer::console::FBCONSOLE.lock(); let mut fbcons = crate::dev::framebuffer::console::FBCONSOLE.lock();
@ -362,14 +361,14 @@ pub fn handle_syscall(
0 0
} }
SysCall::FramebufferPointer => { SysCall::FramebufferPointer => {
crate::dev::FRAMEBUFFER_ADDR.load(Ordering::Relaxed) crate::dev::LINEBUFFER_ADDR.load(Ordering::Relaxed)
} }
SysCall::FlushFramebufferRect => { SysCall::FlushFramebufferRect => {
let x = a1; let x = a1;
let y = a2; let y = a2;
let w = a3; let w = a3;
let h = a4; let h = a4;
crate::dev::framebuffer_update(&mut tc, x as u32, y as u32, w as u32, h as u32); crate::dev::linebuffer_push(&mut tc, y as u32);
0 0
} }
}, },
@ -389,20 +388,14 @@ pub fn context_switch<'a>(tc: &'a mut TrafficControl, current: Task) -> Option<&
if want_exit { if want_exit {
tc.init_mem_if_not(); tc.init_mem_if_not();
let sp = tc.tasks[i].as_ref().map(|v| v.sp).unwrap_or(0); let sp = tc.tasks[i].as_ref().map(|v| v.sp).unwrap_or(0);
let ddi_start_block = tc.tasks[i].as_ref().map(|v| v.ddi_mem_start_block).unwrap_or(0); let ddi_start_block = tc.tasks[i].as_ref().map(|v| v.ddi_mem_start).unwrap_or(0);
let ddi_blocks_count = tc.tasks[i].as_ref().map(|v| v.ddi_mem_blocks_count).unwrap_or(0); let ddi_blocks_count = tc.tasks[i].as_ref().map(|v| v.ddi_mem_blocks_count).unwrap_or(0);
if sp != 0 { if sp != 0 {
let stackblock = unsafe {
tc.memory_manager
.as_mut()
.unwrap_unchecked()
.addr_to_block(sp - (BLOCK_SIZE * STACK_SIZE_IN_BLOCKS))
};
unsafe { unsafe {
tc.memory_manager tc.memory_manager
.as_mut() .as_mut()
.unwrap_unchecked() .unwrap_unchecked()
.free_n_blocks(stackblock, STACK_SIZE_IN_BLOCKS) .free_n_blocks(sp - (BLOCK_SIZE * STACK_SIZE_IN_BLOCKS), STACK_SIZE_IN_BLOCKS)
}; };
unsafe { unsafe {
tc.memory_manager.as_mut().unwrap_unchecked() tc.memory_manager.as_mut().unwrap_unchecked()
@ -475,7 +468,7 @@ pub struct Task {
pub incoming_notifications: [Option<usize>; MAX_TASKS], pub incoming_notifications: [Option<usize>; MAX_TASKS],
pub ackwait: [u8; MAX_TASKS], // 3 = they have not yet received the notification, 1 = they have received the notification, 0 = we know they received the notification, or don't care pub ackwait: [u8; MAX_TASKS], // 3 = they have not yet received the notification, 1 = they have received the notification, 0 = we know they received the notification, or don't care
pub task_wait: u8, pub task_wait: u8,
pub ddi_mem_start_block: usize, pub ddi_mem_start: usize,
pub ddi_mem_blocks_count: usize, pub ddi_mem_blocks_count: usize,
#[cfg(feature = "arch_virt")] #[cfg(feature = "arch_virt")]
pub trap_frame: arch::virt::trap::TrapFrame, pub trap_frame: arch::virt::trap::TrapFrame,
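
Task stacks are now allocated and freed by address: alloc_n_blocks(STACK_SIZE_IN_BLOCKS) returns the base, sp is set to that base plus BLOCK_SIZE * STACK_SIZE_IN_BLOCKS, and on task exit the same range is freed by subtracting that size back off the saved sp. The arithmetic is easy to get backwards, so here is a small worked sketch of the round trip (addresses made up).

```rust
// Worked sketch of the stack base/top arithmetic used when spawning and
// tearing down a task. The base address is made up; only the math matters.
const BLOCK_SIZE: usize = 512;
const STACK_SIZE_IN_BLOCKS: usize = 8;

fn main() {
    // what alloc_n_blocks(STACK_SIZE_IN_BLOCKS) might hand back
    let base = 0x8001_9000usize;
    // the task's initial stack pointer sits at the top of the allocation
    let sp = base + BLOCK_SIZE * STACK_SIZE_IN_BLOCKS;
    // on exit, recover the allocation base from the saved sp and free it
    let freed_base = sp - BLOCK_SIZE * STACK_SIZE_IN_BLOCKS;
    assert_eq!(freed_base, base);
    println!(
        "stack: base {:#x}, sp {:#x}, free {} blocks at {:#x}",
        base, sp, STACK_SIZE_IN_BLOCKS, freed_base
    );
}
```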