use core::sync::atomic::Ordering;
use liblbos::TaskSetup;
use crate::arch::serial_port;
use crate::memory::{BLOCK_SIZE, MemoryManager};
use crate::spinlock::Spinlock;
use crate::syscalls::SysCall;
use crate::{arch, rough_panic};
use crate::strprint::u32_hex;

/// Reasons a task may be blocked; stored as bit flags in [`Task::wait`].
#[repr(u32)]
pub enum TaskWait {
    HardBlockDevOperation = 1 << 0,
    WaitForNotification = 1 << 1,
    WaitForTaskExit = 1 << 2,
}

/// Size of each task's stack, in memory-manager blocks.
pub const STACK_SIZE_IN_BLOCKS: usize = 8;

/// Maximum number of concurrently existing tasks.
pub const MAX_TASKS: usize = 8;

/// Global scheduler/kernel state ("traffic control"), shared behind a spinlock.
pub static TC: Spinlock<TrafficControl> = Spinlock::new(TrafficControl::empty());

/// Capacity of the input byte ring buffer.
pub const INBUF_LEN: usize = 32;

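/// A hedged sketch of how the [`SysCall::KernelInfo`] call is meant to be used from the
/// calling side: the caller passes a pointer to a `KernelInfo` it owns in `a1` and the
/// kernel fills in the fields it knows about. Illustrative only — real programs reach this
/// through the architecture's syscall trap rather than by calling `handle_syscall` directly.
///
/// ```ignore
/// let mut ki = KernelInfo {
///     current_process_count: 0,
///     total_mem_blocks: 0,
///     free_mem_blocks: 0,
///     input_task: 0,
///     output_task: 0,
/// };
/// let (_err, _suspended) =
///     handle_syscall(SysCall::KernelInfo, &mut ki as *mut KernelInfo as usize, 0, 0, 0, 0, 0);
/// // ki.total_mem_blocks and ki.free_mem_blocks now describe the kernel heap
/// ```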
#[repr(C)]
pub struct KernelInfo {
    pub current_process_count: usize,
    pub total_mem_blocks: usize,
    pub free_mem_blocks: usize,
    pub input_task: u8,
    pub output_task: u8,
}

/// Central syscall dispatcher.
///
/// Returns the syscall's result value together with a bool that is `true` when the calling
/// task has just been suspended and the caller should switch to another task instead of
/// returning to it.
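///
/// A minimal sketch of how a trap handler might dispatch into this function (illustrative
/// only; the real entry points live in the arch-specific trap code):
///
/// ```ignore
/// let (ret, suspended) = handle_syscall(SysCall::CurrentTask, 0, 0, 0, 0, 0, 0);
/// if suspended {
///     // hand the CPU to another task via context_switch() instead of returning
/// }
/// ```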
pub fn handle_syscall(
    sc: SysCall,
    a1: usize,
    a2: usize,
    a3: usize,
    a4: usize,
    a5: usize,
    a6: usize,
) -> (usize, bool) {
    let mut suspend = false;
    let mut tc = TC.lock();
    (
        match sc {
            SysCall::NoAction => 0,
            SysCall::KernelInfo => {
                let ki = a1 as *mut KernelInfo;

                let mut process_count = 0;
                for ts in &tc.tasks {
                    if ts.is_some() {
                        process_count += 1;
                    }
                }
                unsafe { (*ki).current_process_count = process_count };

                let heap_size = unsafe { tc.memory_manager.as_ref().unwrap_unchecked().heap_size };

                unsafe {
                    (*ki).total_mem_blocks = heap_size / BLOCK_SIZE;
                    (*ki).free_mem_blocks = (heap_size / BLOCK_SIZE)
                        - tc.memory_manager.as_ref().unwrap_unchecked().used_blocks();
                }
                0
            }
            SysCall::CreateTask => {
                let add = a1 as *mut TaskSetup;

                tc.init_mem_if_not();
                // the stack grows downwards, so sp points at the top of the fresh allocation
                let sp = unsafe {
                    tc.memory_manager
                        .as_mut()
                        .unwrap_unchecked()
                        .alloc_n_blocks(STACK_SIZE_IN_BLOCKS)
                } + (BLOCK_SIZE * STACK_SIZE_IN_BLOCKS);

                #[cfg(feature = "arch_virt")]
                let t = Some(Task {
                    epc: unsafe { (*add).epc },
                    environment: unsafe { (*add).environment },
                    want_exit: false,
                    sp,
                    wait: 0,
                    incoming_notifications: [const { None }; MAX_TASKS],
                    ackwait: [0; MAX_TASKS],
                    task_wait: 0,
                    ddi_mem_start: unsafe { (*add).ddi_first_addr },
                    ddi_mem_blocks_count: unsafe { (*add).ddi_size / BLOCK_SIZE },
                    trap_frame: arch::virt::tasks::setup_task(sp),
                });

                #[cfg(feature = "arch_ppc32")]
                let t = Some(Task {
                    // the arch-independent fields are filled in the same way as in the
                    // arch_virt variant above
                    environment: unsafe { (*add).environment },
                    want_exit: false,
                    sp,
                    wait: 0,
                    incoming_notifications: [const { None }; MAX_TASKS],
                    ackwait: [0; MAX_TASKS],
                    task_wait: 0,
                    ddi_mem_start: unsafe { (*add).ddi_first_addr },
                    ddi_mem_blocks_count: unsafe { (*add).ddi_size / BLOCK_SIZE },
                    trap_frame: arch::ppc32::user_program_initial_trapframe(
                        unsafe { (*add).epc },
                        sp,
                    ),
                });

                for (i, ts) in tc.tasks.iter_mut().enumerate() {
                    if ts.is_none() {
                        *ts = t;
                        if unsafe { (*add).wait_for_task_exit } {
                            let ci = tc.current;
                            let current_task = tc.tasks[ci].as_mut().unwrap();
                            current_task.wait |= TaskWait::WaitForTaskExit as u32;
                            current_task.task_wait = i as u8;
                            return (i, true);
                        } else {
                            return (i, false);
                        }
                    }
                }

                // no free task slot: give the freshly allocated stack back
                unsafe {
                    tc.memory_manager
                        .as_mut()
                        .unwrap_unchecked()
                        .free_n_blocks(sp - (BLOCK_SIZE * STACK_SIZE_IN_BLOCKS), STACK_SIZE_IN_BLOCKS);
                }

                // MAX_TASKS + 1 can never be a valid task index, so it doubles as the error value
                MAX_TASKS + 1
            }
            SysCall::ExitTask => {
                let current = tc.current;
                if let Some(Some(task)) = tc.tasks.get_mut(current) {
                    task.want_exit = true;
                }

                0
            }
            SysCall::CurrentTask => tc.current,
            SysCall::ReadInbuf => {
                let buf = unsafe { core::slice::from_raw_parts_mut(a1 as *mut u8, a2) };
                let mut read = 0;
                // drain buffered input into the caller's buffer, stopping before overrunning it
                while read < buf.len() {
                    if let Some(c) = tc.read_inbuf() {
                        buf[read] = c;
                        read += 1;
                    } else {
                        break;
                    }
                }

                read
            }
            SysCall::AllocBlocks => {
                let count = a1;
                unsafe {
                    tc.memory_manager
                        .as_mut()
                        .unwrap_unchecked()
                        .alloc_n_blocks(count)
                }
            }
            SysCall::FreeBlocks => {
                let addr = a1;
                let count = a2;
                unsafe {
                    tc.memory_manager
                        .as_mut()
                        .unwrap_unchecked()
                        .free_n_blocks(addr, count)
                };

                0
            }
            SysCall::WriteTerminal => {
                let addr = a1;
                let count = a2;
                if let Some(uart) = serial_port() {
                    uart.put_bytes(unsafe {
                        core::slice::from_raw_parts(addr as *const u8, count)
                    });
                }
                if tc.use_fb_console {
                    let mut fbcons = crate::dev::framebuffer::console::FBCONSOLE.lock();
                    fbcons.printstr(&mut tc, unsafe {
                        core::str::from_utf8_unchecked(core::slice::from_raw_parts(
                            addr as *const u8,
                            count,
                        ))
                    });
                }
                0
            }
            SysCall::ReadHBD => {
                let sector = a1;
                let buf_addr = a2;
                let count = a3;

                // block the task until the device signals completion; the flag is cleared
                // again below if the read request fails
                suspend = true;
                let ci = tc.current;
                if let Some(task) = tc.tasks[ci].as_mut() {
                    task.wait |= TaskWait::HardBlockDevOperation as u32;
                }

                if crate::dev::read_sector(&mut tc, buf_addr, count as u32 * 512, sector as u64) {
                    0
                } else {
                    suspend = false;
                    if let Some(task) = tc.tasks[ci].as_mut() {
                        task.wait &= !(TaskWait::HardBlockDevOperation as u32);
                    }
                    1
                }
            }
            SysCall::InitKernel => {
                tc.init_mem_if_not();
                {
                    // sanity check
                    const FAILURE: &str = "memory manager sanity check failed";
                    let mem = unsafe { tc.memory_manager.as_mut().unwrap_unchecked() };
                    let addr = mem.alloc_one_block();
                    let addr2 = mem.alloc_n_blocks(2);
                    // nothing we just allocated may still read as free
                    if mem.is_addr_free(addr) || mem.is_addr_free(addr2) || mem.is_addr_free(addr2 + 512) {
                        let uart = serial_port();
                        if let Some(uart) = uart {
                            uart.putstr(FAILURE);
                        }
                        rough_panic([';', '-', ';'])
                    }
                    mem.free_one_block(addr);
                    mem.free_n_blocks(addr2, 2);

                    // after freeing, all of the addresses must read as free again
                    if !mem.is_addr_free(addr) || !mem.is_addr_free(addr2) || !mem.is_addr_free(addr2 + 512) {
                        let uart = serial_port();
                        if let Some(uart) = uart {
                            uart.putstr(FAILURE);
                        }
                        rough_panic([';', '-', ';'])
                    }
                }
                crate::dev::probe_devices(&mut tc);
                if crate::dev::LINEBUFFER_ADDR.load(Ordering::Relaxed) != 0 {
                    tc.use_fb_console = true;
                    {
                        let uart = serial_port();
                        if let Some(uart) = uart {
                            uart.putstr("using framebuffer console\n");
                        }
                    }
                }
                0
            }
            SysCall::SendNotification => {
                let taskid = a1;
                let addr = a2;
                let ci = tc.current;
                if let Some(Some(task)) = tc.tasks.get_mut(taskid).as_mut() {
                    let waiting = task.wait & TaskWait::WaitForNotification as u32 != 0;
                    if waiting {
                        // they should stop waiting
                        task.wait &= !(TaskWait::WaitForNotification as u32);
                        // set up the receiver's frame so it resumes with the notification address
                        #[cfg(feature = "arch_virt")]
                        {
                            task.trap_frame.regs[10] = addr;
                        }
                        // they preemptively acked, so let's note that down
                        if let Some(current_task) = tc.tasks[ci].as_mut() {
                            current_task.ackwait[taskid] = 1;
                        }
                    } else {
                        task.incoming_notifications[ci] = Some(addr); // queue it for them
                    }
                    0
                } else {
                    1
                }
            }
            SysCall::WaitForNotification => {
                let ci = tc.current;
                let mut retaddr = 0;
                suspend = true;
                if let Some(task) = tc.tasks[ci].as_mut() {
                    // is there already a pending notification?
                    for (i, addr) in task.incoming_notifications.iter_mut().enumerate() {
                        if let Some(addr) = addr.take() {
                            // yes, there is
                            if let Some(sender_task) = tc.tasks[i].as_mut() {
                                if sender_task.ackwait[ci] == 3 {
                                    // they are waiting for us to ack
                                    sender_task.ackwait[ci] = 0;
                                } else {
                                    // they are not waiting yet, so preemptively ack them
                                    sender_task.ackwait[ci] = 1;
                                }
                            }
                            retaddr = addr;
                            suspend = false;
                            break;
                        }
                    }
                }

                if suspend {
                    // nothing pending: set the wait flag and suspend until a sender wakes us
                    if let Some(task) = tc.tasks[ci].as_mut() {
                        task.wait |= TaskWait::WaitForNotification as u32;
                    }
                }

                retaddr
            }
            SysCall::PendingNotifications => {
                let ci = tc.current;
                let mut pending = 0;
                if let Some(task) = tc.tasks[ci].as_mut() {
                    for block in task.incoming_notifications.iter() {
                        if block.is_some() {
                            pending += 1;
                        }
                    }
                }
                pending
            }
            SysCall::WaitForNotifAck => {
                let taskid = a1;
                let ci = tc.current;
                if let Some(task) = tc.tasks[ci].as_mut() {
                    let ack = task.ackwait[taskid];
                    if ack == 1 {
                        // the receiver already acked; consume the ack
                        task.ackwait[taskid] = 0;
                    } else {
                        // not acked yet: record that we are waiting (state 3) and suspend
                        suspend = true;
                        task.ackwait[taskid] = 3;
                    }
                }
                0
            }
            SysCall::EnvironmentPointer => {
                let ci = tc.current;
                if let Some(task) = tc.tasks[ci].as_ref() {
                    task.environment
                } else {
                    0
                }
            }
            SysCall::DisableFramebufferConsole => {
                tc.use_fb_console = false;
                0
            }
            SysCall::EnableFramebufferConsole => {
                if crate::dev::LINEBUFFER_ADDR.load(Ordering::Relaxed) != 0 {
                    tc.use_fb_console = true;
                    // clear the screen
                    let mut fbcons = crate::dev::framebuffer::console::FBCONSOLE.lock();
                    fbcons.clear_terminal(&mut tc);
                }

                0
            }
            SysCall::FramebufferPointer => {
                crate::dev::LINEBUFFER_ADDR.load(Ordering::Relaxed)
            }
            SysCall::FlushFramebufferRect => {
                // x, width and height are part of the call's ABI but currently unused:
                // only the affected row is pushed to the linebuffer
                let _x = a1;
                let y = a2;
                let _w = a3;
                let _h = a4;
                crate::dev::linebuffer_push(&mut tc, y as u32);
                0
            }
        },
        suspend,
    )
}

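/// Round-robin scheduler step: saves `current` back into its slot, reaps tasks that asked to
/// exit, and returns the next task that is not blocked on anything (or `None` if every task
/// is waiting).
///
/// A hedged sketch of how a timer/trap handler might use it (illustrative only; the actual
/// call sites live in the arch-specific interrupt code, and `saved_current_task` is an
/// assumed name for the task state captured at trap entry):
///
/// ```ignore
/// let mut tc = TC.lock();
/// if let Some(next) = context_switch(&mut tc, saved_current_task) {
///     // load next.trap_frame and resume execution there
/// }
/// ```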
pub fn context_switch<'a>(tc: &'a mut TrafficControl, current: Task) -> Option<&'a Task> {
    let ci = tc.current;
    tc.tasks[ci] = Some(current);

    // reap tasks that asked to exit: free their stack and device-driver memory and wake
    // anyone waiting on them
    for i in 0..tc.tasks.len() {
        let want_exit = tc.tasks[i]
            .as_ref()
            .map(|task| task.want_exit)
            .unwrap_or(false);
        if want_exit {
            tc.init_mem_if_not();
            let sp = tc.tasks[i].as_ref().map(|v| v.sp).unwrap_or(0);
            let ddi_start_block = tc.tasks[i].as_ref().map(|v| v.ddi_mem_start).unwrap_or(0);
            let ddi_blocks_count = tc.tasks[i].as_ref().map(|v| v.ddi_mem_blocks_count).unwrap_or(0);
            if sp != 0 {
                unsafe {
                    tc.memory_manager
                        .as_mut()
                        .unwrap_unchecked()
                        .free_n_blocks(sp - (BLOCK_SIZE * STACK_SIZE_IN_BLOCKS), STACK_SIZE_IN_BLOCKS)
                };
                unsafe {
                    tc.memory_manager
                        .as_mut()
                        .unwrap_unchecked()
                        .free_n_blocks(ddi_start_block, ddi_blocks_count)
                }
            }
            tc.tasks[i] = None;
            // check if any tasks are waiting on this one to exit
            for task in &mut tc.tasks {
                if let Some(task) = task {
                    if task.wait & TaskWait::WaitForTaskExit as u32 != 0 && task.task_wait == i as u8 {
                        task.wait &= !(TaskWait::WaitForTaskExit as u32);
                    }
                }
            }
        }
    }

    let mut next_task = tc.current + 1;
    if next_task >= MAX_TASKS {
        next_task = 0;
    }

    for _ in 0..(MAX_TASKS + 1) {
        if let Some(task) = tc.tasks[next_task].as_ref() {
            // don't switch to a task that is waiting for something (a wait flag or an
            // outstanding notification ack)
            if task.wait == 0 && !task.ackwait.contains(&3) {
                tc.current = next_task;
                return Some(task);
            }
        }
        next_task += 1;
        if next_task >= MAX_TASKS {
            next_task = 0;
        }
    }

    None
}

/// Central kernel state: the task table, scheduler bookkeeping, the memory manager and the
/// input ring buffer. A single instance lives behind the [`TC`] spinlock.
pub struct TrafficControl {
    pub memory_manager: Option<MemoryManager>,
    pub tasks: [Option<Task>; MAX_TASKS],
    pub first_task_setup: bool,
    pub current: usize,
    pub inbuf: [u8; INBUF_LEN],
    pub inbuf_read: u8,
    pub inbuf_write: u8,
    pub hung_system: bool,
    pub use_fb_console: bool,
}

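/// A single schedulable task together with its saved trap frame.
///
/// The notification fields implement a small send/ack handshake between two tasks. A hedged
/// sketch of the flow, expressed through `handle_syscall` for illustration only (real tasks
/// reach these calls through the syscall trap, and `b` is an assumed task index):
///
/// ```ignore
/// // sender (current task): post a notification address for task `b`, then wait for the ack
/// handle_syscall(SysCall::SendNotification, b, addr, 0, 0, 0, 0);
/// handle_syscall(SysCall::WaitForNotifAck, b, 0, 0, 0, 0, 0); // may suspend until `b` saw it
///
/// // receiver (task `b`): returns the posted address, suspending first if nothing is pending
/// let (addr, _suspended) = handle_syscall(SysCall::WaitForNotification, 0, 0, 0, 0, 0, 0);
/// ```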
pub struct Task {
    #[cfg(feature = "arch_virt")]
    pub epc: usize,
    pub environment: usize,
    pub want_exit: bool,
    /// The original stack pointer (top of the stack allocation) the task started with.
    pub sp: usize,
    /// Bit flags from [`TaskWait`]; non-zero means the task is blocked.
    pub wait: u32,
    /// Notification addresses queued per sender task.
    pub incoming_notifications: [Option<usize>; MAX_TASKS],
    /// Ack state per peer task: 3 = they have not yet received the notification (we are
    /// waiting), 1 = they have received it, 0 = we know they received it, or don't care.
    pub ackwait: [u8; MAX_TASKS],
    /// Index of the task this one is waiting on to exit (valid while the
    /// [`TaskWait::WaitForTaskExit`] flag is set).
    pub task_wait: u8,
    pub ddi_mem_start: usize,
    pub ddi_mem_blocks_count: usize,
    #[cfg(feature = "arch_virt")]
    pub trap_frame: arch::virt::trap::TrapFrame,
    #[cfg(feature = "arch_ppc32")]
    pub trap_frame: arch::ppc32::trap::TrapFrame,
}

impl TrafficControl {
    pub const fn empty() -> Self {
        TrafficControl {
            memory_manager: None,
            tasks: [const { None }; MAX_TASKS],
            first_task_setup: false,
            // MAX_TASKS + 1 is out of range on purpose: no task has run yet
            current: MAX_TASKS + 1,
            inbuf: [0; INBUF_LEN],
            inbuf_read: 0,
            inbuf_write: 0,
            hung_system: false,
            use_fb_console: false,
        }
    }

    pub fn init_mem_if_not(&mut self) {
        if self.memory_manager.is_none() {
            #[cfg(feature = "arch_virt")]
            {
                self.memory_manager = Some(MemoryManager::init())
            }
            #[cfg(feature = "arch_ppc32")]
            {
                use crate::arch::ppc32::PPC_HEAP_START;
                self.memory_manager =
                    Some(MemoryManager::init(PPC_HEAP_START.load(Ordering::Relaxed), TOTAL_MEMORY));
            }
        }
    }

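    /// Push one byte into the input ring buffer.
    ///
    /// A minimal sketch of the ring-buffer behaviour (illustrative; inside the kernel the
    /// bytes are drained again through the `ReadInbuf` syscall):
    ///
    /// ```ignore
    /// let mut tc = TrafficControl::empty();
    /// tc.write_inbuf(b'a');
    /// assert_eq!(tc.read_inbuf(), Some(b'a'));
    /// assert_eq!(tc.read_inbuf(), None); // read index caught up with the write index
    /// ```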
    pub fn write_inbuf(&mut self, c: u8) {
        // note: there is no overflow check; if the write index laps the read index the
        // buffer looks empty again and the unread bytes are effectively lost
        self.inbuf[self.inbuf_write as usize] = c;
        self.inbuf_write += 1;
        if self.inbuf_write >= INBUF_LEN as u8 {
            self.inbuf_write = 0;
        }
    }

    /// Pop one byte from the input ring buffer; `None` when it is empty
    /// (read index equal to write index).
    pub fn read_inbuf(&mut self) -> Option<u8> {
        if self.inbuf_read == self.inbuf_write {
            return None;
        }
        let c = self.inbuf[self.inbuf_read as usize];
        self.inbuf_read += 1;
        if self.inbuf_read >= INBUF_LEN as u8 {
            self.inbuf_read = 0;
        }
        Some(c)
    }
}

/// Default exit routine: simply issues the exit syscall.
#[unsafe(no_mangle)]
pub extern "C" fn program_default_exit() -> ! {
    liblbos::syscalls::exit()
}