virtio gpu support

husky 2025-09-10 14:06:00 -07:00
parent c9d116c462
commit f238ac7b3b
5 changed files with 486 additions and 2 deletions


@@ -2,4 +2,4 @@
storage_device="${1}"
-qemu-system-riscv32 -machine virt -bios none -drive if=none,format=raw,file="${storage_device}",id=disk1 -device virtio-blk-device,drive=disk1 -device virtio-gpu -serial mon:stdio -m 5M -kernel target/riscv32imac-unknown-none-elf/release/lbos
+qemu-system-riscv32 -machine virt -bios none -drive if=none,format=raw,file="${storage_device}",id=disk1 -device virtio-blk-device,drive=disk1 -device virtio-gpu-device -serial mon:stdio -m 5M -kernel target/riscv32imac-unknown-none-elf/release/lbos
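# virtio-gpu-device is the transport-agnostic device that attaches to virtio-mmio;
# plain virtio-gpu resolves to the PCI variant, which this kernel's mmio probe never sees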


@@ -1,5 +1,8 @@
use crate::trafficcontrol::TrafficControl;
pub const FRAMEBUFFER_WIDTH: usize = 320;
pub const FRAMEBUFFER_HEIGHT: usize = 240;
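// 320 x 240 @ 4 bytes/pixel = 307,200 bytes of framebuffer (600 512-byte blocks)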
#[cfg(feature = "dev_virtio")]
pub mod virtio;


@@ -94,7 +94,7 @@ impl VirtIoBlockDevice {
((addr + VIRTIO_MMIO_QUEUE_SEL) as *mut u32).write_volatile(0);
}
let num_blocks = size_of::<VirtQueue>().div_ceil(512) + 8; // 8 extra blocks guarantee the queue can be aligned up to a 4096-byte boundary
-let queue_block= unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(num_blocks) };
+let queue_block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(num_blocks) };
let queue_ptr = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().block_to_addr(queue_block) };
// align up to 4096
let queue_ptr = queue_ptr.wrapping_add(4095) & !(4095);

src/dev/virtio/gpu.rs (new file, +430)

@@ -0,0 +1,430 @@
use crate::dev::{FRAMEBUFFER_HEIGHT, FRAMEBUFFER_WIDTH};
use crate::dev::virtio::{heap_allocate_type, Descriptor, VirtQueue, VIRTIO_DESC_F_NEXT, VIRTIO_DESC_F_WRITE, VIRTIO_MMIO_GUEST_FEATURES, VIRTIO_MMIO_HOST_FEATURES, VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_QUEUE_NUM, VIRTIO_MMIO_QUEUE_NUM_MAX, VIRTIO_MMIO_QUEUE_SEL, VIRTIO_MMIO_STATUS, VIRTIO_MMIO_STATUS_ACKNOWLEDGE, VIRTIO_MMIO_STATUS_DRIVER, VIRTIO_MMIO_STATUS_DRIVER_OK, VIRTIO_MMIO_STATUS_FAILED, VIRTIO_MMIO_STATUS_FEATURES_OK, VIRTIO_QUEUE_SIZE};
use crate::trafficcontrol::TrafficControl;
pub const VIRTIO_GPU_CMD_GET_DISPLAY_INFO: u32 = 0x0100;
#[repr(C)]
pub struct CtrlHeader {
pub ctrl_type: u32,
pub flags: u32,
pub fence_id: u64, // le64 in the virtio-gpu spec; a u32 here would shrink and misalign every command header
pub ctx_id: u32,
pub padding: u32,
}
pub const VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: u32 = 0x0100 + 1;
pub const VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: u32 = 134;
#[repr(C)]
pub struct ResourceCreate2D {
pub header: CtrlHeader,
pub resource_id: u32,
pub format: u32,
pub width: u32,
pub height: u32,
}
pub const VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: u32 = 0x0100 + 6;
#[repr(C)]
pub struct ResourceAttachBacking {
pub header: CtrlHeader,
pub resource_id: u32,
pub nr_entries: u32,
}
#[repr(C)]
pub struct GPUMemEntry {
pub addr: u64,
pub length: u32,
pub padding: u32,
}
pub const VIRTIO_GPU_CMD_SET_SCANOUT: u32 = 0x0100 + 3;
#[repr(C)]
pub struct SetScanout {
pub header: CtrlHeader,
pub rect: VirtIoGpuRect,
pub scanout_id: u32,
pub resource_id: u32,
}
#[repr(C)]
pub struct VirtIoGpuRect {
pub x: u32,
pub y: u32,
pub width: u32,
pub height: u32,
}
pub const VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: u32 = 0x0100 + 5;
#[repr(C)]
pub struct TransferToHost2D {
pub header: CtrlHeader,
pub rect: VirtIoGpuRect,
pub offset: u64,
pub resource_id: u32,
pub padding: u32,
}
pub const VIRTIO_GPU_CMD_RESOURCE_FLUSH: u32 = 0x0100 + 4;
#[repr(C)]
pub struct ResourceFlush {
pub header: CtrlHeader,
pub rect: VirtIoGpuRect,
pub resource_id: u32,
pub padding: u32,
}
pub struct VirtIoGpuDevice {
pub addr: usize,
pub queue: usize,
pub idx: u16,
pub ack_used_idx: u16,
pub framebuffer: usize,
pub width: usize,
pub height: usize,
}
pub enum VirtIoGpuDeviceError {
FeatureSetMismatch,
QueueSetupFailed,
}
impl VirtIoGpuDevice {
pub fn new_and_init(
tc: &mut TrafficControl,
addr: usize,
) -> Result<Self, VirtIoGpuDeviceError> {
// reset device (write 0 to status)
unsafe {
((addr + VIRTIO_MMIO_STATUS) as *mut u32).write_volatile(0);
}
// set ack bit
let mut status = VIRTIO_MMIO_STATUS_ACKNOWLEDGE;
unsafe {
((addr + VIRTIO_MMIO_STATUS) as *mut u32).write_volatile(status);
}
// set driver bit
status |= VIRTIO_MMIO_STATUS_DRIVER;
unsafe {
((addr + VIRTIO_MMIO_STATUS) as *mut u32).write_volatile(status);
}
// read host features
let host_features =
unsafe { ((addr + VIRTIO_MMIO_HOST_FEATURES) as *const u32).read_volatile() };
let guest_features = host_features; // todo: configure properly
unsafe {
((addr + VIRTIO_MMIO_GUEST_FEATURES) as *mut u32).write_volatile(guest_features);
}
status |= VIRTIO_MMIO_STATUS_FEATURES_OK;
unsafe {
((addr + VIRTIO_MMIO_STATUS) as *mut u32).write_volatile(status);
}
// make sure features ok is still set, otherwise failed
if unsafe { ((addr + VIRTIO_MMIO_STATUS) as *const u32).read_volatile() }
& VIRTIO_MMIO_STATUS_FEATURES_OK
== 0
{
unsafe {
((addr + VIRTIO_MMIO_STATUS) as *mut u32).write_volatile(VIRTIO_MMIO_STATUS_FAILED);
}
return Err(VirtIoGpuDeviceError::FeatureSetMismatch);
}
// setup queue
let queue_max_by_device =
unsafe { ((addr + VIRTIO_MMIO_QUEUE_NUM_MAX) as *const u32).read_volatile() };
if queue_max_by_device < VIRTIO_QUEUE_SIZE as _ {
unsafe {
((addr + VIRTIO_MMIO_STATUS) as *mut u32).write_volatile(VIRTIO_MMIO_STATUS_FAILED);
}
return Err(VirtIoGpuDeviceError::QueueSetupFailed);
}
unsafe {
((addr + VIRTIO_MMIO_QUEUE_NUM) as *mut u32).write_volatile(VIRTIO_QUEUE_SIZE as _);
}
unsafe {
((addr + VIRTIO_MMIO_QUEUE_SEL) as *mut u32).write_volatile(0);
}
let num_blocks = size_of::<VirtQueue>().div_ceil(512) + 8; // 8 extra blocks guarantee the queue can be aligned up to a 4096-byte boundary
let queue_block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(num_blocks) };
let queue_ptr = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().block_to_addr(queue_block) };
// align up to 4096
let queue_ptr = queue_ptr.wrapping_add(4095) & !(4095);
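// e.g. 0x8000_0200 -> (0x8000_0200 + 0xFFF) & !0xFFF = 0x8000_1000
// legacy virtio-mmio: publish the queue's page number so the device can find the
// ring; without this write the device never learns where the queue lives. the
// 0x028/0x040 offsets are the legacy GuestPageSize/QueuePFN registers, defined
// locally here on the assumption that this module doesn't export them yet.
const VIRTIO_MMIO_GUEST_PAGE_SIZE: usize = 0x028;
const VIRTIO_MMIO_QUEUE_PFN: usize = 0x040;
unsafe {
    ((addr + VIRTIO_MMIO_GUEST_PAGE_SIZE) as *mut u32).write_volatile(4096);
    ((addr + VIRTIO_MMIO_QUEUE_PFN) as *mut u32).write_volatile((queue_ptr / 4096) as u32);
}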
// allocate memory for framebuffer
let num_blocks = (FRAMEBUFFER_WIDTH * FRAMEBUFFER_HEIGHT * 4).div_ceil(512); // no alignment requirements afaik
let framebuffer_block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(num_blocks) };
let framebuffer_ptr = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().block_to_addr(framebuffer_block) };
let mut gpu = Self {
addr,
queue: queue_ptr,
idx: 0,
ack_used_idx: 0,
framebuffer: framebuffer_ptr,
width: FRAMEBUFFER_WIDTH,
height: FRAMEBUFFER_HEIGHT,
};
status |= VIRTIO_MMIO_STATUS_DRIVER_OK;
unsafe {
((addr + VIRTIO_MMIO_STATUS) as *mut u32).write_volatile(status);
}
// note: we heap-allocate the command, mem-entry and response buffers below;
// self.pending(tc) returns them to the allocator once the device marks each chain used
// create host resource 1, ResourceCreate2D
let cmd = heap_allocate_type::<ResourceCreate2D>(tc);
cmd.header.ctrl_type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
cmd.header.flags = 0;
cmd.header.fence_id = 0;
cmd.header.ctx_id = 0;
cmd.header.padding = 0;
cmd.resource_id = 1;
cmd.format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
cmd.width = FRAMEBUFFER_WIDTH as u32;
cmd.height = FRAMEBUFFER_HEIGHT as u32;
let resp = heap_allocate_type::<CtrlHeader>(tc);
let desc_rq = Descriptor {
addr: unsafe { &(*cmd) as *const _ as u64 },
len: size_of::<ResourceCreate2D>() as u32,
flags: VIRTIO_DESC_F_NEXT,
next: 0,
};
let desc_resp = Descriptor {
addr: unsafe { &(*resp) as *const _ as u64 },
len: size_of::<CtrlHeader>() as u32,
flags: VIRTIO_DESC_F_WRITE,
next: 0,
};
gpu.send_rq_rsp(desc_rq, desc_resp);
gpu.pending(tc);
// attach backing, ResourceAttachBacking
let cmd = heap_allocate_type::<ResourceAttachBacking>(tc);
cmd.header.ctrl_type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
cmd.header.flags = 0;
cmd.header.fence_id = 0;
cmd.header.ctx_id = 0;
cmd.header.padding = 0;
cmd.resource_id = 1;
cmd.nr_entries = 1;
let mem_entry = heap_allocate_type::<GPUMemEntry>(tc);
mem_entry.addr = gpu.framebuffer as u64;
mem_entry.length = (gpu.width * gpu.height * 4) as u32;
mem_entry.padding = 0;
let resp = heap_allocate_type::<CtrlHeader>(tc);
let desc_rq = Descriptor {
addr: unsafe { &(*cmd) as *const _ as u64 },
len: size_of::<ResourceAttachBacking>() as u32,
flags: VIRTIO_DESC_F_NEXT,
next: 0,
};
let desc_mem_entry = Descriptor {
addr: unsafe { &(*mem_entry) as *const _ as u64 },
len: size_of::<GPUMemEntry>() as u32,
flags: VIRTIO_DESC_F_NEXT,
next: 0,
};
let desc_resp = Descriptor {
addr: unsafe { &(*resp) as *const _ as u64 },
len: size_of::<CtrlHeader>() as u32,
flags: VIRTIO_DESC_F_WRITE,
next: 0,
};
gpu.send_rq_next_rsp([desc_rq, desc_mem_entry], desc_resp);
gpu.pending(tc);
// set scanout
let cmd = heap_allocate_type::<SetScanout>(tc);
cmd.header.ctrl_type = VIRTIO_GPU_CMD_SET_SCANOUT;
cmd.header.flags = 0;
cmd.header.fence_id = 0;
cmd.header.ctx_id = 0;
cmd.header.padding = 0;
cmd.rect.x = 0;
cmd.rect.y = 0;
cmd.rect.width = gpu.width as u32;
cmd.rect.height = gpu.height as u32;
cmd.scanout_id = 0;
cmd.resource_id = 1;
let resp = heap_allocate_type::<CtrlHeader>(tc);
let desc_rq = Descriptor {
addr: unsafe { &(*cmd) as *const _ as u64 },
len: size_of::<SetScanout>() as u32,
flags: VIRTIO_DESC_F_NEXT,
next: 0,
};
let desc_resp = Descriptor {
addr: unsafe { &(*resp) as *const _ as u64 },
len: size_of::<CtrlHeader>() as u32,
flags: VIRTIO_DESC_F_WRITE,
next: 0,
};
gpu.send_rq_rsp(desc_rq, desc_resp);
gpu.pending(tc);
Ok(gpu)
}
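/// Queues a two-descriptor chain: `desc_rq` (device-readable request, chained via
/// NEXT) followed by `desc_resp` (device-writable response), publishes the chain
/// head on the available ring, then notifies queue 0.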
pub fn send_rq_rsp(&mut self, desc_rq: Descriptor, desc_resp: Descriptor) {
self.idx = (self.idx + 1) % VIRTIO_QUEUE_SIZE as u16;
let head = self.idx;
unsafe {
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize] = desc_rq;
}
// assume next is always set, in fact set it if it isn't
unsafe {
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize].flags |= VIRTIO_DESC_F_NEXT;
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize].next =
(self.idx + 1) % VIRTIO_QUEUE_SIZE as u16;
}
self.idx = (self.idx + 1) % VIRTIO_QUEUE_SIZE as u16;
unsafe {
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize] = desc_resp;
}
unsafe {
(*(self.queue as *mut VirtQueue)).avail.ring[(*(self.queue as *mut VirtQueue)).avail.idx as usize % VIRTIO_QUEUE_SIZE] = head;
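// note: on real hardware a write barrier belongs between filling the ring slot
// and publishing the new avail.idx; qemu's device model is forgiving here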
(*(self.queue as *mut VirtQueue)).avail.idx = (*(self.queue as *mut VirtQueue)).avail.idx.wrapping_add(1);
}
// notify
unsafe {
((self.addr + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32).write_volatile(0);
}
}
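/// Like `send_rq_rsp`, but chains several request descriptors (e.g. a command plus
/// its mem-entry table) ahead of the single response descriptor.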
pub fn send_rq_next_rsp<T: Sized + IntoIterator<Item = Descriptor>>(&mut self, descs: T, desc_resp: Descriptor) {
let head = (self.idx + 1) % VIRTIO_QUEUE_SIZE as u16;
for desc in descs {
self.idx = (self.idx + 1) % VIRTIO_QUEUE_SIZE as u16;
unsafe {
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize] = desc;
}
// assume next is always set, in fact set it if it isn't
unsafe {
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize].flags |= VIRTIO_DESC_F_NEXT;
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize].next =
(self.idx + 1) % VIRTIO_QUEUE_SIZE as u16;
}
}
self.idx = (self.idx + 1) % VIRTIO_QUEUE_SIZE as u16;
unsafe {
(*(self.queue as *mut VirtQueue)).desc[self.idx as usize] = desc_resp;
}
unsafe {
(*(self.queue as *mut VirtQueue)).avail.ring[(*(self.queue as *mut VirtQueue)).avail.idx as usize % VIRTIO_QUEUE_SIZE] = head;
(*(self.queue as *mut VirtQueue)).avail.idx = (*(self.queue as *mut VirtQueue)).avail.idx.wrapping_add(1);
}
// notify
unsafe {
((self.addr + VIRTIO_MMIO_QUEUE_NOTIFY) as *mut u32).write_volatile(0);
}
}
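/// Drains the used ring: for every completed chain, walks its descriptors and
/// returns the backing blocks to the allocator.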
pub fn pending(&mut self, tc: &mut TrafficControl) {
    let queue = unsafe { &(*(self.queue as *mut VirtQueue)) };
    while self.ack_used_idx != queue.used.idx {
        let elem = &queue.used.ring[self.ack_used_idx as usize % VIRTIO_QUEUE_SIZE];
        // free every descriptor in the completed chain, not just the head;
        // otherwise the response (and mem-entry) buffers would leak
        let mut id = elem.id as usize;
        loop {
            let desc = &queue.desc[id];
            let addr = desc.addr as usize;
            let num_blocks = (desc.len as usize).div_ceil(512);
            let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().addr_to_block(addr) };
            unsafe {
                tc.memory_manager.as_mut().unwrap_unchecked().free_n_blocks(block, num_blocks);
            }
            if desc.flags & VIRTIO_DESC_F_NEXT == 0 {
                break;
            }
            id = desc.next as usize;
        }
        self.ack_used_idx = self.ack_used_idx.wrapping_add(1);
    }
}
pub fn transfer(&mut self, tc: &mut TrafficControl, x: u32, y: u32, width: u32, height: u32) {
// we have enough queue space for 2 commands (4 descriptors), so let's queue
// both the TransferToHost2D and the ResourceFlush commands in one go,
// i.e. don't call pending() until the flush has been submitted
let cmd = heap_allocate_type::<TransferToHost2D>(tc);
cmd.header.ctrl_type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
cmd.header.flags = 0;
cmd.header.fence_id = 0;
cmd.header.ctx_id = 0;
cmd.header.padding = 0;
cmd.rect.x = x;
cmd.rect.y = y;
cmd.rect.width = width;
cmd.rect.height = height;
cmd.offset = 0;
cmd.resource_id = 1;
cmd.padding = 0;
let resp = heap_allocate_type::<CtrlHeader>(tc);
let desc_rq = Descriptor {
addr: unsafe { &(*cmd) as *const _ as u64 },
len: size_of::<TransferToHost2D>() as u32,
flags: VIRTIO_DESC_F_NEXT,
next: 0,
};
let desc_resp = Descriptor {
addr: unsafe { &(*resp) as *const _ as u64 },
len: size_of::<CtrlHeader>() as u32,
flags: VIRTIO_DESC_F_WRITE,
next: 0,
};
self.send_rq_rsp(desc_rq, desc_resp);
// resource flush
let cmd = heap_allocate_type::<ResourceFlush>(tc);
cmd.header.ctrl_type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
cmd.header.flags = 0;
cmd.header.fence_id = 0;
cmd.header.ctx_id = 0;
cmd.header.padding = 0;
cmd.rect.x = x;
cmd.rect.y = y;
cmd.rect.width = width;
cmd.rect.height = height;
cmd.resource_id = 1;
cmd.padding = 0;
let resp = heap_allocate_type::<CtrlHeader>(tc);
let desc_rq = Descriptor {
addr: unsafe { &(*cmd) as *const _ as u64 },
len: size_of::<ResourceFlush>() as u32,
flags: VIRTIO_DESC_F_NEXT,
next: 0,
};
let desc_resp = Descriptor {
addr: unsafe { &(*resp) as *const _ as u64 },
len: size_of::<CtrlHeader>() as u32,
flags: VIRTIO_DESC_F_WRITE,
next: 0,
};
self.send_rq_rsp(desc_rq, desc_resp);
self.pending(tc);
}
}
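// usage sketch (not part of this commit): how a caller might paint the framebuffer
// and push it to the display. `fill_rect` is a hypothetical helper; it assumes
// probe_virtio_devices() has already run and the caller holds a &mut TrafficControl.
#[allow(dead_code)]
fn fill_rect(gpu: &mut VirtIoGpuDevice, tc: &mut TrafficControl, x: usize, y: usize, w: usize, h: usize, rgbx: u32) {
    let fb = gpu.framebuffer as *mut u32;
    for row in y..(y + h).min(gpu.height) {
        for col in x..(x + w).min(gpu.width) {
            // one u32 per pixel; byte order follows the R8G8B8X8 format chosen at resource creation
            unsafe { fb.add(row * gpu.width + col).write_volatile(rgbx) };
        }
    }
    // flush the whole frame; transfer() always passes offset 0, which is only
    // strictly correct for rectangles starting at the origin
    gpu.transfer(tc, 0, 0, gpu.width as u32, gpu.height as u32);
}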


@@ -2,11 +2,13 @@
//! WARNING: virtio is currently completely broken! don't use it!
use crate::dev::virtio::block::{VirtIoBlockDevice, VirtIoBlockDeviceError};
use crate::dev::virtio::gpu::{VirtIoGpuDevice, VirtIoGpuDeviceError};
use crate::spinlock::Spinlock;
use crate::strprint::twodigit;
use crate::trafficcontrol::TrafficControl;
mod block;
mod gpu;
pub const VIRTIO_MMIO_START: usize = 0x1000_1000;
pub const VIRTIO_MMIO_END: usize = 0x1000_8000;
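// qemu's riscv virt machine exposes 8 virtio-mmio transports in this range, one every 0x1000 bytes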
@@ -36,9 +38,11 @@ pub const VIRTIO_QUEUE_SIZE: usize = 4;
pub static VIRTIO_DEVICES: Spinlock<[Option<VirtIoDevice>; VIRTIO_MMIO_DEVCOUNT]> = Spinlock::new([const { None }; VIRTIO_MMIO_DEVCOUNT]);
pub static VIRTIO_HARD_BLOCK_DEVICE: Spinlock<Option<u8>> = Spinlock::new(None);
pub static VIRTIO_GPU_DEVICE: Spinlock<Option<u8>> = Spinlock::new(None);
pub enum VirtIoDevice {
BlockDevice(VirtIoBlockDevice),
GPUDevice(VirtIoGpuDevice),
}
#[repr(C)]
@@ -75,6 +79,20 @@ pub struct VirtQueue {
pub used: Used,
}
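/// Carves `ceil(size_of::<T>() / 512)` blocks out of the block allocator and hands
/// back a `&'static mut T` over them. The memory is not zeroed or initialized;
/// callers must write every field before the buffer is handed to a device.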
pub fn heap_allocate_type<T: Sized>(tc: &mut TrafficControl) -> &'static mut T {
let num_blocks = size_of::<T>().div_ceil(512);
let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().alloc_n_blocks(num_blocks) };
let addr = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().block_to_addr(block) };
unsafe { &mut (*(addr as *mut T)) }
}
/// WARNING: ptr will be invalid after this function returns!
pub fn free_type<T: Sized>(tc: &mut TrafficControl, ptr: &'static mut T) {
let num_blocks = size_of::<T>().div_ceil(512);
let block = unsafe { tc.memory_manager.as_mut().unwrap_unchecked().addr_to_block(ptr as *const T as usize) };
unsafe { tc.memory_manager.as_mut().unwrap_unchecked().free_n_blocks(block, num_blocks) };
}
pub fn probe_virtio_devices(tc: &mut TrafficControl) {
let serial_port = crate::arch::serial_port();
let mut devices = VIRTIO_DEVICES.lock();
@@ -117,6 +135,30 @@ pub fn probe_virtio_devices(tc: &mut TrafficControl) {
}
}
}
16 => {
    // device type 16: virtio GPU device
    match VirtIoGpuDevice::new_and_init(tc, addr) {
        Ok(gpu_device) => {
            devices[i] = Some(VirtIoDevice::GPUDevice(gpu_device));
            *VIRTIO_GPU_DEVICE.lock() = Some(i as u8);
            if let Some(serial_port) = &serial_port {
                serial_port.putstr("virtio gpu device found\n");
            }
        }
        // matching on the Result directly avoids the use-after-move that an
        // `if let Ok(..)` / `else if let Err(..)` pair on the same value would hit
        Err(VirtIoGpuDeviceError::FeatureSetMismatch) => {
            if let Some(serial_port) = &serial_port {
                serial_port.putstr("virtio gpu device feature mismatch\n");
            }
        }
        Err(VirtIoGpuDeviceError::QueueSetupFailed) => {
            if let Some(serial_port) = &serial_port {
                serial_port.putstr("virtio gpu device queue setup failed\n");
            }
        }
    }
}
x => {
if let Some(serial_port) = &serial_port {
serial_port.putstr("unsupported device type ");
@@ -144,6 +186,9 @@ pub fn handle_interrupt(interrupt: u32, tc: &mut TrafficControl) {
}
}
}
VirtIoDevice::GPUDevice(_) => {
// todo: handle gpu interrupts
}
}
}
}
@@ -161,6 +206,12 @@ pub fn read_sector(tc: &mut TrafficControl, buffer: usize, size: u32, sector: u6
device.operation(tc, buffer, size, sector, false);
return true;
}
_ => {
if let Some(serial_port) = &crate::arch::serial_port() {
serial_port.putstr("unexpected device type (not block)\n");
}
return false;
}
}
} else {
let uart = crate::uart::UART::new(0x1000_0000);