Skip to content
Snippets Groups Projects
Commit 9f37a454 authored by Alice Wang's avatar Alice Wang Committed by Pierre-Clément Tosi
Browse files

[virtio] Mark virtio_drivers::Hal implementation as unsafe

As required by virtio_drivers v0.4.0.

Bug: 274732281
Test: m pvmfw_img && atest vmbase_example.integration_test
Change-Id: Ie30fae6962bbe6af2e711b9bb7c475df4a9f9c7b
Merged-In: Ie30fae6962bbe6af2e711b9bb7c475df4a9f9c7b
parent 68980bee
No related branches found
No related tags found
No related merge requests found
......@@ -9,7 +9,21 @@ use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
/// Unit struct providing this crate's implementation of the `virtio_drivers::Hal` trait
/// (see the `Hal` impl below); it carries no state of its own.
pub struct HalImpl;
impl Hal for HalImpl {
/// Implements the `Hal` trait for `HalImpl`.
///
/// # Safety
///
/// Callers of this implementation must follow the safety requirements documented for the unsafe
/// methods.
unsafe impl Hal for HalImpl {
/// Allocates the given number of contiguous physical pages of DMA memory for VirtIO use.
///
/// # Implementation Safety
///
/// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
/// reference in the program until it is deallocated by `dma_dealloc` by allocating a unique
/// block of memory using `alloc_shared` and returning a non-null pointer to it that is
/// aligned to `PAGE_SIZE`.
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
let size = pages * PAGE_SIZE;
......@@ -19,7 +33,7 @@ impl Hal for HalImpl {
(paddr, vaddr)
}
fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let size = pages * PAGE_SIZE;
// Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
......@@ -30,7 +44,13 @@ impl Hal for HalImpl {
0
}
fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
/// Converts a physical address used for MMIO to a virtual address which the driver can access.
///
/// # Implementation Safety
///
/// `mmio_phys_to_virt` satisfies the requirement by checking that the mapped memory region
/// is within the PCI MMIO range.
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
// Check that the region is within the PCI MMIO range that we read from the device tree. If
// not, the host is probably trying to do something malicious.
......@@ -48,7 +68,7 @@ impl Hal for HalImpl {
phys_to_virt(paddr)
}
fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
let size = buffer.len();
// TODO: Copy to a pre-shared region rather than allocating and sharing each time.
......@@ -63,7 +83,7 @@ impl Hal for HalImpl {
virt_to_phys(copy)
}
fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
let vaddr = phys_to_virt(paddr);
let size = buffer.len();
if direction == BufferDirection::DeviceToDriver {
......
......@@ -98,7 +98,7 @@ pub fn get_bar_region(pci_info: &PciInfo) -> MemoryRegion {
/// Unit struct implementing the `virtio_drivers::Hal` trait for this binary
/// (see the `Hal` impl below); it carries no state of its own.
struct HalImpl;
impl Hal for HalImpl {
unsafe impl Hal for HalImpl {
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
......@@ -110,7 +110,7 @@ impl Hal for HalImpl {
(paddr, vaddr)
}
fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
// Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
......@@ -121,17 +121,17 @@ impl Hal for HalImpl {
0
}
fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
NonNull::new(paddr as _).unwrap()
}
fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
let vaddr = buffer.cast();
// Nothing to do, as the host already has access to all memory.
virt_to_phys(vaddr)
}
fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
unsafe fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
// Nothing to do, as the host already has access to all memory and we didn't copy the buffer
// anywhere else.
}
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment