Skip to content
Snippets Groups Projects
Commit 44029169 authored by Srivatsa Vaddagiri's avatar Srivatsa Vaddagiri Committed by Pierre-Clément Tosi
Browse files

[pvmfw] Use separate heap for shared memory

Use a separate heap for shared-memory allocation on platforms such as
Gunyah, which do not support an API for the guest to share its memory with
the host at runtime. The separate heap is initialized from the memory
range indicated in the swiotlb node's reg property.

Test: m pvmfw_img
Bug: 271493784
Change-Id: I784626c27024f647672abcb4e844903e6dc7be70
Merged-In: I784626c27024f647672abcb4e844903e6dc7be70
parent d43576a9
No related branches found
No related tags found
No related merge requests found
......@@ -25,7 +25,7 @@ use crate::rand;
use core::arch::asm;
use core::num::NonZeroUsize;
use core::slice;
use hyp::get_hypervisor;
use hyp::{get_hypervisor, HypervisorCap};
use log::debug;
use log::error;
use log::info;
......@@ -109,6 +109,18 @@ impl<'a> MemorySlices<'a> {
RebootReason::InvalidFdt
})?;
if !get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
error!("Pre-shared pool range not specified in swiotlb node");
RebootReason::InvalidFdt
})?;
memory.init_shared_pool(range).map_err(|e| {
error!("Failed to initialize pre-shared pool {e}");
RebootReason::InvalidFdt
})?;
}
let kernel_range = if let Some(r) = info.kernel_range {
memory.alloc_range(&r).map_err(|e| {
error!("Failed to obtain the kernel range with DT range: {e}");
......
......@@ -436,7 +436,7 @@ fn patch_serial_info(fdt: &mut Fdt, serial_info: &SerialInfo) -> libfdt::Result<
}
#[derive(Debug)]
struct SwiotlbInfo {
pub struct SwiotlbInfo {
addr: Option<usize>,
size: usize,
align: usize,
......@@ -580,7 +580,7 @@ pub struct DeviceTreeInfo {
num_cpus: usize,
pci_info: PciInfo,
serial_info: SerialInfo,
swiotlb_info: SwiotlbInfo,
pub swiotlb_info: SwiotlbInfo,
}
impl DeviceTreeInfo {
......
......@@ -21,6 +21,9 @@ use crate::mmu;
use alloc::alloc::alloc_zeroed;
use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use buddy_system_allocator::LockedHeap;
use core::alloc::GlobalAlloc as _;
use core::alloc::Layout;
use core::cmp::max;
use core::cmp::min;
......@@ -31,6 +34,7 @@ use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::error;
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
......@@ -107,6 +111,8 @@ pub enum MemoryTrackerError {
FailedToMap,
/// Error from the interaction with the hypervisor.
Hypervisor(hyp::Error),
/// Failure to set `SHARED_POOL`.
SharedPoolSetFailure,
}
impl fmt::Display for MemoryTrackerError {
......@@ -120,6 +126,7 @@ impl fmt::Display for MemoryTrackerError {
Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
Self::FailedToMap => write!(f, "Failed to map the new region"),
Self::Hypervisor(e) => e.fmt(f),
Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
}
}
}
......@@ -132,6 +139,8 @@ impl From<hyp::Error> for MemoryTrackerError {
type Result<T> = result::Result<T, MemoryTrackerError>;
static SHARED_POOL: OnceBox<LockedHeap<32>> = OnceBox::new();
impl MemoryTracker {
const CAPACITY: usize = 5;
const MMIO_CAPACITY: usize = 5;
......@@ -263,6 +272,31 @@ impl MemoryTracker {
Ok(())
}
/// Initialize a separate heap for shared memory allocations.
///
/// Some hypervisors such as Gunyah do not support a MemShare API for the guest
/// to share its memory with the host at runtime. Instead they allow the host to
/// designate part of guest memory as "shared" ahead of the guest starting its
/// execution. The shared memory region is indicated in the swiotlb node. On
/// such platforms use a separate heap to allocate buffers that can be shared
/// with the host.
///
/// Errors are propagated from `alloc_mut` (tracker failures), and
/// `SharedPoolSetFailure` is returned if `SHARED_POOL` was already set.
pub fn init_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
// NOTE(review): `NonZeroUsize::new` yields `None` for an empty range, so this
// `unwrap` panics on a zero-length swiotlb region — confirm callers
// (e.g. the swiotlb-node validation) guarantee a non-empty range.
let size = NonZeroUsize::new(range.len()).unwrap();
// Register the region with the tracker so it is mapped and cannot be
// handed out to any other allocation.
let range = self.alloc_mut(range.start, size)?;
let shared_pool = LockedHeap::<32>::new();
// SAFETY - `range` should be a valid region of memory as validated by
// `validate_swiotlb_info` and not used by any other rust code.
unsafe {
shared_pool.lock().init(range.start, range.len());
}
// `OnceBox::set` fails if the pool was already initialized; report that as
// a distinct error instead of silently keeping the first heap.
SHARED_POOL
.set(Box::new(shared_pool))
.map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;
Ok(())
}
}
impl Drop for MemoryTracker {
......@@ -305,14 +339,26 @@ fn unshare_range(range: &MemoryRange, granule: usize) -> hyp::Result<()> {
Ok(())
}
/// Allocates a memory range of at least the given size from the global allocator, and shares it
/// with the host. Returns a pointer to the buffer.
/// Allocates a memory range of at least the given size that is shared with
/// host. Returns a pointer to the buffer.
///
/// It will be aligned to the memory sharing granule size supported by the hypervisor.
pub fn alloc_shared(size: usize) -> hyp::Result<NonNull<u8>> {
let layout = shared_buffer_layout(size)?;
let granule = layout.align();
if let Some(shared_pool) = SHARED_POOL.get() {
// Safe because `shared_buffer_layout` panics if the size is 0, so the
// layout must have a non-zero size.
let buffer = unsafe { shared_pool.alloc_zeroed(layout) };
let Some(buffer) = NonNull::new(buffer) else {
handle_alloc_error(layout);
};
return Ok(buffer);
}
// Safe because `shared_buffer_layout` panics if the size is 0, so the layout must have a
// non-zero size.
let buffer = unsafe { alloc_zeroed(layout) };
......@@ -341,6 +387,14 @@ pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, size: usize) -> hyp::Result<()>
let layout = shared_buffer_layout(size)?;
let granule = layout.align();
if let Some(shared_pool) = SHARED_POOL.get() {
// Safe because the memory was allocated by `alloc_shared` above using
// the same allocator, and the layout is the same as was used then.
unsafe { shared_pool.dealloc(vaddr.as_ptr(), layout) };
return Ok(());
}
let paddr = virt_to_phys(vaddr);
unshare_range(&(paddr..paddr + layout.size()), granule)?;
// Safe because the memory was allocated by `alloc_shared` above using the same allocator, and
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment