Commit 716ff078 authored by Android Build Coastguard Worker

Snap for 10127524 from 65008dc7 to udc-release

Change-Id: Ifcd18452a62795f467af4b6d20302645dfc5aba0
parents abe564a0 65008dc7
@@ -112,13 +112,18 @@ impl<'a> MemorySlices<'a> {
RebootReason::InvalidFdt
})?;
if !get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
memory.init_dynamic_shared_pool().map_err(|e| {
error!("Failed to initialize dynamically shared pool: {e}");
RebootReason::InternalError
})?;
} else {
let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
error!("Pre-shared pool range not specified in swiotlb node");
RebootReason::InvalidFdt
})?;
memory.init_shared_pool(range).map_err(|e| {
memory.init_static_shared_pool(range).map_err(|e| {
error!("Failed to initialize pre-shared pool {e}");
RebootReason::InvalidFdt
})?;
@@ -261,6 +266,8 @@ fn main_wrapper(
error!("Failed to unshare MMIO ranges: {e}");
RebootReason::InternalError
})?;
// Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
MEMORY.lock().as_mut().unwrap().unshare_all_memory();
get_hypervisor().mmio_guard_unmap(console::BASE_ADDRESS).map_err(|e| {
error!("Failed to unshare the UART: {e}");
RebootReason::InternalError
......
@@ -98,6 +98,7 @@ pub const fn ceiling_div(num: usize, den: usize) -> Option<usize> {
/// Aligns the given address to the given alignment, if it is a power of two.
///
/// Returns `None` if the alignment isn't a power of two.
#[allow(dead_code)] // Currently unused but might be needed again.
pub const fn align_down(addr: usize, alignment: usize) -> Option<usize> {
if !alignment.is_power_of_two() {
None
......
@@ -16,14 +16,14 @@
#![deny(unsafe_op_in_unsafe_fn)]
use crate::helpers::{self, align_down, align_up, page_4kb_of, RangeExt, SIZE_4KB, SIZE_4MB};
use crate::helpers::{self, page_4kb_of, RangeExt, SIZE_4KB, SIZE_4MB};
use crate::mmu;
use alloc::alloc::alloc_zeroed;
use alloc::alloc::dealloc;
use alloc::alloc::handle_alloc_error;
use alloc::boxed::Box;
use buddy_system_allocator::LockedHeap;
use core::alloc::GlobalAlloc as _;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::cmp::max;
use core::cmp::min;
@@ -34,6 +34,7 @@ use core::ptr::NonNull;
use core::result;
use hyp::get_hypervisor;
use log::error;
use log::trace;
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;
@@ -111,6 +112,8 @@ pub enum MemoryTrackerError {
FailedToMap,
/// Error from the interaction with the hypervisor.
Hypervisor(hyp::Error),
/// Failure to set `SHARED_MEMORY`.
SharedMemorySetFailure,
/// Failure to set `SHARED_POOL`.
SharedPoolSetFailure,
}
@@ -126,6 +129,7 @@ impl fmt::Display for MemoryTrackerError {
Self::Overlaps => write!(f, "New region overlaps with tracked regions"),
Self::FailedToMap => write!(f, "Failed to map the new region"),
Self::Hypervisor(e) => e.fmt(f),
Self::SharedMemorySetFailure => write!(f, "Failed to set SHARED_MEMORY"),
Self::SharedPoolSetFailure => write!(f, "Failed to set SHARED_POOL"),
}
}
@@ -139,7 +143,62 @@ impl From<hyp::Error> for MemoryTrackerError {
type Result<T> = result::Result<T, MemoryTrackerError>;
static SHARED_POOL: OnceBox<LockedHeap<32>> = OnceBox::new();
static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);
/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
pub struct MemorySharer {
granule: usize,
shared_regions: Vec<(usize, Layout)>,
}
impl MemorySharer {
const INIT_CAP: usize = 10;
pub fn new(granule: usize) -> Self {
assert!(granule.is_power_of_two());
Self { granule, shared_regions: Vec::with_capacity(Self::INIT_CAP) }
}
/// Gets a granule-aligned region that suits `hint` from the global allocator and shares it.
pub fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
let layout = hint.align_to(self.granule).unwrap().pad_to_align();
assert_ne!(layout.size(), 0);
// SAFETY - layout has non-zero size.
let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
handle_alloc_error(layout);
};
let base = shared.as_ptr() as usize;
let end = base.checked_add(layout.size()).unwrap();
trace!("Sharing memory region {:#x?}", base..end);
for vaddr in (base..end).step_by(self.granule) {
let vaddr = NonNull::new(vaddr as *mut _).unwrap();
get_hypervisor().mem_share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
}
self.shared_regions.push((base, layout));
pool.add_frame(base, end);
}
}
impl Drop for MemorySharer {
fn drop(&mut self) {
while let Some((base, layout)) = self.shared_regions.pop() {
let end = base.checked_add(layout.size()).unwrap();
trace!("Unsharing memory region {:#x?}", base..end);
for vaddr in (base..end).step_by(self.granule) {
let vaddr = NonNull::new(vaddr as *mut _).unwrap();
get_hypervisor().mem_unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
}
// SAFETY - The region was obtained from alloc_zeroed() with the recorded layout.
unsafe { dealloc(base as *mut _, layout) };
}
}
}
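The granule rounding that `refill` performs comes from `core::alloc::Layout`: `align_to` raises the alignment to the sharing granule and `pad_to_align` rounds the size up to a whole number of granules. A standalone sketch (not part of this change), assuming a hypothetical 4 KiB granule and request:

fn granule_rounding_example() {
    use core::alloc::Layout;
    // Hypothetical request: 5000 bytes with 8-byte alignment.
    let hint = Layout::from_size_align(5000, 8).unwrap();
    let granule = 4096; // assumed granule; queried from the hypervisor in practice
    // Raise the alignment to the granule, then round the size up to a
    // multiple of it, so only whole granules are ever shared with the host.
    let layout = hint.align_to(granule).unwrap().pad_to_align();
    assert_eq!((layout.size(), layout.align()), (8192, 4096));
}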
impl MemoryTracker {
const CAPACITY: usize = 5;
@@ -273,23 +332,34 @@ impl MemoryTracker {
Ok(())
}
/// Initialize a separate heap for shared memory allocations.
/// Initialize the shared heap to dynamically share memory from the global allocator.
pub fn init_dynamic_shared_pool(&mut self) -> Result<()> {
let granule = get_hypervisor().memory_protection_granule()?;
let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule));
if previous.is_some() {
return Err(MemoryTrackerError::SharedMemorySetFailure);
}
SHARED_POOL
.set(Box::new(LockedFrameAllocator::new()))
.map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;
Ok(())
}
/// Initialize the shared heap from a static region of memory.
///
/// Some hypervisors, such as Gunyah, do not support a MemShare API for the
/// guest to share its memory with the host. Instead, they allow the host to
/// designate part of guest memory as "shared" ahead of the guest starting
/// its execution. The shared memory region is indicated in the swiotlb node.
/// On such platforms, use a separate heap to allocate buffers that can be
/// shared with the host.
pub fn init_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
let size = NonZeroUsize::new(range.len()).unwrap();
let range = self.alloc_mut(range.start, size)?;
let shared_pool = LockedHeap::<32>::new();
let shared_pool = LockedFrameAllocator::<32>::new();
// SAFETY - `range` should be a valid region of memory as validated by
// `validate_swiotlb_info` and not used by any other rust code.
unsafe {
shared_pool.lock().init(range.start, range.len());
}
shared_pool.lock().insert(range);
SHARED_POOL
.set(Box::new(shared_pool))
@@ -297,6 +367,11 @@ impl MemoryTracker {
Ok(())
}
/// Unshares any memory that may have been shared.
pub fn unshare_all_memory(&mut self) {
drop(SHARED_MEMORY.lock().take());
}
}
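For illustration, a minimal sketch (not part of this change) of how a caller chooses between the two initialisers; it mirrors the MemorySlices hunk above, the function name is made up, and the pre-shared range is hypothetical (it would normally come from the swiotlb node):

fn init_shared_heap_example(memory: &mut MemoryTracker) -> Result<()> {
    if get_hypervisor().has_cap(HypervisorCap::DYNAMIC_MEM_SHARE) {
        // MemShare is available: SHARED_POOL is refilled on demand from the
        // global heap by a MemorySharer and released in unshare_all_memory().
        memory.init_dynamic_shared_pool()
    } else {
        // No MemShare (e.g. Gunyah): use a host-designated, pre-shared range.
        memory.init_static_shared_pool(0x8000_0000..0x8040_0000)
    }
}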
impl Drop for MemoryTracker {
@@ -310,71 +385,37 @@ impl Drop for MemoryTracker {
MemoryType::ReadOnly => {}
}
}
self.unshare_all_memory()
}
}
/// Gives the KVM host read, write and execute permissions on the given memory range. If the range
/// is not aligned with the memory protection granule then it will be extended on either end to
/// align.
fn share_range(range: &MemoryRange, granule: usize) -> hyp::Result<()> {
for base in (align_down(range.start, granule)
.expect("Memory protection granule was not a power of two")..range.end)
.step_by(granule)
{
get_hypervisor().mem_share(base as u64)?;
}
Ok(())
}
/// Removes permission from the KVM host to access the given memory range which was previously
/// shared. If the range is not aligned with the memory protection granule then it will be extended
/// on either end to align.
fn unshare_range(range: &MemoryRange, granule: usize) -> hyp::Result<()> {
for base in (align_down(range.start, granule)
.expect("Memory protection granule was not a power of two")..range.end)
.step_by(granule)
{
get_hypervisor().mem_unshare(base as u64)?;
}
Ok(())
}
/// Allocates a memory range of at least the given size that is shared with
/// host. Returns a pointer to the buffer.
///
/// It will be aligned to the memory sharing granule size supported by the hypervisor.
pub fn alloc_shared(size: usize) -> hyp::Result<NonNull<u8>> {
let layout = shared_buffer_layout(size)?;
let granule = layout.align();
if let Some(shared_pool) = SHARED_POOL.get() {
// Safe because `shared_buffer_layout` panics if the size is 0, so the
// layout must have a non-zero size.
let buffer = unsafe { shared_pool.alloc_zeroed(layout) };
let Some(buffer) = NonNull::new(buffer) else {
handle_alloc_error(layout);
};
return Ok(buffer);
}
// Safe because `shared_buffer_layout` panics if the size is 0, so the layout must have a
// non-zero size.
let buffer = unsafe { alloc_zeroed(layout) };
let Some(buffer) = NonNull::new(buffer) else {
pub fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
assert_ne!(layout.size(), 0);
let Some(buffer) = try_shared_alloc(layout) else {
handle_alloc_error(layout);
};
let paddr = virt_to_phys(buffer);
// If share_range fails then we will leak the allocation, but that seems better than having it
// be reused while maybe still partially shared with the host.
share_range(&(paddr..paddr + layout.size()), granule)?;
trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
Ok(buffer)
}
fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
let mut shared_pool = SHARED_POOL.get().unwrap().lock();
if let Some(buffer) = shared_pool.alloc_aligned(layout) {
Some(NonNull::new(buffer as _).unwrap())
} else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
shared_memory.refill(&mut shared_pool, layout);
shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
} else {
None
}
}
/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The size passed in must be the size passed to the original `alloc_shared` call.
@@ -383,41 +424,13 @@ pub fn alloc_shared(size: usize) -> hyp::Result<NonNull<u8>> {
///
/// The memory must have been allocated by `alloc_shared` with the same size, and not yet
/// deallocated.
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, size: usize) -> hyp::Result<()> {
let layout = shared_buffer_layout(size)?;
let granule = layout.align();
if let Some(shared_pool) = SHARED_POOL.get() {
// Safe because the memory was allocated by `alloc_shared` above using
// the same allocator, and the layout is the same as was used then.
unsafe { shared_pool.dealloc(vaddr.as_ptr(), layout) };
return Ok(());
}
let paddr = virt_to_phys(vaddr);
unshare_range(&(paddr..paddr + layout.size()), granule)?;
// Safe because the memory was allocated by `alloc_shared` above using the same allocator, and
// the layout is the same as was used then.
unsafe { dealloc(vaddr.as_ptr(), layout) };
pub unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);
trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
Ok(())
}
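With the new `Layout`-based API, the caller passes the same layout to both functions instead of a raw size. A hedged usage sketch (not part of this change; the helper name is made up):

fn shared_scratch_buffer_example() -> hyp::Result<()> {
    // Allocate a 512-byte, 16-byte-aligned buffer that the host can access.
    let layout = core::alloc::Layout::from_size_align(512, 16).unwrap();
    let buf = alloc_shared(layout)?;
    // ... hand `buf` to the host, e.g. as a bounce buffer ...
    // SAFETY - `buf` was returned by alloc_shared() with exactly this layout
    // and has not been deallocated yet.
    unsafe { dealloc_shared(buf, layout) }
}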
/// Returns the layout to use for allocating a buffer of at least the given size shared with the
/// host.
///
/// It will be aligned to the memory sharing granule size supported by the hypervisor.
///
/// Panics if `size` is 0.
fn shared_buffer_layout(size: usize) -> hyp::Result<Layout> {
assert_ne!(size, 0);
let granule = get_hypervisor().memory_protection_granule()?;
let allocated_size =
align_up(size, granule).expect("Memory protection granule was not a power of two");
Ok(Layout::from_size_align(allocated_size, granule).unwrap())
}
/// Returns an iterator which yields the base address of each 4 KiB page within the given range.
fn page_iterator(range: &MemoryRange) -> impl Iterator<Item = usize> {
(page_4kb_of(range.start)..range.end).step_by(SIZE_4KB)
......
// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! HAL for the virtio_drivers crate.
use super::pci::PCI_INFO;
use crate::helpers::RangeExt as _;
use crate::memory::{alloc_shared, dealloc_shared, phys_to_virt, virt_to_phys};
use core::{
ops::Range,
ptr::{copy_nonoverlapping, NonNull},
};
use log::debug;
use core::alloc::Layout;
use core::mem::size_of;
use core::ptr::{copy_nonoverlapping, NonNull};
use log::trace;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
pub struct HalImpl;
impl Hal for HalImpl {
/// Implements the `Hal` trait for `HalImpl`.
///
/// # Safety
///
/// Callers of this implementation must follow the safety requirements documented in the `Hal`
/// trait for the unsafe methods.
unsafe impl Hal for HalImpl {
/// Allocates the given number of contiguous physical pages of DMA memory for VirtIO use.
///
/// # Implementation Safety
///
/// `dma_alloc` ensures the returned DMA buffer is not aliased with any other allocation or
/// reference in the program until it is deallocated by `dma_dealloc` by allocating a unique
/// block of memory using `alloc_shared` and returning a non-null pointer to it that is
/// aligned to `PAGE_SIZE`.
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
let size = pages * PAGE_SIZE;
let vaddr =
alloc_shared(size).expect("Failed to allocate and share VirtIO DMA range with host");
let vaddr = alloc_shared(dma_layout(pages))
.expect("Failed to allocate and share VirtIO DMA range with host");
// TODO(ptosi): Move this zeroing to virtio_drivers, if it silently wants a zeroed region.
// SAFETY - vaddr points to a region allocated for the caller so is safe to access.
unsafe { core::ptr::write_bytes(vaddr.as_ptr(), 0, dma_layout(pages).size()) };
let paddr = virt_to_phys(vaddr);
(paddr, vaddr)
}
fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let size = pages * PAGE_SIZE;
// Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
// the layout is the same as was used then.
unsafe {
dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO DMA range with host");
}
unsafe fn dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
// SAFETY - Memory was allocated by `dma_alloc` using `alloc_shared` with the same size.
unsafe { dealloc_shared(vaddr, dma_layout(pages)) }
.expect("Failed to unshare VirtIO DMA range with host");
0
}
fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
/// Converts a physical address used for MMIO to a virtual address which the driver can access.
///
/// # Implementation Safety
///
/// `mmio_phys_to_virt` satisfies the requirement by checking that the mapped memory region
/// is within the PCI MMIO range.
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
let pci_info = PCI_INFO.get().expect("VirtIO HAL used before PCI_INFO was initialised");
let bar_range = {
let start = pci_info.bar_range.start.try_into().unwrap();
let end = pci_info.bar_range.end.try_into().unwrap();
start..end
};
let mmio_range = paddr..paddr.checked_add(size).expect("PCI MMIO region end overflowed");
// Check that the region is within the PCI MMIO range that we read from the device tree. If
// not, the host is probably trying to do something malicious.
if !contains_range(
&pci_info.bar_range,
&(paddr.try_into().expect("PCI MMIO region start was outside of 32-bit address space")
..paddr
.checked_add(size)
.expect("PCI MMIO region end overflowed")
.try_into()
.expect("PCI MMIO region end was outside of 32-bit address space")),
) {
panic!("PCI MMIO region was outside of expected BAR range.");
}
assert!(
mmio_range.is_within(&bar_range),
"PCI MMIO region was outside of expected BAR range.",
);
phys_to_virt(paddr)
}
fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
let size = buffer.len();
// TODO: Copy to a pre-shared region rather than allocating and sharing each time.
// Allocate a range of pages, copy the buffer if necessary, and share the new range instead.
let copy =
alloc_shared(size).expect("Failed to allocate and share VirtIO buffer with host");
let bounce = alloc_shared(bb_layout(size))
.expect("Failed to allocate and share VirtIO bounce buffer with host");
let paddr = virt_to_phys(bounce);
if direction == BufferDirection::DriverToDevice {
unsafe {
copy_nonoverlapping(buffer.as_ptr() as *mut u8, copy.as_ptr(), size);
}
let src = buffer.cast::<u8>().as_ptr().cast_const();
trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) initialized from {src:?}");
// SAFETY - Both regions are valid, properly aligned, and don't overlap.
unsafe { copy_nonoverlapping(src, bounce.as_ptr(), size) };
}
virt_to_phys(copy)
paddr
}
fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
let vaddr = phys_to_virt(paddr);
unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
let bounce = phys_to_virt(paddr);
let size = buffer.len();
if direction == BufferDirection::DeviceToDriver {
debug!(
"Copying VirtIO buffer back from {:#x} to {:#x}.",
paddr,
buffer.as_ptr() as *mut u8 as usize
);
unsafe {
copy_nonoverlapping(vaddr.as_ptr(), buffer.as_ptr() as *mut u8, size);
}
let dest = buffer.cast::<u8>().as_ptr();
trace!("VirtIO bounce buffer at {bounce:?} (PA:{paddr:#x}) copied back to {dest:?}");
// SAFETY - Both regions are valid, properly aligned, and don't overlap.
unsafe { copy_nonoverlapping(bounce.as_ptr(), dest, size) };
}
// Unshare and deallocate the shared copy of the buffer.
debug!("Unsharing VirtIO buffer {:#x}", paddr);
// Safe because the memory was allocated by `share` using `alloc_shared`, and the size is
// the same as was used then.
unsafe {
dealloc_shared(vaddr, size).expect("Failed to unshare VirtIO buffer with host");
}
// SAFETY - Memory was allocated by `share` using `alloc_shared` with the same size.
unsafe { dealloc_shared(bounce, bb_layout(size)) }
.expect("Failed to unshare and deallocate VirtIO bounce buffer");
}
}
/// Returns true if `inner` is entirely contained within `outer`.
fn contains_range(outer: &Range<u32>, inner: &Range<u32>) -> bool {
inner.start >= outer.start && inner.end <= outer.end
fn dma_layout(pages: usize) -> Layout {
let size = pages.checked_mul(PAGE_SIZE).unwrap();
Layout::from_size_align(size, PAGE_SIZE).unwrap()
}
fn bb_layout(size: usize) -> Layout {
// In theory, it would be legal to align to 1 byte, but we use a larger alignment for good measure.
const VIRTIO_BOUNCE_BUFFER_ALIGN: usize = size_of::<u128>();
Layout::from_size_align(size, VIRTIO_BOUNCE_BUFFER_ALIGN).unwrap()
}
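For reference, a hedged sketch (not part of this change) of what the two helpers produce, using the `PAGE_SIZE` constant imported from virtio_drivers above:

fn layout_examples() {
    // Two DMA pages become one page-aligned request spanning two pages.
    assert_eq!((dma_layout(2).size(), dma_layout(2).align()), (2 * PAGE_SIZE, PAGE_SIZE));
    // A 100-byte bounce buffer only needs 16-byte (u128) alignment.
    assert_eq!((bb_layout(100).size(), bb_layout(100).align()), (100, 16));
}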
@@ -98,7 +98,7 @@ pub fn get_bar_region(pci_info: &PciInfo) -> MemoryRegion {
struct HalImpl;
impl Hal for HalImpl {
unsafe impl Hal for HalImpl {
fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
debug!("dma_alloc: pages={}", pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
@@ -110,7 +110,7 @@ impl Hal for HalImpl {
(paddr, vaddr)
}
fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
// Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
@@ -121,17 +121,17 @@ impl Hal for HalImpl {
0
}
fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
NonNull::new(paddr as _).unwrap()
}
fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
let vaddr = buffer.cast();
// Nothing to do, as the host already has access to all memory.
virt_to_phys(vaddr)
}
fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
unsafe fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
// Nothing to do, as the host already has access to all memory and we didn't copy the buffer
// anywhere else.
}
......