13 changes: 13 additions & 0 deletions src/addr.rs
@@ -12,6 +12,19 @@ def_usize_addr! {
pub type GuestPhysAddr;
}

/// Note: this is purely a numeric conversion and carries no semantic meaning.
///
/// Why is this conversion needed?
/// Because the `GenericPTE` implementations provided by `page_table_entry::x86_64`
/// only accept `PhysAddr` as the physical address type, and introducing the
/// `GuestPhysAddr` concept into `GenericPTE` would bring a lot of complexity.
///
/// This admittedly ugly conversion is implemented just to make things work.
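///
/// A minimal usage sketch (the address value is illustrative):
///
/// ```ignore
/// let pa = PhysAddr::from(0x1000usize);
/// let gpa = GuestPhysAddr::from(pa); // numerically identical guest-physical address
/// ```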
impl From<PhysAddr> for GuestPhysAddr {
fn from(addr: PhysAddr) -> Self {
Self::from_usize(addr.into())
}
}

def_usize_addr_formatter! {
GuestVirtAddr = "GVA:{}";
GuestPhysAddr = "GPA:{}";
98 changes: 84 additions & 14 deletions src/address_space/backend/alloc.rs
@@ -1,10 +1,14 @@
use memory_addr::{PageIter4K, PhysAddr};
use page_table_multiarch::{MappingFlags, PageSize, PagingHandler};
use memory_addr::{
MemoryAddr, PAGE_SIZE_1G, PAGE_SIZE_2M, PAGE_SIZE_4K, PageIter1G, PageIter2M, PageIter4K,
PhysAddr,
};
use page_table_multiarch::{
GenericPTE, MappingFlags, PageSize, PageTable64, PagingHandler, PagingMetaData,
};

use super::Backend;
use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};

impl<H: PagingHandler> Backend<H> {
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Backend<M, PTE, H> {
/// Creates a new allocation mapping backend.
pub const fn new_alloc(populate: bool) -> Self {
Self::Alloc {
@@ -15,22 +19,56 @@ impl<H: PagingHandler> Backend<H> {

pub(crate) fn map_alloc(
&self,
start: GuestPhysAddr,
start: M::VirtAddr,
size: usize,
flags: MappingFlags,
pt: &mut PageTable<H>,
pt: &mut PageTable64<M, PTE, H>,
populate: bool,
) -> bool {
debug!(
"map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
start,
start + size,
start.add(size),
flags,
populate
);
if populate {
// allocate all possible physical frames for populated mapping.
for addr in PageIter4K::new(start, start + size).unwrap() {

let mut start_addr = start;
let end_addr = start_addr.add(size);
// First try to allocate 1GB pages if the start address is aligned and
// the size is large enough.
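// For example (sizes purely illustrative): a populated region of
// 1 GiB + 6 MiB + 8 KiB starting at a 1 GiB-aligned guest address is covered
// by one 1 GiB page, three 2 MiB pages, and two 4 KiB pages by the three
// loops below.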
if start_addr.is_aligned(PAGE_SIZE_1G) && size >= PAGE_SIZE_1G {
for addr in PageIter1G::new(start_addr, end_addr.align_down(PAGE_SIZE_1G)).unwrap()
{
if H::alloc_frames(PAGE_SIZE_1G / PAGE_SIZE_4K, PAGE_SIZE_1G)
.and_then(|frame| pt.map(addr, frame, PageSize::Size1G, flags).ok())
.is_none()
{
return false;
}
}
// Continue from the end of the last 1 GiB page; any unaligned tail is
// handled by the smaller page sizes below.
start_addr = end_addr.align_down(PAGE_SIZE_1G);
}

// Then try to allocate 2MB pages if the start address is aligned and
// the size is large enough.
if start_addr.is_aligned(PAGE_SIZE_2M) && size >= PAGE_SIZE_2M {
for addr in PageIter2M::new(start_addr, end_addr.align_down(PAGE_SIZE_2M)).unwrap()
{
if H::alloc_frames(PAGE_SIZE_2M / PAGE_SIZE_4K, PAGE_SIZE_2M)
.and_then(|frame| pt.map(addr, frame, PageSize::Size2M, flags).ok())
.is_none()
{
return false;
}
}
// Continue from the end of the last 2 MiB page; the remaining tail is
// mapped with 4 KiB pages below.
start_addr = end_addr.align_down(PAGE_SIZE_2M);
}

// Then try to allocate 4K pages.
for addr in PageIter4K::new(start_addr, end_addr).unwrap() {
if H::alloc_frame()
.and_then(|frame| pt.map(addr, frame, PageSize::Size4K, flags).ok())
.is_none()
@@ -55,13 +93,45 @@ impl<H: PagingHandler> Backend<H> {

pub(crate) fn unmap_alloc(
&self,
start: GuestPhysAddr,
start: M::VirtAddr,
size: usize,
pt: &mut PageTable<H>,
pt: &mut PageTable64<M, PTE, H>,
_populate: bool,
) -> bool {
debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
for addr in PageIter4K::new(start, start + size).unwrap() {
debug!("unmap_alloc: [{:#x}, {:#x})", start, start.add(size));

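// Walk the region at whatever granularity the page table reports, releasing
// the frames behind each mapped page (1 GiB, 2 MiB, or 4 KiB).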
let mut addr = start;
while addr < start.add(size) {
if let Ok((frame, _flags, page_size)) = pt.query(addr) {
// Deallocate the physical frame if there is a mapping in the
// page table.
match page_size {
PageSize::Size1G => {
if !addr.is_aligned(PAGE_SIZE_1G) {
return false;
}
H::dealloc_frames(frame, PAGE_SIZE_1G / PAGE_SIZE_4K);
}
PageSize::Size2M => {
if !addr.is_aligned(PAGE_SIZE_2M) {
return false;
}
H::dealloc_frames(frame, PAGE_SIZE_2M / PAGE_SIZE_4K);
}
PageSize::Size4K => {
if !addr.is_aligned(PAGE_SIZE_4K) {
return false;
}
H::dealloc_frame(frame);
}
}
addr = addr.add(page_size as usize);
} else {
// It's fine if the page is not mapped; just advance to the next 4 KiB page.
addr = addr.add(PAGE_SIZE_4K);
}
}

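// The mapping entries themselves are removed below; the frames backing them
// have already been released above.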
for addr in PageIter4K::new(start, start.add(size)).unwrap() {
if let Ok((frame, page_size, _)) = pt.unmap(addr) {
// Deallocate the physical frame if there is a mapping in the
// page table.
@@ -78,9 +148,9 @@ impl<H: PagingHandler> Backend<H> {

pub(crate) fn handle_page_fault_alloc(
&self,
vaddr: GuestPhysAddr,
vaddr: M::VirtAddr,
orig_flags: MappingFlags,
pt: &mut PageTable<H>,
pt: &mut PageTable64<M, PTE, H>,
populate: bool,
) -> bool {
if populate {
33 changes: 18 additions & 15 deletions src/address_space/backend/linear.rs
@@ -1,51 +1,54 @@
use memory_addr::PhysAddr;
use page_table_multiarch::{MappingFlags, PagingHandler};
use memory_addr::{MemoryAddr, PhysAddr};
use page_table_multiarch::{GenericPTE, MappingFlags, PageTable64, PagingHandler, PagingMetaData};

use super::Backend;
use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};

impl<H: PagingHandler> Backend<H> {
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Backend<M, PTE, H> {
/// Creates a new linear mapping backend.
pub const fn new_linear(pa_va_offset: usize) -> Self {
Self::Linear { pa_va_offset }
pub const fn new_linear(pa_va_offset: usize, allow_huge: bool) -> Self {
Self::Linear {
pa_va_offset,
allow_huge,
}
}
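// For example (offset value is illustrative): with `pa_va_offset = 0x8000_0000`,
// guest-physical address 0x8000_1000 maps linearly to host-physical 0x1000, and
// `allow_huge` lets `map_region` use 2 MiB / 1 GiB mappings where the alignment
// and size permit.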

pub(crate) fn map_linear(
&self,
start: GuestPhysAddr,
start: M::VirtAddr,
size: usize,
flags: MappingFlags,
pt: &mut PageTable<H>,
pt: &mut PageTable64<M, PTE, H>,
allow_huge: bool,
pa_va_offset: usize,
) -> bool {
let pa_start = PhysAddr::from(start.as_usize() - pa_va_offset);
let pa_start = PhysAddr::from(start.into() - pa_va_offset);
debug!(
"map_linear: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}",
start,
start + size,
start.add(size),
pa_start,
pa_start + size,
flags
);
pt.map_region(
start,
|va| PhysAddr::from(va.as_usize() - pa_va_offset),
|va| PhysAddr::from(va.into() - pa_va_offset),
size,
flags,
false,
allow_huge,
false,
)
.is_ok()
}

pub(crate) fn unmap_linear(
&self,
start: GuestPhysAddr,
start: M::VirtAddr,
size: usize,
pt: &mut PageTable<H>,
pt: &mut PageTable64<M, PTE, H>,
_pa_va_offset: usize,
) -> bool {
debug!("unmap_linear: [{:#x}, {:#x})", start, start + size);
debug!("unmap_linear: [{:#x}, {:#x})", start, start.add(size));
pt.unmap_region(start, size, true).is_ok()
}
}
66 changes: 42 additions & 24 deletions src/address_space/backend/mod.rs
@@ -1,9 +1,8 @@
//! Memory mapping backends.

use memory_addr::MemoryAddr;
use memory_set::MappingBackend;
use page_table_multiarch::{MappingFlags, PagingHandler};

use crate::{GuestPhysAddr, npt::NestedPageTable as PageTable};
use page_table_multiarch::{GenericPTE, MappingFlags, PageTable64, PagingHandler, PagingMetaData};

mod alloc;
mod linear;
@@ -16,7 +15,7 @@ mod linear;
/// contiguous and their addresses should be known when creating the mapping.
/// - **Allocation**: used in general, or for lazy mappings. The target physical
/// frames are obtained from the global allocator.
pub enum Backend<H: PagingHandler> {
pub enum Backend<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> {
/// Linear mapping backend.
///
/// The offset between the virtual address and the physical address is
@@ -25,6 +24,7 @@ pub enum Backend<H: PagingHandler> {
Linear {
/// `vaddr - paddr`.
pa_va_offset: usize,
allow_huge: bool,
},
/// Allocation mapping backend.
///
@@ -36,14 +36,20 @@ pub enum Backend<H: PagingHandler> {
/// Whether to populate the physical frames when creating the mapping.
populate: bool,
/// A phantom data for the paging handler.
_phantom: core::marker::PhantomData<H>,
_phantom: core::marker::PhantomData<(M, PTE, H)>,
},
}
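// Construction goes through the `new_linear` / `new_alloc` constructors, e.g.
// (values illustrative): `Backend::new_linear(0x8000_0000, true)` for a
// fixed-offset mapping with huge pages allowed, or `Backend::new_alloc(false)`
// for a lazily populated mapping.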

impl<H: PagingHandler> Clone for Backend<H> {
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Clone for Backend<M, PTE, H> {
fn clone(&self) -> Self {
match *self {
Self::Linear { pa_va_offset } => Self::Linear { pa_va_offset },
Self::Linear {
pa_va_offset,
allow_huge,
} => Self::Linear {
pa_va_offset,
allow_huge,
},
Self::Alloc { populate, .. } => Self::Alloc {
populate,
_phantom: core::marker::PhantomData,
@@ -52,49 +58,61 @@ impl<H: PagingHandler> Clone for Backend<H> {
}
}

impl<H: PagingHandler> MappingBackend for Backend<H> {
type Addr = GuestPhysAddr;
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> MappingBackend for Backend<M, PTE, H> {
type Addr = M::VirtAddr;
type Flags = MappingFlags;
type PageTable = PageTable<H>;
type PageTable = PageTable64<M, PTE, H>;

fn map(
&self,
start: GuestPhysAddr,
start: M::VirtAddr,
size: usize,
flags: MappingFlags,
pt: &mut PageTable<H>,
pt: &mut Self::PageTable,
) -> bool {
match *self {
Self::Linear { pa_va_offset } => self.map_linear(start, size, flags, pt, pa_va_offset),
Self::Linear {
pa_va_offset,
allow_huge,
} => self.map_linear(start, size, flags, pt, allow_huge, pa_va_offset),
Self::Alloc { populate, .. } => self.map_alloc(start, size, flags, pt, populate),
}
}

fn unmap(&self, start: GuestPhysAddr, size: usize, pt: &mut PageTable<H>) -> bool {
fn unmap(&self, start: M::VirtAddr, size: usize, pt: &mut Self::PageTable) -> bool {
match *self {
Self::Linear { pa_va_offset } => self.unmap_linear(start, size, pt, pa_va_offset),
Self::Linear { pa_va_offset, .. } => self.unmap_linear(start, size, pt, pa_va_offset),
Self::Alloc { populate, .. } => self.unmap_alloc(start, size, pt, populate),
}
}

fn protect(
&self,
_start: GuestPhysAddr,
_size: usize,
_new_flags: MappingFlags,
_page_table: &mut PageTable<H>,
start: M::VirtAddr,
size: usize,
new_flags: MappingFlags,
page_table: &mut Self::PageTable,
) -> bool {
// a stub here
true
debug!(
"protect_region({:#x}) [{:#x}, {:#x}) {:?}",
page_table.root_paddr(),
start,
start.add(size),
new_flags,
);
page_table
.protect_region(start, size, new_flags, true)
.map(|tlb| tlb.ignore())
.is_ok()
}
}

impl<H: PagingHandler> Backend<H> {
impl<M: PagingMetaData, PTE: GenericPTE, H: PagingHandler> Backend<M, PTE, H> {
pub(crate) fn handle_page_fault(
&self,
vaddr: GuestPhysAddr,
vaddr: M::VirtAddr,
orig_flags: MappingFlags,
page_table: &mut PageTable<H>,
page_table: &mut PageTable64<M, PTE, H>,
) -> bool {
match *self {
Self::Linear { .. } => false, // Linear mappings should not trigger page faults.