use axalloc::global_allocator;
use axhal::mem::{phys_to_virt, virt_to_phys};
use axhal::paging::{MappingFlags, PageSize, PageTable};
use memory_addr::{PageIter4K, PhysAddr, VirtAddr, PAGE_SIZE_4K};
use super::Backend;
/// Allocates a single 4K frame from the global allocator, optionally
/// zero-filled.
///
/// Returns the physical address of the new frame, or `None` if the
/// allocator is out of memory.
fn alloc_frame(zeroed: bool) -> Option<PhysAddr> {
    let va = global_allocator()
        .alloc_pages(1, PAGE_SIZE_4K)
        .ok()
        .map(VirtAddr::from)?;
    if zeroed {
        // SAFETY: `va` points to a freshly allocated, exclusively owned
        // 4K page, so writing PAGE_SIZE_4K bytes is in bounds.
        unsafe { core::ptr::write_bytes(va.as_mut_ptr(), 0, PAGE_SIZE_4K) };
    }
    Some(virt_to_phys(va))
}
/// Returns a previously allocated 4K frame to the global allocator.
fn dealloc_frame(frame: PhysAddr) {
    global_allocator().dealloc_pages(phys_to_virt(frame).as_usize(), 1);
}
impl Backend {
    /// Creates a new allocation mapping backend.
    ///
    /// If `populate` is `true`, physical frames are allocated and mapped
    /// eagerly in [`Backend::map_alloc`]; otherwise frames are allocated
    /// lazily on page faults.
    pub const fn new_alloc(populate: bool) -> Self {
        Self::Alloc { populate }
    }

    /// Maps the region `[start, start + size)` with `flags` in `pt`.
    ///
    /// With `populate`, every page is immediately backed by a zeroed frame.
    /// Without it, an empty (non-present) mapping is installed so that the
    /// first access faults and the frame is allocated on demand in
    /// [`Backend::handle_page_fault_alloc`].
    ///
    /// Returns `false` if frame allocation or page-table mapping fails.
    pub(crate) fn map_alloc(
        &self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        pt: &mut PageTable,
        populate: bool,
    ) -> bool {
        debug!(
            "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
            start,
            start + size,
            flags,
            populate
        );
        if populate {
            // Eager path: allocate and map a zeroed frame for every page.
            for addr in PageIter4K::new(start, start + size).unwrap() {
                match alloc_frame(true) {
                    Some(frame) => match pt.map(addr, frame, PageSize::Size4K, flags) {
                        // No TLB flush needed when creating a new mapping.
                        Ok(tlb) => tlb.ignore(),
                        Err(_) => {
                            // Mapping failed: free the frame we just took so
                            // it is not leaked.
                            dealloc_frame(frame);
                            return false;
                        }
                    },
                    // BUGFIX: an allocation failure was previously skipped
                    // silently, leaving unmapped holes in a region reported
                    // as successfully populated. Fail the whole call instead.
                    None => return false,
                }
            }
            true
        } else {
            // Lazy path: map the region with empty flags (non-present) so
            // the first touch of each page raises a fault to be handled by
            // `handle_page_fault_alloc`.
            let flags = MappingFlags::empty();
            pt.map_region(start, |_| 0.into(), size, flags, false, false)
                .map(|tlb| tlb.ignore())
                .is_ok()
        }
    }

    /// Unmaps `[start, start + size)` and frees the backing frames.
    ///
    /// Pages that were never populated (lazy mappings not yet faulted in)
    /// fail to unmap and are simply skipped. Returns `false` if a huge
    /// page is encountered, since it cannot be reclaimed as a single 4K
    /// frame here.
    pub(crate) fn unmap_alloc(
        &self,
        start: VirtAddr,
        size: usize,
        pt: &mut PageTable,
        _populate: bool,
    ) -> bool {
        debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
        for addr in PageIter4K::new(start, start + size).unwrap() {
            if let Ok((frame, page_size, tlb)) = pt.unmap(addr) {
                // Refuse to partially free a huge-page mapping.
                if page_size.is_huge() {
                    return false;
                }
                tlb.flush();
                dealloc_frame(frame);
            }
            // Unmap errors mean the page was never faulted in; ignore them.
        }
        true
    }

    /// Handles a page fault at `vaddr` for an `Alloc` mapping.
    ///
    /// Populated mappings have all frames mapped up front, so a fault on
    /// one indicates an access violation and is left unhandled (`false`).
    /// For lazy mappings, a zeroed frame is allocated and the empty entry
    /// installed by [`Backend::map_alloc`] is remapped with `orig_flags`.
    pub(crate) fn handle_page_fault_alloc(
        &self,
        vaddr: VirtAddr,
        orig_flags: MappingFlags,
        pt: &mut PageTable,
        populate: bool,
    ) -> bool {
        if populate {
            // Populated mappings should never fault.
            false
        } else if let Some(frame) = alloc_frame(true) {
            // Upgrade the lazily-created empty entry to the real flags and
            // flush the stale (non-present) TLB entry.
            pt.remap(vaddr, frame, orig_flags)
                .map(|(_, tlb)| tlb.flush())
                .is_ok()
        } else {
            // Out of memory: the fault cannot be serviced.
            false
        }
    }
}