#![no_std]
#[macro_use]
extern crate log;
extern crate alloc;
mod aspace;
mod backend;
pub use self::aspace::AddrSpace;
pub use self::backend::Backend;
use axerrno::{AxError, AxResult};
use axhal::mem::phys_to_virt;
use kspin::SpinNoIrq;
use lazyinit::LazyInit;
use memory_addr::{va, PhysAddr, VirtAddr};
use memory_set::MappingError;
/// The global kernel address space, initialized once by [`init_memory_management`]
/// and then shared behind an IRQ-disabling spinlock.
static KERNEL_ASPACE: LazyInit<SpinNoIrq<AddrSpace>> = LazyInit::new();
/// Converts a [`MappingError`] from the `memory_set` crate into the
/// corresponding [`AxError`], logging the original error at `warn` level.
fn mapping_err_to_ax_err(err: MappingError) -> AxError {
    warn!("Mapping error: {:?}", err);
    // Exhaustive one-to-one translation; a new `MappingError` variant will
    // surface as a compile error here rather than being silently dropped.
    match err {
        MappingError::BadState => AxError::BadState,
        MappingError::AlreadyExists => AxError::AlreadyExists,
        MappingError::InvalidParam => AxError::InvalidInput,
    }
}
/// Builds the kernel address space.
///
/// Creates an empty address space covering the configured kernel range, then
/// installs a linear (virt = phys + offset) mapping for every non-empty
/// physical memory region reported by the HAL.
///
/// # Errors
///
/// Propagates any error from address-space creation or from `map_linear`.
pub fn new_kernel_aspace() -> AxResult<AddrSpace> {
    let mut kspace = AddrSpace::new_empty(
        va!(axconfig::KERNEL_ASPACE_BASE),
        axconfig::KERNEL_ASPACE_SIZE,
    )?;
    for region in axhal::mem::memory_regions() {
        if region.size > 0 {
            // Linear mapping: the virtual address is derived directly from
            // the physical one, preserving the region's access flags.
            kspace.map_linear(
                phys_to_virt(region.paddr),
                region.paddr,
                region.size,
                region.flags.into(),
            )?;
        } else {
            info!("Skip zero-size memory region: {:?}", region);
        }
    }
    Ok(kspace)
}
/// Creates a new, empty user address space spanning `[base, base + size)`.
///
/// On every architecture except aarch64 the kernel mappings are copied into
/// the new space as well. NOTE(review): presumably aarch64 keeps kernel and
/// user translations in separate page-table roots, making the copy
/// unnecessary there — confirm against the paging backend.
///
/// # Errors
///
/// Propagates any error from address-space creation or mapping duplication.
pub fn new_user_aspace(base: VirtAddr, size: usize) -> AxResult<AddrSpace> {
    let mut uspace = AddrSpace::new_empty(base, size)?;
    if cfg!(not(target_arch = "aarch64")) {
        uspace.copy_mappings_from(&kernel_aspace().lock())?;
    }
    Ok(uspace)
}
/// Returns a reference to the lock guarding the global kernel address space.
///
/// Panics (inside `LazyInit`'s deref) if called before
/// [`init_memory_management`] has run.
pub fn kernel_aspace() -> &'static SpinNoIrq<AddrSpace> {
    // Explicitly deref through `LazyInit` and reborrow the inner lock.
    &*KERNEL_ASPACE
}
pub fn kernel_page_table_root() -> PhysAddr {
KERNEL_ASPACE.lock().page_table_root()
}
/// Initializes virtual memory management on the boot CPU.
///
/// Builds the kernel address space, publishes it in the global
/// [`KERNEL_ASPACE`] slot, and switches the hardware to its page table.
/// Panics if the kernel address space cannot be constructed, since the
/// system cannot proceed without one.
pub fn init_memory_management() {
    info!("Initialize virtual memory management...");
    let aspace = new_kernel_aspace().expect("failed to initialize kernel address space");
    debug!("kernel address space init OK: {:#x?}", aspace);
    KERNEL_ASPACE.init_once(SpinNoIrq::new(aspace));
    // Activate the freshly built kernel page table on this CPU.
    axhal::paging::set_kernel_page_table_root(kernel_page_table_root());
}
/// Initializes virtual memory management on a secondary CPU.
///
/// The kernel address space is already built by the boot CPU; this only
/// points the local MMU at the shared kernel page table.
pub fn init_memory_management_secondary() {
    let root = kernel_page_table_root();
    axhal::paging::set_kernel_page_table_root(root);
}