#![cfg_attr(not(test), no_std)]
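//! User address space management: a `MemorySet` couples a page table with the
//! memory areas mapped into it and provides `mmap`/`munmap`/`mprotect`-style
//! operations, lazy page allocation, and shared-memory (shm) bookkeeping.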
mod area;
mod backend;
mod shared;
pub use area::MapArea;
use axerrno::{AxError, AxResult};
pub use backend::MemBackend;
extern crate alloc;
use alloc::{collections::BTreeMap, sync::Arc, vec::Vec};
use core::sync::atomic::{AtomicI32, Ordering};
use page_table_entry::GenericPTE;
use shared::SharedMem;
use spinlock::SpinNoIrq;
#[macro_use]
extern crate log;
use axhal::{
arch::flush_tlb,
mem::{memory_regions, phys_to_virt, PhysAddr, VirtAddr, PAGE_SIZE_4K},
paging::{MappingFlags, PageSize, PageTable},
};
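/// Next shared-memory id to hand out.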
static SHMID: AtomicI32 = AtomicI32::new(1);
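/// Globally registered shared-memory segments, keyed by shmid.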
pub static SHARED_MEMS: SpinNoIrq<BTreeMap<i32, Arc<SharedMem>>> = SpinNoIrq::new(BTreeMap::new());
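/// Maps user-supplied keys to the shmids allocated for them.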
pub static KEY_TO_SHMID: SpinNoIrq<BTreeMap<i32, i32>> = SpinNoIrq::new(BTreeMap::new());
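/// A user address space: a page table plus the memory areas and shared-memory
/// segments mapped into it.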
pub struct MemorySet {
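    /// The page table backing this address space.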
page_table: PageTable,
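    /// Memory areas owned by this address space, keyed by start address.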
owned_mem: BTreeMap<usize, MapArea>,
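    /// Shared-memory segments private to this address space, keyed by shmid.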
private_mem: BTreeMap<i32, Arc<SharedMem>>,
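    /// Shared-memory segments currently attached, with their mapping address and flags.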
attached_mem: Vec<(VirtAddr, MappingFlags, Arc<SharedMem>)>,
}
impl MemorySet {
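    /// Root physical address of the page table, used as the address-space token.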
pub fn page_table_token(&self) -> usize {
self.page_table.root_paddr().as_usize()
}
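    /// Creates an address space with a fresh, empty page table.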
pub fn new_empty() -> Self {
Self {
page_table: PageTable::try_new().expect("Error allocating page table."),
owned_mem: BTreeMap::new(),
private_mem: BTreeMap::new(),
attached_mem: Vec::new(),
}
}
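    /// Creates an address space whose page table already maps all kernel memory regions.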
pub fn new_with_kernel_mapped() -> Self {
let mut page_table = PageTable::try_new().expect("Error allocating page table.");
for r in memory_regions() {
debug!(
"mapping kernel region [0x{:x}, 0x{:x})",
usize::from(phys_to_virt(r.paddr)),
usize::from(phys_to_virt(r.paddr)) + r.size,
);
page_table
.map_region(phys_to_virt(r.paddr), r.paddr, r.size, r.flags.into(), true)
.expect("Error mapping kernel memory");
}
Self {
page_table,
owned_mem: BTreeMap::new(),
private_mem: BTreeMap::new(),
attached_mem: Vec::new(),
}
}
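    /// Physical address of the page-table root.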
pub fn page_table_root_ppn(&self) -> PhysAddr {
self.page_table.root_paddr()
}
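    /// Highest virtual address covered by any owned area (zero if there are none).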
pub fn max_va(&self) -> VirtAddr {
self.owned_mem
.last_key_value()
.map(|(_, area)| area.end_va())
.unwrap_or_default()
}
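    /// Maps a new area of `size` bytes (rounded up to whole pages) at `vaddr`.
    /// If `data` is given the pages are allocated and filled eagerly, otherwise
    /// the area is mapped lazily. Callers are expected to clear the target range
    /// first (e.g. via `split_for_area`).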
pub fn new_region(
&mut self,
vaddr: VirtAddr,
size: usize,
flags: MappingFlags,
data: Option<&[u8]>,
backend: Option<MemBackend>,
) {
let num_pages = (size + PAGE_SIZE_4K - 1) / PAGE_SIZE_4K;
let area = match data {
Some(data) => MapArea::new_alloc(
vaddr,
num_pages,
flags,
Some(data),
backend,
&mut self.page_table,
)
.unwrap(),
None => MapArea::new_lazy(vaddr, num_pages, flags, backend, &mut self.page_table),
};
info!(
"allocating [0x{:x}, 0x{:x}) to [0x{:x}, 0x{:x}) flag: {:?}",
usize::from(vaddr),
usize::from(vaddr) + size,
usize::from(area.vaddr),
usize::from(area.vaddr) + area.size(),
flags
);
assert!(self.owned_mem.insert(area.vaddr.into(), area).is_none());
}
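    /// Removes any overlap between the owned areas and `[start, start + size)`,
    /// dropping, splitting or shrinking areas as needed and freeing the pages
    /// that fall inside the range.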
pub fn split_for_area(&mut self, start: VirtAddr, size: usize) {
let end = start + size;
assert!(end.is_aligned_4k());
let mut overlapped_area: Vec<(usize, MapArea)> = Vec::new();
let mut prev_area: BTreeMap<usize, MapArea> = BTreeMap::new();
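        // Drain `owned_mem`, separating the areas that overlap [start, end) from the rest.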
for _ in 0..self.owned_mem.len() {
let (idx, area) = self.owned_mem.pop_first().unwrap();
if area.overlap_with(start, end) {
overlapped_area.push((idx, area));
} else {
prev_area.insert(idx, area);
}
}
self.owned_mem = prev_area;
info!("splitting for [{:?}, {:?})", start, end);
for (_, mut area) in overlapped_area {
if area.contained_in(start, end) {
info!(" drop [{:?}, {:?})", area.vaddr, area.end_va());
area.dealloc(&mut self.page_table);
drop(area);
} else if area.strict_contain(start, end) {
info!(
" split [{:?}, {:?}) into 2 areas",
area.vaddr,
area.end_va()
);
let new_area = area.remove_mid(start, end, &mut self.page_table);
assert!(self
.owned_mem
.insert(new_area.vaddr.into(), new_area)
.is_none());
assert!(self.owned_mem.insert(area.vaddr.into(), area).is_none());
} else if start <= area.vaddr && area.vaddr < end {
info!(
" shrink_left [{:?}, {:?}) to [{:?}, {:?})",
area.vaddr,
area.end_va(),
end,
area.end_va()
);
area.shrink_left(end, &mut self.page_table);
assert!(self.owned_mem.insert(area.vaddr.into(), area).is_none());
} else {
info!(
" shrink_right [{:?}, {:?}) to [{:?}, {:?})",
area.vaddr,
area.end_va(),
area.vaddr,
start
);
area.shrink_right(start, &mut self.page_table);
assert!(self.owned_mem.insert(area.vaddr.into(), area).is_none());
}
}
}
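    /// Searches the gaps between existing mappings, starting at `hint` (but no
    /// lower than `USER_MEMORY_START`), for a free range of at least `size`
    /// bytes. Returns `None` if no suitable gap is found.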
pub fn find_free_area(&self, hint: VirtAddr, size: usize) -> Option<VirtAddr> {
let mut last_end = hint.max(axconfig::USER_MEMORY_START.into()).as_usize();
let mut segments: Vec<_> = self
.owned_mem
.iter()
.map(|(start, mem)| (*start, *start + mem.size()))
.collect();
segments.extend(
self.attached_mem
.iter()
.map(|(start, _, mem)| (start.as_usize(), start.as_usize() + mem.size())),
);
segments.sort();
for (start, end) in segments {
if last_end + size <= start {
return Some(last_end.into());
}
last_end = end;
}
None
}
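    /// Maps a region of `size` bytes (rounded up to whole pages). With `fixed`,
    /// any existing overlap at `start` is unmapped first; otherwise a free range
    /// is searched for starting at `start`. Returns the chosen start address,
    /// or -1 on failure.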
pub fn mmap(
&mut self,
start: VirtAddr,
size: usize,
flags: MappingFlags,
fixed: bool,
backend: Option<MemBackend>,
) -> isize {
let size = (size + PAGE_SIZE_4K - 1) / PAGE_SIZE_4K * PAGE_SIZE_4K;
info!(
"[mmap] vaddr: [{:?}, {:?}), {:?}, fixed: {}, backend: {}",
start,
start + size,
flags,
fixed,
backend.is_some()
);
let addr = if fixed {
self.split_for_area(start, size);
self.new_region(start, size, flags, None, backend);
            flush_tlb(None);
start.as_usize() as isize
} else {
info!("find free area");
let start = self.find_free_area(start, size);
match start {
Some(start) => {
info!("found area [{:?}, {:?})", start, start + size);
self.new_region(start, size, flags, None, backend);
flush_tlb(None);
start.as_usize() as isize
}
None => -1,
}
};
debug!("[mmap] return addr: 0x{:x}", addr);
addr
}
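    /// Unmaps `[start, start + size)` (size rounded up to whole pages) and frees the affected pages.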
pub fn munmap(&mut self, start: VirtAddr, size: usize) {
let size = (size + PAGE_SIZE_4K - 1) / PAGE_SIZE_4K * PAGE_SIZE_4K;
info!("[munmap] [{:?}, {:?})", start, (start + size).align_up_4k());
self.split_for_area(start, size);
}
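    /// Writes every backed page that falls inside `[start, start + size)` back to its backend.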
pub fn msync(&mut self, start: VirtAddr, size: usize) {
let end = start + size;
for area in self.owned_mem.values_mut() {
if area.backend.is_none() {
continue;
}
if area.overlap_with(start, end) {
for page_index in 0..area.pages.len() {
let page_vaddr = area.vaddr + page_index * PAGE_SIZE_4K;
if page_vaddr >= start && page_vaddr < end {
area.sync_page_with_backend(page_index);
}
}
}
}
}
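    /// Changes the mapping flags of every owned page inside `[start, start + size)`,
    /// splitting areas at the range boundaries where necessary.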
pub fn mprotect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) {
info!(
"[mprotect] addr: [{:?}, {:?}), flags: {:?}",
start,
start + size,
flags
);
let end = start + size;
assert!(end.is_aligned_4k());
flush_tlb(None);
let mut overlapped_area: Vec<(usize, MapArea)> = Vec::new();
let mut prev_area: BTreeMap<usize, MapArea> = BTreeMap::new();
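        // Drain `owned_mem`, separating the areas that overlap [start, end) from the rest.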
for _ in 0..self.owned_mem.len() {
let (idx, area) = self.owned_mem.pop_first().unwrap();
if area.overlap_with(start, end) {
overlapped_area.push((idx, area));
} else {
prev_area.insert(idx, area);
}
}
self.owned_mem = prev_area;
for (_, mut area) in overlapped_area {
if area.contained_in(start, end) {
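                // The whole area lies inside [start, end): update its flags in place.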
area.update_flags(flags, &mut self.page_table);
} else if area.strict_contain(start, end) {
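                // [start, end) lies strictly inside the area: split into three
                // parts and update only the middle one.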
let (mut mid, right) = area.split3(start, end);
mid.update_flags(flags, &mut self.page_table);
assert!(self.owned_mem.insert(mid.vaddr.into(), mid).is_none());
assert!(self.owned_mem.insert(right.vaddr.into(), right).is_none());
} else if start <= area.vaddr && area.vaddr < end {
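                // The head of the area overlaps the range: split at `end` and
                // update the left (overlapping) part.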
let right = area.split(end);
area.update_flags(flags, &mut self.page_table);
assert!(self.owned_mem.insert(right.vaddr.into(), right).is_none());
} else {
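                // The tail of the area overlaps the range: split at `start` and
                // update the right (overlapping) part.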
let mut right = area.split(start);
right.update_flags(flags, &mut self.page_table);
assert!(self.owned_mem.insert(right.vaddr.into(), right).is_none());
}
assert!(self.owned_mem.insert(area.vaddr.into(), area).is_none());
}
        flush_tlb(None);
}
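    /// Handles a page fault at `addr` with the given access `flags` by delegating
    /// to the owning area. Returns `AxError::BadAddress` if no area covers `addr`
    /// or the fault cannot be resolved.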
pub fn handle_page_fault(&mut self, addr: VirtAddr, flags: MappingFlags) -> AxResult<()> {
match self
.owned_mem
.values_mut()
.find(|area| area.vaddr <= addr && addr < area.end_va())
{
Some(area) => {
if !area.handle_page_fault(addr, flags, &mut self.page_table) {
return Err(AxError::BadAddress);
}
Ok(())
}
None => {
error!("Page fault address {:?} not found in memory set ", addr);
Err(AxError::BadAddress)
}
}
}
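    /// Deallocates and removes every owned (user) area; kernel mappings stay in place.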
pub fn unmap_user_areas(&mut self) {
for (_, area) in self.owned_mem.iter_mut() {
area.dealloc(&mut self.page_table);
}
self.owned_mem.clear();
}
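    /// Queries the page table for the mapping of `vaddr`, returning the physical
    /// address, mapping flags and page size.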
pub fn query(&self, vaddr: VirtAddr) -> AxResult<(PhysAddr, MappingFlags, PageSize)> {
        self.page_table
            .query(vaddr)
            .map_err(|_| AxError::InvalidInput)
}
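    /// Maps a single 4 KiB page `vaddr -> paddr` directly in the page table,
    /// without creating a `MapArea` or allocating frames.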
pub fn map_page_without_alloc(
&mut self,
vaddr: VirtAddr,
paddr: PhysAddr,
flags: MappingFlags,
) -> AxResult<()> {
self.page_table
.map_region(vaddr, paddr, PAGE_SIZE_4K, flags, false)
.map_err(|_| AxError::InvalidInput)
}
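    /// Allocates a new shmid for `key` and creates the backing `SharedMem`.
    /// The segment still has to be registered via `add_shared_mem` or
    /// `add_private_shared_mem`.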
pub fn create_shared_mem(
key: i32,
size: usize,
pid: u64,
uid: u32,
gid: u32,
mode: u16,
) -> AxResult<(i32, SharedMem)> {
let mut key_map = KEY_TO_SHMID.lock();
let shmid = SHMID.fetch_add(1, Ordering::Release);
key_map.insert(key, shmid);
let mem = SharedMem::try_new(key, size, pid, uid, gid, mode)?;
Ok((shmid, mem))
}
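    /// Registers a shared-memory segment in the global table.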
pub fn add_shared_mem(shmid: i32, mem: SharedMem) {
let mut mem_map = SHARED_MEMS.lock();
assert!(mem_map.insert(shmid, Arc::new(mem)).is_none());
}
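    /// Registers a shared-memory segment that is private to this address space.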
pub fn add_private_shared_mem(&mut self, shmid: i32, mem: SharedMem) {
assert!(self.private_mem.insert(shmid, Arc::new(mem)).is_none());
}
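    /// Looks up a globally registered shared-memory segment by shmid.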
pub fn get_shared_mem(shmid: i32) -> Option<Arc<SharedMem>> {
SHARED_MEMS.lock().get(&shmid).cloned()
}
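    /// Looks up a private shared-memory segment by shmid.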
pub fn get_private_shared_mem(&self, shmid: i32) -> Option<Arc<SharedMem>> {
self.private_mem.get(&shmid).cloned()
}
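    /// Maps an existing shared-memory segment at `addr` with `flags` and records the attachment.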
pub fn attach_shared_mem(&mut self, mem: Arc<SharedMem>, addr: VirtAddr, flags: MappingFlags) {
self.page_table
.map_region(addr, mem.paddr(), mem.size(), flags, false)
.unwrap();
self.attached_mem.push((addr, flags, mem));
}
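    /// Detaches a shared-memory segment. Not implemented yet.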
pub fn detach_shared_mem(&mut self, _shmid: i32) {
todo!()
}
}
impl MemorySet {
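    /// Ensures the page containing `addr` is actually present, triggering the
    /// lazy-allocation (page fault) path if its page-table entry is not yet mapped.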
pub fn manual_alloc_for_lazy(&mut self, addr: VirtAddr) -> AxResult<()> {
if let Some((_, area)) = self
.owned_mem
.iter_mut()
.find(|(_, area)| area.vaddr <= addr && addr < area.end_va())
{
            let entry = self
                .page_table
                .get_entry_mut(addr)
                .map_err(|_| AxError::InvalidInput)?
                .0;
if !entry.is_present() {
if !area.handle_page_fault(addr, entry.flags(), &mut self.page_table) {
return Err(AxError::BadAddress);
}
}
Ok(())
} else {
Err(AxError::InvalidInput)
}
}
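    /// Ensures every page in `[start, end]` is present (see `manual_alloc_for_lazy`).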
pub fn manual_alloc_range_for_lazy(&mut self, start: VirtAddr, end: VirtAddr) -> AxResult<()> {
if start > end {
return Err(AxError::InvalidInput);
}
let start: usize = start.align_down_4k().into();
let end: usize = end.align_down_4k().into();
for addr in (start..=end).step_by(PAGE_SIZE_4K) {
debug!("allocating page at {:x}", addr);
self.manual_alloc_for_lazy(addr.into())?;
}
Ok(())
}
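    /// Ensures all pages covering the object pointed to by `obj` are present.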
pub fn manual_alloc_type_for_lazy<T: Sized>(&mut self, obj: *const T) -> AxResult<()> {
let start = obj as usize;
let end = start + core::mem::size_of::<T>() - 1;
self.manual_alloc_range_for_lazy(start.into(), end.into())
}
}
impl MemorySet {
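    /// Deep-copies this address space: kernel regions are re-mapped into a fresh
    /// page table, every owned area is cloned into newly allocated frames, and
    /// private and attached shared memory is carried over.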
pub fn clone_or_err(&self) -> AxResult<Self> {
let mut page_table = PageTable::try_new().expect("Error allocating page table.");
for r in memory_regions() {
debug!(
"mapping kernel region [0x{:x}, 0x{:x})",
usize::from(phys_to_virt(r.paddr)),
usize::from(phys_to_virt(r.paddr)) + r.size,
);
page_table
.map_region(phys_to_virt(r.paddr), r.paddr, r.size, r.flags.into(), true)
.expect("Error mapping kernel memory");
}
let mut owned_mem: BTreeMap<usize, MapArea> = BTreeMap::new();
for (vaddr, area) in self.owned_mem.iter() {
info!("vaddr: {:X?}, new_area: {:X?}", vaddr, area.vaddr);
match area.clone_alloc(&mut page_table) {
Ok(new_area) => {
info!("new area: {:X?}", new_area.vaddr);
owned_mem.insert(*vaddr, new_area);
Ok(())
}
Err(err) => Err(err),
}?;
}
let mut new_memory = Self {
page_table,
owned_mem,
private_mem: self.private_mem.clone(),
attached_mem: Vec::new(),
};
for (addr, flags, mem) in &self.attached_mem {
new_memory.attach_shared_mem(mem.clone(), *addr, *flags);
}
Ok(new_memory)
}
}
impl Drop for MemorySet {
fn drop(&mut self) {
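        // Release every user-owned area before the page table itself is dropped.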
self.unmap_user_areas();
}
}