use core::fmt;
use axerrno::{ax_err, AxError, AxResult};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable};
use memory_addr::{
is_aligned_4k, MemoryAddr, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, PAGE_SIZE_4K,
};
use memory_set::{MemoryArea, MemorySet};
use crate::backend::Backend;
use crate::mapping_err_to_ax_err;
/// The virtual memory address space.
pub struct AddrSpace {
va_range: VirtAddrRange,
areas: MemorySet<Backend>,
pt: PageTable,
}
impl AddrSpace {
/// Returns the address space base.
pub const fn base(&self) -> VirtAddr {
self.va_range.start
}
/// Returns the address space end.
pub const fn end(&self) -> VirtAddr {
self.va_range.end
}
/// Returns the address space size.
pub fn size(&self) -> usize {
self.va_range.size()
}
/// Returns the reference to the inner page table.
pub const fn page_table(&self) -> &PageTable {
&self.pt
}
/// Returns the root physical address of the inner page table.
pub const fn page_table_root(&self) -> PhysAddr {
self.pt.root_paddr()
}
/// Checks if the address space contains the given address range.
pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
self.va_range
.contains_range(VirtAddrRange::from_start_size(start, size))
}
/// Creates a new empty address space.
pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
Ok(Self {
va_range: VirtAddrRange::from_start_size(base, size),
areas: MemorySet::new(),
pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
})
}
/// Copies page table mappings from another address space.
///
/// Only the page table entries are copied, not the memory regions they map.
/// This is typically used to make a portion of the kernel space mapping
/// visible in a user address space.
///
/// Returns an error if the two address spaces overlap.
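///
/// # Example
///
/// A minimal sketch (illustrative only; `kernel_aspace` and `user_aspace`
/// are assumed to be two already constructed, non-overlapping address
/// spaces):
///
/// ```ignore
/// // Make the kernel mappings visible through the user page table, so the
/// // kernel keeps working after switching to `user_aspace`'s root.
/// user_aspace.copy_mappings_from(&kernel_aspace)?;
/// ```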
pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
if self.va_range.overlaps(other.va_range) {
return ax_err!(InvalidInput, "address space overlap");
}
self.pt.copy_from(&other.pt, other.base(), other.size());
Ok(())
}
/// Finds a free area that can accommodate the given size.
///
/// The search starts from the given hint address, and the area should be within the given limit range.
///
/// Returns the start address of the free area. Returns None if no such area is found.
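///
/// # Example
///
/// A minimal sketch (illustrative only; `aspace` and the hint address are
/// assumptions):
///
/// ```ignore
/// use memory_addr::{VirtAddr, VirtAddrRange, PAGE_SIZE_4K};
///
/// // Look for one free page anywhere in the address space, preferring
/// // addresses at or above the hint.
/// let hint = VirtAddr::from(0x1000_0000);
/// let limit = VirtAddrRange::from_start_size(aspace.base(), aspace.size());
/// if let Some(start) = aspace.find_free_area(hint, PAGE_SIZE_4K, limit) {
///     // `start..start + PAGE_SIZE_4K` does not overlap any existing area.
/// }
/// ```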
pub fn find_free_area(
&self,
hint: VirtAddr,
size: usize,
limit: VirtAddrRange,
) -> Option<VirtAddr> {
self.areas.find_free_area(hint, size, limit)
}
/// Adds a new linear mapping.
///
/// See [`Backend`] for more details about the mapping backends.
///
/// The `flags` parameter indicates the mapping permissions and attributes.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
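///
/// # Example
///
/// A minimal sketch (illustrative only; `aspace` is assumed to be the
/// kernel address space and the virtual/physical addresses are made up):
///
/// ```ignore
/// use axhal::paging::MappingFlags;
/// use memory_addr::{PhysAddr, VirtAddr};
///
/// // Map a contiguous 16 KiB physical region at a fixed virtual address;
/// // since the frames are contiguous, a linear (fixed-offset) backend is
/// // sufficient and no frame allocation is involved.
/// aspace.map_linear(
///     VirtAddr::from(0xffff_0000_1000_0000),
///     PhysAddr::from(0x1000_0000),
///     0x4000,
///     MappingFlags::READ | MappingFlags::WRITE,
/// )?;
/// ```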
pub fn map_linear(
&mut self,
start_vaddr: VirtAddr,
start_paddr: PhysAddr,
size: usize,
flags: MappingFlags,
) -> AxResult {
if !self.contains_range(start_vaddr, size) {
return ax_err!(InvalidInput, "address out of range");
}
if !start_vaddr.is_aligned_4k() || !start_paddr.is_aligned_4k() || !is_aligned_4k(size) {
return ax_err!(InvalidInput, "address not aligned");
}
let offset = start_vaddr.as_usize() - start_paddr.as_usize();
let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
self.areas
.map(area, &mut self.pt, false)
.map_err(mapping_err_to_ax_err)?;
Ok(())
}
/// Adds a new allocation mapping.
///
/// See [`Backend`] for more details about the mapping backends.
///
/// The `flags` parameter indicates the mapping permissions and attributes.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
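///
/// # Example
///
/// A minimal sketch (illustrative only; `aspace` is assumed to be a user
/// address space covering the chosen range, and the flag combination is an
/// assumption):
///
/// ```ignore
/// use axhal::paging::MappingFlags;
/// use memory_addr::VirtAddr;
///
/// // Reserve 1 MiB for a user stack. With `populate == false` no frames
/// // are allocated up front; they are allocated lazily on first access via
/// // `handle_page_fault`.
/// aspace.map_alloc(
///     VirtAddr::from(0x7fff_0000_0000),
///     0x10_0000,
///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER,
///     false,
/// )?;
/// ```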
pub fn map_alloc(
&mut self,
start: VirtAddr,
size: usize,
flags: MappingFlags,
populate: bool,
) -> AxResult {
if !self.contains_range(start, size) {
return ax_err!(InvalidInput, "address out of range");
}
if !start.is_aligned_4k() || !is_aligned_4k(size) {
return ax_err!(InvalidInput, "address not aligned");
}
let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
self.areas
.map(area, &mut self.pt, false)
.map_err(mapping_err_to_ax_err)?;
Ok(())
}
/// Removes mappings within the specified virtual address range.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
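///
/// # Example
///
/// A minimal sketch (illustrative only; assumes the range was previously
/// mapped in `aspace`, e.g. by `map_alloc`):
///
/// ```ignore
/// use memory_addr::VirtAddr;
///
/// // Remove the 1 MiB mapping starting at the given page-aligned address.
/// aspace.unmap(VirtAddr::from(0x7fff_0000_0000), 0x10_0000)?;
/// ```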
pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
if !self.contains_range(start, size) {
return ax_err!(InvalidInput, "address out of range");
}
if !start.is_aligned_4k() || !is_aligned_4k(size) {
return ax_err!(InvalidInput, "address not aligned");
}
self.areas
.unmap(start, size, &mut self.pt)
.map_err(mapping_err_to_ax_err)?;
Ok(())
}
/// Processes data in this address space with the given function.
///
/// It currently supports reading and writing data within the given range.
///
/// # Arguments
/// - `start`: The start virtual address to process.
/// - `size`: The size of the data to process.
/// - `f`: The function that processes the data. Its arguments are the
///   directly accessible virtual address of the current chunk (translated
///   from the mapped physical address), the byte offset of the chunk within
///   the whole range, and the size of the chunk in bytes.
///
/// # Notes
/// The caller must ensure that the permission of the operation is allowed.
fn process_area_data<F>(&self, start: VirtAddr, size: usize, mut f: F) -> AxResult
where
F: FnMut(VirtAddr, usize, usize),
{
if !self.contains_range(start, size) {
return ax_err!(InvalidInput, "address out of range");
}
let mut cnt = 0;
// Align the end of the range up to a page boundary so the iterator also
// covers a trailing partial page.
let end_align_up = (start + size).align_up_4k();
for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
.expect("Failed to create page iterator")
{
let (mut paddr, _, _) = self.pt.query(vaddr).map_err(|_| AxError::BadAddress)?;
let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);
if copy_size == 0 {
break;
}
if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
let align_offset = start.align_offset_4k();
copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
paddr += align_offset;
}
f(phys_to_virt(paddr), cnt, copy_size);
cnt += copy_size;
}
Ok(())
}
/// Reads data from the address space.
///
/// # Arguments
///
/// * `start` - The start virtual address to read.
/// * `buf` - The buffer to store the data.
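///
/// # Example
///
/// A minimal sketch (illustrative only; assumes `start` lies in a mapped,
/// readable region of `aspace`):
///
/// ```ignore
/// let mut buf = [0u8; 64];
/// aspace.read(start, &mut buf)?;
/// // `buf` now holds the 64 bytes located at `start` in this address space.
/// ```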
pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
})
}
/// Writes data to the address space.
///
/// # Arguments
///
/// * `start` - The start virtual address to write.
/// * `buf` - The buffer to write to the address space.
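///
/// # Example
///
/// A minimal sketch (illustrative only; assumes `start` lies in a mapped,
/// writable region of `aspace`):
///
/// ```ignore
/// aspace.write(start, b"hello")?;
/// ```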
pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
})
}
/// Updates the mapping flags within the specified virtual address range.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
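///
/// # Example
///
/// A minimal sketch (illustrative only; the address and size are
/// assumptions):
///
/// ```ignore
/// use axhal::paging::MappingFlags;
/// use memory_addr::VirtAddr;
///
/// // Make a previously writable page read-only, e.g. after loading
/// // program text into it.
/// aspace.protect(VirtAddr::from(0x7fff_0000_0000), 0x1000, MappingFlags::READ)?;
/// ```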
pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
if !self.contains_range(start, size) {
return ax_err!(InvalidInput, "address out of range");
}
if !start.is_aligned_4k() || !is_aligned_4k(size) {
return ax_err!(InvalidInput, "address not aligned");
}
// TODO: only the page table entries are updated here; the flags recorded
// in `self.areas` are left unchanged.
self.pt
.protect_region(start, size, flags, true)
.map_err(|_| AxError::BadState)?
.ignore();
Ok(())
}
/// Removes all mappings in the address space.
pub fn clear(&mut self) {
self.areas.clear(&mut self.pt).unwrap();
}
/// Handles a page fault at the given address.
///
/// `access_flags` indicates the access type that caused the page fault.
///
/// Returns `true` if the page fault is handled successfully (not a real
/// fault).
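///
/// # Example
///
/// A minimal sketch (illustrative only) of forwarding a fault from an
/// architecture-specific trap handler; `fault_vaddr` and the access flags
/// are assumed to come from the trap frame:
///
/// ```ignore
/// use axhal::paging::MappingFlags;
///
/// if !aspace.handle_page_fault(fault_vaddr, MappingFlags::WRITE) {
///     // Not a lazily-allocated page: report a real memory access fault.
///     panic!("unhandled page fault at {:?}", fault_vaddr);
/// }
/// ```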
pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: MappingFlags) -> bool {
if !self.va_range.contains(vaddr) {
return false;
}
if let Some(area) = self.areas.find(vaddr) {
let orig_flags = area.flags();
if orig_flags.contains(access_flags) {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
}
}
false
}
}
impl fmt::Debug for AddrSpace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("AddrSpace")
.field("va_range", &self.va_range)
.field("page_table_root", &self.pt.root_paddr())
.field("areas", &self.areas)
.finish()
}
}
impl Drop for AddrSpace {
fn drop(&mut self) {
self.clear();
}
}