use allocator::AllocError;
use axerrno::{AxError, AxResult};
use memory_addr::{PhysAddr, VirtAddr};

use crate::{global_allocator, PAGE_SIZE};

extern crate alloc;
use alloc::vec::Vec;

/// A RAII wrapper of contiguous 4K-sized pages.
///
/// It will automatically deallocate the pages when dropped.
#[derive(Debug)]
pub struct GlobalPage {
    start_vaddr: VirtAddr,
    num_pages: usize,
}

impl GlobalPage {
    /// Allocate one 4K-sized page.
    pub fn alloc() -> AxResult<Self> {
        global_allocator()
            .alloc_pages(1, PAGE_SIZE)
            .map(|vaddr| Self {
                start_vaddr: vaddr.into(),
                num_pages: 1,
            })
            .map_err(alloc_err_to_ax_err)
    }

    /// Allocate one 4K-sized page and fill it with zeros.
    pub fn alloc_zero() -> AxResult<Self> {
        let mut p = Self::alloc()?;
        p.zero();
        Ok(p)
    }

    /// Allocate contiguous 4K-sized pages.
    ///
    /// The start address of the allocation is aligned to `align_pow2` bytes,
    /// which must be a power of two.
    pub fn alloc_contiguous(num_pages: usize, align_pow2: usize) -> AxResult<Self> {
        global_allocator()
            .alloc_pages(num_pages, align_pow2)
            .map(|vaddr| Self {
                start_vaddr: vaddr.into(),
                num_pages,
            })
            .map_err(alloc_err_to_ax_err)
    }

    /// Get the start virtual address of this page.
    pub fn start_vaddr(&self) -> VirtAddr {
        self.start_vaddr
    }

    /// Get the start physical address of this page, using the given
    /// `virt_to_phys` translation function.
    pub fn start_paddr<F>(&self, virt_to_phys: F) -> PhysAddr
    where
        F: FnOnce(VirtAddr) -> PhysAddr,
    {
        virt_to_phys(self.start_vaddr)
    }

    /// Get the total size (in bytes) of these page(s).
    pub fn size(&self) -> usize {
        self.num_pages * PAGE_SIZE
    }

    /// Convert to a raw pointer.
    pub fn as_ptr(&self) -> *const u8 {
        self.start_vaddr.as_ptr()
    }

    /// Convert to a mutable raw pointer.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.start_vaddr.as_mut_ptr()
    }

    /// Fill `self` with `byte`.
    pub fn fill(&mut self, byte: u8) {
        // SAFETY: `self` exclusively owns `size()` bytes of writable memory
        // starting at `start_vaddr`.
        unsafe { core::ptr::write_bytes(self.as_mut_ptr(), byte, self.size()) }
    }

    /// Fill `self` with zero.
    pub fn zero(&mut self) {
        self.fill(0)
    }

    /// Forms a byte slice over the whole allocation for reading data.
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: the memory is owned by `self` and valid for `size()` bytes.
        unsafe { core::slice::from_raw_parts(self.as_ptr(), self.size()) }
    }

    /// Forms a mutable byte slice over the whole allocation for writing data.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        // SAFETY: the memory is exclusively owned by `self` and valid for
        // `size()` bytes.
        unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr(), self.size()) }
    }
}

impl Drop for GlobalPage {
    fn drop(&mut self) {
        global_allocator().dealloc_pages(self.start_vaddr.into(), self.num_pages);
    }
}
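
// A minimal usage sketch of `GlobalPage` (illustrative only, not part of the
// public API). It assumes the global allocator has already been initialized
// by the runtime; the identity translation passed to `start_paddr` is a
// stand-in for whatever linear mapping the platform actually uses.
#[allow(dead_code)]
fn global_page_example() -> AxResult {
    // One zeroed page; it is freed automatically when `page` is dropped.
    let mut page = GlobalPage::alloc_zero()?;
    assert_eq!(page.size(), PAGE_SIZE);

    // Write through the mutable view, read back through the shared one.
    page.as_slice_mut()[0] = 0xab;
    assert_eq!(page.as_slice()[0], 0xab);

    // The caller supplies the virtual-to-physical translation.
    let _paddr = page.start_paddr(|va| PhysAddr::from(va.as_usize()));

    // Four contiguous pages, aligned to `4 * PAGE_SIZE` bytes; they are
    // freed together when `pages` goes out of scope.
    let pages = GlobalPage::alloc_contiguous(4, 4 * PAGE_SIZE)?;
    assert_eq!(pages.size(), 4 * PAGE_SIZE);
    Ok(())
}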

/// Converts an [`AllocError`] from the allocator into the corresponding
/// [`AxError`].
const fn alloc_err_to_ax_err(e: AllocError) -> AxError {
    match e {
        AllocError::InvalidParam | AllocError::MemoryOverlap | AllocError::NotAllocated => {
            AxError::InvalidInput
        }
        AllocError::NoMemory => AxError::NoMemory,
    }
}

/// A safe RAII wrapper of a single 4K-sized page.
///
/// It holds the page's `VirtAddr` (i.e., its `PhysAddr` plus the
/// linear-mapping offset) and deallocates the page when dropped.
#[derive(Debug)]
pub struct PhysPage {
    /// The start virtual address of this page.
    pub start_vaddr: VirtAddr,
}

impl PhysPage {
    /// Allocate one 4K-sized page.
    pub fn alloc() -> AxResult<Self> {
        global_allocator()
            .alloc_pages(1, PAGE_SIZE)
            .map(|vaddr| Self {
                start_vaddr: vaddr.into(),
            })
            .map_err(alloc_err_to_ax_err)
    }

    /// Allocate `num_pages` contiguous 4K-sized pages, fill them with zero,
    /// and copy `data` (if given) to the start of the allocation.
    ///
    /// Returns one [`PhysPage`] wrapper per allocated page. `data` must not
    /// be longer than the allocation, or the copy will panic.
    pub fn alloc_contiguous(
        num_pages: usize,
        align_pow2: usize,
        data: Option<&[u8]>,
    ) -> AxResult<Vec<Option<Self>>> {
        global_allocator()
            .alloc_pages(num_pages, align_pow2)
            .map(|vaddr| {
                // SAFETY: the allocator has just returned `num_pages` pages
                // starting at `vaddr`, so the whole range is owned by us and
                // valid for reads and writes.
                let pages = unsafe {
                    core::slice::from_raw_parts_mut(vaddr as *mut u8, num_pages * PAGE_SIZE)
                };
                pages.fill(0);
                if let Some(data) = data {
                    // Panics if `data` is longer than the allocation.
                    pages[..data.len()].copy_from_slice(data);
                }

                (0..num_pages)
                    .map(|page_idx| {
                        Some(PhysPage {
                            start_vaddr: (vaddr + page_idx * PAGE_SIZE).into(),
                        })
                    })
                    .collect()
            })
            .map_err(alloc_err_to_ax_err)
    }

    /// Convert to a raw pointer.
    pub fn as_ptr(&self) -> *const u8 {
        self.start_vaddr.as_ptr()
    }

    /// Convert to a mutable raw pointer.
    pub fn as_mut_ptr(&mut self) -> *mut u8 {
        self.start_vaddr.as_mut_ptr()
    }

    /// Forms a byte slice over the page for reading data.
    pub fn as_slice(&self) -> &[u8] {
        // SAFETY: the page is owned by `self` and valid for `PAGE_SIZE` bytes.
        unsafe { core::slice::from_raw_parts(self.as_ptr(), PAGE_SIZE) }
    }

    /// Forms a mutable byte slice over the page for writing data.
    pub fn as_slice_mut(&mut self) -> &mut [u8] {
        // SAFETY: the page is exclusively owned by `self` and valid for
        // `PAGE_SIZE` bytes.
        unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr(), PAGE_SIZE) }
    }

    /// Fill `self` with `byte`.
    pub fn fill(&mut self, byte: u8) {
        // SAFETY: `self` exclusively owns `PAGE_SIZE` bytes of writable
        // memory starting at `start_vaddr`.
        unsafe { core::ptr::write_bytes(self.as_mut_ptr(), byte, PAGE_SIZE) }
    }
}

impl Drop for PhysPage {
    fn drop(&mut self) {
        global_allocator().dealloc_pages(self.start_vaddr.into(), 1);
    }
}
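
// A minimal usage sketch of `PhysPage` (illustrative only, not part of the
// public API). It assumes the global allocator has already been initialized.
// The allocation is contiguous, zero-filled, and prefixed with `data`; each
// returned page then frees itself independently on drop.
#[allow(dead_code)]
fn phys_page_example() -> AxResult {
    let data = [1u8, 2, 3, 4];
    let pages = PhysPage::alloc_contiguous(2, PAGE_SIZE, Some(&data))?;
    assert_eq!(pages.len(), 2);

    if let Some(first) = &pages[0] {
        // The first bytes hold the copied data; the remainder is zeroed.
        assert_eq!(&first.as_slice()[..4], &data[..]);
        assert_eq!(first.as_slice()[4], 0);
    }
    Ok(())
}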