use alloc::sync::Arc;
use core::sync::atomic::AtomicU64;
use axhal::arch::UspaceContext;
use axlog::info;
use axmm::AddrSpace;
use axsync::Mutex;
use axtask::{AxTaskRef, TaskExtRef, TaskInner};
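
/// Task extension: per-task data attached to each [`TaskInner`] via
/// `axtask::def_task_ext!`, holding the user-space state of the task.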
pub struct TaskExt {
    /// The process ID.
    pub proc_id: usize,
    /// The user-space address to clear when the task exits
    /// (Linux `clear_child_tid` semantics).
    clear_child_tid: AtomicU64,
    /// The saved user-space context (entry point, stack pointer, registers).
    pub uctx: UspaceContext,
    /// The virtual address space of the task.
    pub aspace: Arc<Mutex<AddrSpace>>,
}

impl TaskExt {
    pub const fn new(uctx: UspaceContext, aspace: Arc<Mutex<AddrSpace>>) -> Self {
        Self {
            // Hardcoded placeholder; real PID allocation is not implemented here.
            proc_id: 233,
            uctx,
            clear_child_tid: AtomicU64::new(0),
            aspace,
        }
    }

    pub(crate) fn clear_child_tid(&self) -> u64 {
        self.clear_child_tid
            .load(core::sync::atomic::Ordering::Relaxed)
    }

    pub(crate) fn set_clear_child_tid(&self, clear_child_tid: u64) {
        self.clear_child_tid
            .store(clear_child_tid, core::sync::atomic::Ordering::Relaxed);
    }
}

axtask::def_task_ext!(TaskExt);
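
/// Spawns a new user task that runs in the given address space, entering
/// user space with the provided [`UspaceContext`].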
pub fn spawn_user_task(aspace: Arc<Mutex<AddrSpace>>, uctx: UspaceContext) -> AxTaskRef {
    let mut task = TaskInner::new(
        || {
            // Task entry: runs on the task's kernel stack when it is first scheduled.
            let curr = axtask::current();
            let kstack_top = curr.kernel_stack_top().unwrap();
            info!(
                "Enter user space: entry={:#x}, ustack={:#x}, kstack={:#x}",
                curr.task_ext().uctx.get_ip(),
                curr.task_ext().uctx.get_sp(),
                kstack_top,
            );
            // Restores the saved user context and drops to user mode; never returns.
            unsafe { curr.task_ext().uctx.enter_uspace(kstack_top) };
        },
        "userboot".into(),
        crate::config::KERNEL_STACK_SIZE,
    );
    // Record the user page table root in the task context so it is installed
    // when this task is switched to.
    task.ctx_mut()
        .set_page_table_root(aspace.lock().page_table_root());
    task.init_task_ext(TaskExt::new(uctx, aspace));
    axtask::spawn_task(task)
}