extern crate alloc;
use alloc::sync::Arc;
use alloc::vec;
use alloc::vec::Vec;
use alloc::{collections::BTreeMap, string::String};
use axerrno::{AxError, AxResult};
use axfs::api::{FileIO, OpenFlags};
use axhal::arch::{
read_trapframe_from_kstack, write_page_table_root0, write_trapframe_to_kstack, TrapFrame,
};
use axhal::mem::{phys_to_virt, VirtAddr};
use axhal::time::current_time_nanos;
use axhal::KERNEL_PROCESS_ID;
use axlog::{debug, error};
use axmem::MemorySet;
use axsync::Mutex;
use axtask::{current, new_task, AxTaskRef, TaskId, RUN_QUEUE};
use core::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, Ordering};
use crate::fd_manager::FdManager;
use crate::flags::CloneFlags;
use crate::futex::FutexRobustList;
use crate::signal::SignalModule;
use crate::stdio::{Stderr, Stdin, Stdout};
use crate::{load_app, yield_now_task};
/// Global map from task id (tid) to its task reference, covering every live task.
pub static TID2TASK: Mutex<BTreeMap<u64, AxTaskRef>> = Mutex::new(BTreeMap::new());
/// Global map from process id (pid) to its process control block, covering every live process.
pub static PID2PC: Mutex<BTreeMap<u64, Arc<Process>>> = Mutex::new(BTreeMap::new());
/// Initial per-process file-descriptor table limit passed to `FdManager::new`.
const FD_LIMIT_ORIGIN: usize = 1025;
extern "C" {
    /// Start of the signal trampoline code (provided outside Rust, e.g. in assembly);
    /// its physical page is mapped into user space at `axconfig::SIGNAL_TRAMPOLINE`.
    fn start_signal_trampoline();
}
/// Process control block: identity, relationships, address space, file
/// descriptors and per-task signal/futex state for one user process.
pub struct Process {
    /// Process id, fixed at creation.
    pid: u64,
    /// Pid of the parent process (atomically updatable, e.g. on re-parenting).
    pub parent: AtomicU64,
    /// Child processes spawned by this process.
    pub children: Mutex<Vec<Arc<Process>>>,
    /// Tasks (threads) belonging to this process; index 0 is used as the name source in `clone_task`.
    pub tasks: Mutex<Vec<AxTaskRef>>,
    /// File-descriptor table manager (also holds the cwd and the fd limit).
    pub fd_manager: FdManager,
    /// True once the process has exited and awaits reaping.
    pub is_zombie: AtomicBool,
    /// Exit status stored when the process terminates.
    pub exit_code: AtomicI32,
    /// Address space. The inner `Arc` is cloned for `CLONE_VM` children;
    /// the outer `Mutex` guards replacing the `Arc` itself (done in `exec`).
    pub memory_set: Mutex<Arc<Mutex<MemorySet>>>,
    /// Lowest address of the user heap.
    pub heap_bottom: AtomicU64,
    /// Current top (break) of the user heap; equals `heap_bottom` at creation.
    pub heap_top: AtomicU64,
    /// Per-task signal state, keyed by tid.
    pub signal_modules: Mutex<BTreeMap<u64, SignalModule>>,
    /// Per-task futex robust lists, keyed by tid.
    pub robust_list: Mutex<BTreeMap<u64, FutexRobustList>>,
    /// Set to true while a vfork child runs (parent blocks until cleared).
    pub blocked_by_vfork: Mutex<bool>,
    /// Path of the executable this process runs.
    pub file_path: Mutex<String>,
}
impl Process {
    /// The id of this process.
    pub fn pid(&self) -> u64 {
        self.pid
    }

    /// Pid of the parent process.
    pub fn get_parent(&self) -> u64 {
        self.parent.load(Ordering::Acquire)
    }

    /// Re-parents this process to `parent`.
    pub fn set_parent(&self, parent: u64) {
        self.parent.store(parent, Ordering::Release)
    }

    /// Exit status recorded for this process.
    pub fn get_exit_code(&self) -> i32 {
        self.exit_code.load(Ordering::Acquire)
    }

    /// Records the exit status of this process.
    pub fn set_exit_code(&self, exit_code: i32) {
        self.exit_code.store(exit_code, Ordering::Release)
    }

    /// Whether this process is a zombie (exited, not yet reaped).
    pub fn get_zombie(&self) -> bool {
        self.is_zombie.load(Ordering::Acquire)
    }

    /// Marks or clears the zombie state.
    pub fn set_zombie(&self, status: bool) {
        self.is_zombie.store(status, Ordering::Release)
    }

    /// Current top (break) of the user heap.
    pub fn get_heap_top(&self) -> u64 {
        self.heap_top.load(Ordering::Acquire)
    }

    /// Moves the user heap break to `top`.
    pub fn set_heap_top(&self, top: u64) {
        self.heap_top.store(top, Ordering::Release)
    }

    /// Lowest address of the user heap.
    pub fn get_heap_bottom(&self) -> u64 {
        self.heap_bottom.load(Ordering::Acquire)
    }

    /// Sets the lowest address of the user heap.
    pub fn set_heap_bottom(&self, bottom: u64) {
        self.heap_bottom.store(bottom, Ordering::Release)
    }

    /// Sets or clears the vfork-blocked flag.
    pub fn set_vfork_block(&self, value: bool) {
        let mut blocked = self.blocked_by_vfork.lock();
        *blocked = value;
    }

    /// Records the path of the executable this process runs.
    pub fn set_file_path(&self, path: String) {
        *self.file_path.lock() = path;
    }

    /// Path of the executable this process runs (cloned out of the lock).
    pub fn get_file_path(&self) -> String {
        self.file_path.lock().clone()
    }

    /// Exit code if the process has already exited, `None` while it is running.
    pub fn get_code_if_exit(&self) -> Option<i32> {
        self.get_zombie().then(|| self.get_exit_code())
    }
}
impl Process {
    /// Builds a process control block with empty child/task lists, a zeroed
    /// exit status, and a heap whose top starts at `heap_bottom`.
    ///
    /// `fd_table` seeds the file-descriptor table (limit `FD_LIMIT_ORIGIN`).
    pub fn new(
        pid: u64,
        parent: u64,
        memory_set: Mutex<Arc<Mutex<MemorySet>>>,
        heap_bottom: u64,
        fd_table: Vec<Option<Arc<dyn FileIO>>>,
    ) -> Self {
        Self {
            pid,
            parent: AtomicU64::new(parent),
            children: Mutex::new(Vec::new()),
            tasks: Mutex::new(Vec::new()),
            is_zombie: AtomicBool::new(false),
            exit_code: AtomicI32::new(0),
            memory_set,
            heap_bottom: AtomicU64::new(heap_bottom),
            // The heap is empty at creation: top == bottom.
            heap_top: AtomicU64::new(heap_bottom),
            fd_manager: FdManager::new(fd_table, FD_LIMIT_ORIGIN),
            signal_modules: Mutex::new(BTreeMap::new()),
            robust_list: Mutex::new(BTreeMap::new()),
            blocked_by_vfork: Mutex::new(false),
            file_path: Mutex::new(String::new()),
        }
    }

    /// Creates the first user process: loads the app named by `args[0]` into a
    /// fresh address space, wires up stdio fds, registers the process and its
    /// main task in the global tables, and enqueues the task for scheduling.
    ///
    /// # Errors
    /// - `InvalidInput` if `args` is empty (no program name to load).
    /// - `NotFound` if the app cannot be loaded or the kernel process is missing.
    pub fn init(args: Vec<String>, envs: &Vec<String>) -> AxResult<AxTaskRef> {
        // FIX: the original indexed `args[0]` and panicked on an empty vector.
        let path = match args.first() {
            Some(path) => path.clone(),
            None => return Err(AxError::InvalidInput),
        };
        let mut memory_set = MemorySet::new_with_kernel_mapped();
        {
            use axhal::mem::virt_to_phys;
            use axhal::paging::MappingFlags;
            // Map the signal trampoline page into the new user address space.
            let signal_trampoline_vaddr: VirtAddr = (axconfig::SIGNAL_TRAMPOLINE).into();
            let signal_trampoline_paddr = virt_to_phys((start_signal_trampoline as usize).into());
            memory_set.map_page_without_alloc(
                signal_trampoline_vaddr,
                signal_trampoline_paddr,
                MappingFlags::READ
                    | MappingFlags::EXECUTE
                    | MappingFlags::USER
                    | MappingFlags::WRITE,
            )?;
        }
        let page_table_token = memory_set.page_table_token();
        if page_table_token != 0 {
            unsafe {
                // Switch to the new page table so `load_app` can populate it.
                write_page_table_root0(page_table_token.into());
                // Allow supervisor access to user pages while loading.
                #[cfg(target_arch = "riscv64")]
                riscv::register::sstatus::set_sum();
            };
        }
        let (entry, user_stack_bottom, heap_bottom) =
            if let Ok(ans) = load_app(path.clone(), args, envs, &mut memory_set) {
                ans
            } else {
                error!("Failed to load app {}", path);
                return Err(AxError::NotFound);
            };
        let new_process = Arc::new(Self::new(
            TaskId::new().as_u64(),
            KERNEL_PROCESS_ID,
            Mutex::new(Arc::new(Mutex::new(memory_set))),
            heap_bottom.as_usize() as u64,
            // fds 0/1/2: stdin, stdout, stderr.
            vec![
                Some(Arc::new(Stdin {
                    flags: Mutex::new(OpenFlags::empty()),
                })),
                Some(Arc::new(Stdout {
                    flags: Mutex::new(OpenFlags::empty()),
                })),
                Some(Arc::new(Stderr {
                    flags: Mutex::new(OpenFlags::empty()),
                })),
            ],
        ));
        let new_task = new_task(
            || {},
            path,
            axconfig::TASK_STACK_SIZE,
            new_process.pid(),
            page_table_token,
            false,
        );
        TID2TASK
            .lock()
            .insert(new_task.id().as_u64(), Arc::clone(&new_task));
        new_task.set_leader(true);
        // Fresh trap frame: the task enters user space at `entry`.
        let new_trap_frame =
            TrapFrame::app_init_context(entry.as_usize(), user_stack_bottom.as_usize());
        write_trapframe_to_kstack(new_task.get_kernel_stack_top().unwrap(), &new_trap_frame);
        new_process.tasks.lock().push(Arc::clone(&new_task));
        new_process
            .signal_modules
            .lock()
            .insert(new_task.id().as_u64(), SignalModule::init_signal(None));
        new_process
            .robust_list
            .lock()
            .insert(new_task.id().as_u64(), FutexRobustList::default());
        {
            // FIX: take PID2PC once for both the insert and the parent lookup
            // (the original locked it twice back-to-back).
            let mut pid2pc = PID2PC.lock();
            pid2pc.insert(new_process.pid(), Arc::clone(&new_process));
            match pid2pc.get(&KERNEL_PROCESS_ID) {
                Some(kernel_process) => {
                    kernel_process.children.lock().push(new_process);
                }
                None => {
                    // FIX: undo the TID2TASK registration on this error path
                    // (the original leaked the entry).
                    TID2TASK.lock().remove(&new_task.id().as_u64());
                    return Err(AxError::NotFound);
                }
            }
        }
        RUN_QUEUE.lock().add_task(Arc::clone(&new_task));
        Ok(new_task)
    }
}
impl Process {
pub fn exec(&self, name: String, args: Vec<String>, envs: &Vec<String>) -> AxResult<()> {
if Arc::strong_count(&self.memory_set.lock()) == 1 {
self.memory_set.lock().lock().unmap_user_areas();
} else {
let memory_set = Arc::new(Mutex::new(MemorySet::clone_or_err(
&self.memory_set.lock().lock(),
)?));
*self.memory_set.lock() = memory_set;
self.memory_set.lock().lock().unmap_user_areas();
let new_page_table = self.memory_set.lock().lock().page_table_token();
let mut tasks = self.tasks.lock();
for task in tasks.iter_mut() {
task.inner().set_page_table_token(new_page_table);
}
unsafe {
axhal::arch::write_page_table_root0(new_page_table.into());
}
}
axhal::arch::flush_tlb(None);
let current_task = current();
let mut tasks = self.tasks.lock();
for _ in 0..tasks.len() {
let task = tasks.pop().unwrap();
if task.id() == current_task.id() {
#[cfg(target_arch = "x86_64")]
unsafe {
task.set_tls_force(0);
axhal::arch::write_thread_pointer(0);
}
tasks.push(task);
} else {
TID2TASK.lock().remove(&task.id().as_u64());
RUN_QUEUE.lock().remove_task(&task);
}
}
current_task.set_leader(true);
current_task.reset_time_stat(current_time_nanos() as usize);
current_task.set_name(name.split('/').last().unwrap());
assert!(tasks.len() == 1);
drop(tasks);
let args = if args.is_empty() {
vec![name.clone()]
} else {
args
};
let (entry, user_stack_bottom, heap_bottom) = if let Ok(ans) =
load_app(name.clone(), args, envs, &mut self.memory_set.lock().lock())
{
ans
} else {
error!("Failed to load app {}", name);
return Err(AxError::NotFound);
};
let page_table_token = if self.pid == KERNEL_PROCESS_ID {
0
} else {
self.memory_set.lock().lock().page_table_token()
};
if page_table_token != 0 {
unsafe {
write_page_table_root0(page_table_token.into());
};
}
self.set_heap_bottom(heap_bottom.as_usize() as u64);
self.set_heap_top(heap_bottom.as_usize() as u64);
self.robust_list.lock().clear();
self.robust_list
.lock()
.insert(current_task.id().as_u64(), FutexRobustList::default());
{
use axhal::mem::virt_to_phys;
use axhal::paging::MappingFlags;
self.signal_modules.lock().clear();
self.signal_modules
.lock()
.insert(current_task.id().as_u64(), SignalModule::init_signal(None));
let signal_trampoline_vaddr: VirtAddr = (axconfig::SIGNAL_TRAMPOLINE).into();
let signal_trampoline_paddr = virt_to_phys((start_signal_trampoline as usize).into());
let memory_set_wrapper = self.memory_set.lock();
let mut memory_set = memory_set_wrapper.lock();
if memory_set.query(signal_trampoline_vaddr).is_err() {
let _ = memory_set.map_page_without_alloc(
signal_trampoline_vaddr,
signal_trampoline_paddr,
MappingFlags::READ
| MappingFlags::EXECUTE
| MappingFlags::USER
| MappingFlags::WRITE,
);
}
drop(memory_set);
}
let new_trap_frame =
TrapFrame::app_init_context(entry.as_usize(), user_stack_bottom.as_usize());
write_trapframe_to_kstack(
current_task.get_kernel_stack_top().unwrap(),
&new_trap_frame,
);
Ok(())
}
pub fn clone_task(
&self,
flags: CloneFlags,
stack: Option<usize>,
ptid: usize,
tls: usize,
ctid: usize,
sig_child: bool,
) -> AxResult<u64> {
let new_memory_set = if flags.contains(CloneFlags::CLONE_VM) {
Mutex::new(Arc::clone(&self.memory_set.lock()))
} else {
let memory_set = Arc::new(Mutex::new(MemorySet::clone_or_err(
&self.memory_set.lock().lock(),
)?));
{
use axhal::mem::virt_to_phys;
use axhal::paging::MappingFlags;
let signal_trampoline_vaddr: VirtAddr = (axconfig::SIGNAL_TRAMPOLINE).into();
let signal_trampoline_paddr =
virt_to_phys((start_signal_trampoline as usize).into());
memory_set.lock().map_page_without_alloc(
signal_trampoline_vaddr,
signal_trampoline_paddr,
MappingFlags::READ
| MappingFlags::EXECUTE
| MappingFlags::USER
| MappingFlags::WRITE,
)?;
}
Mutex::new(memory_set)
};
let process_id = if flags.contains(CloneFlags::CLONE_THREAD) {
self.pid
} else {
TaskId::new().as_u64()
};
let parent_id = if flags.contains(CloneFlags::CLONE_PARENT) {
self.get_parent()
} else {
self.pid
};
let new_task = new_task(
|| {},
String::from(self.tasks.lock()[0].name().split('/').last().unwrap()),
axconfig::TASK_STACK_SIZE,
process_id,
new_memory_set.lock().lock().page_table_token(),
sig_child,
);
#[cfg(target_arch = "x86_64")]
if tls == 0 {
unsafe {
new_task.set_tls_force(axhal::arch::read_thread_pointer());
}
}
debug!("new task:{}", new_task.id().as_u64());
TID2TASK
.lock()
.insert(new_task.id().as_u64(), Arc::clone(&new_task));
let new_handler = if flags.contains(CloneFlags::CLONE_SIGHAND) {
self.signal_modules
.lock()
.get_mut(¤t().id().as_u64())
.unwrap()
.signal_handler
.clone()
} else {
Arc::new(Mutex::new(
self.signal_modules
.lock()
.get_mut(¤t().id().as_u64())
.unwrap()
.signal_handler
.lock()
.clone(),
))
};
if flags.contains(CloneFlags::CLONE_PARENT_SETTID)
& self.manual_alloc_for_lazy(ptid.into()).is_ok()
{
unsafe {
*(ptid as *mut i32) = new_task.id().as_u64() as i32;
}
}
if flags.contains(CloneFlags::CLONE_CHILD_SETTID)
|| flags.contains(CloneFlags::CLONE_CHILD_CLEARTID)
{
if flags.contains(CloneFlags::CLONE_CHILD_SETTID) {
new_task.set_child_tid(ctid);
}
if flags.contains(CloneFlags::CLONE_CHILD_CLEARTID) {
new_task.set_clear_child_tid(ctid);
}
if flags.contains(CloneFlags::CLONE_VM) {
if self.manual_alloc_for_lazy(ctid.into()).is_ok() {
unsafe {
*(ctid as *mut i32) = if flags.contains(CloneFlags::CLONE_CHILD_SETTID) {
new_task.id().as_u64() as i32
} else {
0
}
}
} else {
return Err(AxError::BadAddress);
}
} else {
let memory_set_wrapper = self.memory_set.lock();
let mut vm = memory_set_wrapper.lock();
if vm.manual_alloc_for_lazy(ctid.into()).is_ok() {
if let Ok((phyaddr, _, _)) = vm.query(ctid.into()) {
let vaddr: usize = phys_to_virt(phyaddr).into();
unsafe {
*(vaddr as *mut i32) = if flags.contains(CloneFlags::CLONE_CHILD_SETTID)
{
new_task.id().as_u64() as i32
} else {
0
}
}
drop(vm);
} else {
drop(vm);
return Err(AxError::BadAddress);
}
} else {
drop(vm);
return Err(AxError::BadAddress);
}
}
}
let return_id: u64;
if flags.contains(CloneFlags::CLONE_THREAD) {
self.tasks.lock().push(Arc::clone(&new_task));
self.signal_modules.lock().insert(
new_task.id().as_u64(),
SignalModule::init_signal(Some(new_handler)),
);
self.robust_list
.lock()
.insert(new_task.id().as_u64(), FutexRobustList::default());
return_id = new_task.id().as_u64();
} else {
let new_process = Arc::new(Process::new(
process_id,
parent_id,
new_memory_set,
self.get_heap_bottom(),
self.fd_manager.fd_table.lock().clone(),
));
PID2PC.lock().insert(process_id, Arc::clone(&new_process));
new_process.tasks.lock().push(Arc::clone(&new_task));
new_process.signal_modules.lock().insert(
new_task.id().as_u64(),
SignalModule::init_signal(Some(new_handler)),
);
new_process
.robust_list
.lock()
.insert(new_task.id().as_u64(), FutexRobustList::default());
return_id = new_process.pid;
self.children.lock().push(new_process);
};
if !flags.contains(CloneFlags::CLONE_THREAD) {
new_task.set_leader(true);
}
let current_task = current();
let mut trap_frame =
read_trapframe_from_kstack(current_task.get_kernel_stack_top().unwrap());
trap_frame.set_ret_code(0);
if flags.contains(CloneFlags::CLONE_SETTLS) {
#[cfg(not(target_arch = "x86_64"))]
trap_frame.set_tls(tls);
#[cfg(target_arch = "x86_64")]
unsafe {
new_task.set_tls_force(tls);
}
}
if let Some(stack) = stack {
trap_frame.set_user_sp(stack);
}
write_trapframe_to_kstack(new_task.get_kernel_stack_top().unwrap(), &trap_frame);
RUN_QUEUE.lock().add_task(new_task);
if flags.contains(CloneFlags::CLONE_VFORK) {
self.set_vfork_block(true);
yield_now_task();
}
Ok(return_id)
}
}
impl Process {
    /// Faults in (allocates) the lazily-mapped page containing `addr`.
    pub fn manual_alloc_for_lazy(&self, addr: VirtAddr) -> AxResult<()> {
        let memory_set = self.memory_set.lock();
        let mut vm = memory_set.lock();
        vm.manual_alloc_for_lazy(addr)
    }

    /// Faults in every lazily-mapped page in the range `[start, end]`.
    pub fn manual_alloc_range_for_lazy(&self, start: VirtAddr, end: VirtAddr) -> AxResult<()> {
        let memory_set = self.memory_set.lock();
        let mut vm = memory_set.lock();
        vm.manual_alloc_range_for_lazy(start, end)
    }

    /// Faults in the pages backing the object pointed to by `obj`.
    pub fn manual_alloc_type_for_lazy<T: Sized>(&self, obj: *const T) -> AxResult<()> {
        let memory_set = self.memory_set.lock();
        let mut vm = memory_set.lock();
        vm.manual_alloc_type_for_lazy(obj)
    }
}
impl Process {
    /// Picks the lowest free slot in `fd_table`, growing the table by one if
    /// every slot is taken and the fd limit has not been reached.
    ///
    /// # Errors
    /// `StorageFull` when the table is already at the fd limit.
    pub fn alloc_fd(&self, fd_table: &mut Vec<Option<Arc<dyn FileIO>>>) -> AxResult<usize> {
        if let Some(free) = fd_table.iter().position(|fd| fd.is_none()) {
            return Ok(free);
        }
        if fd_table.len() >= self.fd_manager.get_limit() as usize {
            debug!("fd table is full");
            return Err(AxError::StorageFull);
        }
        fd_table.push(None);
        Ok(fd_table.len() - 1)
    }

    /// Current working directory of this process.
    pub fn get_cwd(&self) -> String {
        self.fd_manager.cwd.lock().clone()
    }
}
impl Process {
    /// Returns the pending signal (if any) for the calling task, as found in
    /// its signal set.
    ///
    /// # Panics
    /// Panics if the calling task has no entry in `signal_modules`.
    pub fn have_signals(&self) -> Option<usize> {
        let current_task = current();
        self.signal_modules
            .lock()
            // FIX: repaired mojibake — the source had `¤t_task` where the
            // borrow `&current_task` belongs.
            .get(&current_task.id().as_u64())
            .unwrap()
            .signal_set
            .find_signal()
    }
}