use core::ops::Deref;
use core::ptr::copy_nonoverlapping;
use core::str::from_utf8;
extern crate alloc;
use alloc::sync::Arc;
use alloc::{
string::{String, ToString},
vec,
vec::Vec,
};
use axconfig::{MAX_USER_HEAP_SIZE, MAX_USER_STACK_SIZE, USER_HEAP_BASE, USER_STACK_TOP};
use axerrno::{AxError, AxResult};
use axhal::mem::VirtAddr;
use axhal::paging::MappingFlags;
use axhal::time::{current_time_nanos, NANOS_PER_MICROS, NANOS_PER_SEC};
use axhal::KERNEL_PROCESS_ID;
use axlog::{debug, info};
use axmem::MemorySet;
use axsignal::signal_no::SignalNo;
use axsync::Mutex;
use axtask::{current, yield_now, AxTaskRef, CurrentTask, TaskId, TaskState, IDLE_TASK, RUN_QUEUE};
use elf_parser::{
get_app_stack_region, get_auxv_vector, get_elf_entry, get_elf_segments, get_relocate_pairs,
};
use xmas_elf::program::SegmentData;
use crate::flags::WaitStatus;
use crate::futex::clear_wait;
use crate::link::real_path;
use crate::process::{Process, PID2PC, TID2TASK};
use crate::signal::{send_signal_to_process, send_signal_to_thread};
/// Creates the kernel process (the root of the process tree) and registers
/// it in the global PID table.
///
/// Must be called exactly once during boot, before any user process is
/// spawned: it also initializes the task scheduler and adopts the idle task
/// as the kernel process's first task.
pub fn init_kernel_process() {
    // The kernel process owns an empty address space (kernel code runs with
    // the boot page table). The first argument is a freshly allocated task
    // id used as the process id — presumably matching KERNEL_PROCESS_ID;
    // TODO confirm against Process::new / axhal.
    let kernel_process = Arc::new(Process::new(
        TaskId::new().as_u64(),
        0,
        Mutex::new(Arc::new(Mutex::new(MemorySet::new_empty()))),
        0,
        vec![],
    ));
    axtask::init_scheduler();
    // SAFETY: called immediately after init_scheduler(), which is assumed
    // to have initialized IDLE_TASK for this CPU — confirm in axtask.
    kernel_process.tasks.lock().push(Arc::clone(unsafe {
        IDLE_TASK.current_ref_raw().get_unchecked()
    }));
    PID2PC.lock().insert(kernel_process.pid(), kernel_process);
}
/// Returns the [`Process`] that the currently running task belongs to.
///
/// # Panics
/// Panics if the current task's process id is missing from [`PID2PC`],
/// which would indicate a corrupted process table.
pub fn current_process() -> Arc<Process> {
    let current_task = current();
    // Fixed: the lookup key was garbled by an encoding error (`¤t_task`,
    // i.e. a mangled `&current_task`), which did not compile.
    Arc::clone(
        PID2PC
            .lock()
            .get(&current_task.get_process_id())
            .unwrap(),
    )
}
/// Exits the current task with `exit_code`; never returns.
///
/// If the task is the thread-group leader this tears down the whole
/// process: it waits for all sibling threads to exit, marks the process a
/// zombie carrying `exit_code`, clears its task/fd/signal tables, reparents
/// children to the kernel process, and unblocks a vfork-blocked parent.
/// A non-leader task only removes itself from the process's bookkeeping.
pub fn exit_current_task(exit_code: i32) -> ! {
    let process = current_process();
    let current_task = current();
    let curr_id = current_task.id().as_u64();
    info!("exit task id {} with code _{}_", curr_id, exit_code);
    // Clear futex wait records keyed on this task — or on the whole
    // process when the leader exits.
    clear_wait(
        if current_task.is_leader() {
            process.pid()
        } else {
            curr_id
        },
        current_task.is_leader(),
    );
    // Notify the parent with signal 17 (SIGCHLD in Linux numbering —
    // confirm against axsignal), unless the parent is the kernel process.
    if current_task.get_sig_child() || current_task.is_leader() {
        let parent = process.get_parent();
        if parent != KERNEL_PROCESS_ID {
            send_signal_to_process(parent as isize, 17).unwrap();
        }
    }
    // CLONE_CHILD_CLEARTID-style behavior: zero the user word registered
    // via set_clear_child_tid, faulting its page in first if necessary.
    let clear_child_tid = current_task.get_clear_child_tid();
    if clear_child_tid != 0 {
        if process
            .manual_alloc_for_lazy(clear_child_tid.into())
            .is_ok()
        {
            // SAFETY: manual_alloc_for_lazy succeeded, so the address is
            // assumed mapped and writable in the current address space —
            // verify that guarantee in axmem.
            unsafe {
                *(clear_child_tid as *mut i32) = 0;
            }
        }
    }
    if current_task.is_leader() {
        // Leader path: spin (yielding) until every other thread of the
        // process has exited before destroying shared process state.
        loop {
            let mut all_exited = true;
            for task in process.tasks.lock().deref() {
                if !task.is_leader() && task.state() != TaskState::Exited {
                    all_exited = false;
                    break;
                }
            }
            if !all_exited {
                yield_now();
            } else {
                break;
            }
        }
        TID2TASK.lock().remove(&curr_id);
        process.set_exit_code(exit_code);
        // The Process object stays around as a zombie until reaped
        // (see wait_pid), but its resources are released now.
        process.set_zombie(true);
        process.tasks.lock().clear();
        process.fd_manager.fd_table.lock().clear();
        process.signal_modules.lock().clear();
        let mut pid2pc = PID2PC.lock();
        let kernel_process = pid2pc.get(&KERNEL_PROCESS_ID).unwrap();
        // Orphaned children are reparented to the kernel process.
        for child in process.children.lock().deref() {
            child.set_parent(KERNEL_PROCESS_ID);
            kernel_process.children.lock().push(Arc::clone(child));
        }
        // A parent blocked in vfork() may resume once this child exits.
        if let Some(parent_process) = pid2pc.get(&process.get_parent()) {
            parent_process.set_vfork_block(false);
        }
        pid2pc.remove(&process.pid());
        drop(pid2pc);
        drop(process);
    } else {
        // Non-leader path: just unlink this task from the process.
        TID2TASK.lock().remove(&curr_id);
        let mut tasks = process.tasks.lock();
        let len = tasks.len();
        for index in 0..len {
            if tasks[index].id().as_u64() == curr_id {
                tasks.remove(index);
                break;
            }
        }
        drop(tasks);
        process.signal_modules.lock().remove(&curr_id);
        drop(process);
    }
    // Hand control back to the scheduler; this call does not return.
    RUN_QUEUE.lock().exit_current(exit_code);
}
pub fn load_app(
name: String,
mut args: Vec<String>,
envs: &Vec<String>,
memory_set: &mut MemorySet,
) -> AxResult<(VirtAddr, VirtAddr, VirtAddr)> {
if name.ends_with(".sh") {
args = [vec![String::from("busybox"), String::from("sh")], args].concat();
return load_app("busybox".to_string(), args, envs, memory_set);
}
let elf_data = if let Ok(ans) = axfs::api::read(name.as_str()) {
ans
} else {
return Err(AxError::NotFound);
};
let elf = xmas_elf::ElfFile::new(&elf_data).expect("Error parsing app ELF file.");
debug!("app elf data length: {}", elf_data.len());
if let Some(interp) = elf
.program_iter()
.find(|ph| ph.get_type() == Ok(xmas_elf::program::Type::Interp))
{
let interp = match interp.get_data(&elf) {
Ok(SegmentData::Undefined(data)) => data,
_ => panic!("Invalid data in Interp Elf Program Header"),
};
let interp_path = from_utf8(interp).expect("Interpreter path isn't valid UTF-8");
let interp_path = interp_path.trim_matches(char::from(0)).to_string();
let real_interp_path = real_path(&interp_path);
args = [vec![real_interp_path.clone()], args].concat();
return load_app(real_interp_path, args, envs, memory_set);
}
info!("args: {:?}", args);
let elf_base_addr = Some(0x400_0000);
axlog::warn!("The elf base addr may be different in different arch!");
let entry = get_elf_entry(&elf, elf_base_addr);
let segments = get_elf_segments(&elf, elf_base_addr);
let relocate_pairs = get_relocate_pairs(&elf, elf_base_addr);
for segment in segments {
memory_set.new_region(
segment.vaddr,
segment.size,
segment.flags,
segment.data.as_deref(),
None,
);
}
for relocate_pair in relocate_pairs {
let src: usize = relocate_pair.src.into();
let dst: usize = relocate_pair.dst.into();
let count = relocate_pair.count;
unsafe { copy_nonoverlapping(src.to_ne_bytes().as_ptr(), dst as *mut u8, count) }
}
let heap_start = VirtAddr::from(USER_HEAP_BASE);
let heap_data = [0_u8].repeat(MAX_USER_HEAP_SIZE);
memory_set.new_region(
heap_start,
MAX_USER_HEAP_SIZE,
MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER,
Some(&heap_data),
None,
);
info!(
"[new region] user heap: [{:?}, {:?})",
heap_start,
heap_start + MAX_USER_HEAP_SIZE
);
let auxv = get_auxv_vector(&elf, elf_base_addr);
let stack_top = VirtAddr::from(USER_STACK_TOP);
let stack_size = MAX_USER_STACK_SIZE;
let (stack_data, stack_bottom) = get_app_stack_region(args, envs, auxv, stack_top, stack_size);
memory_set.new_region(
stack_top,
stack_size,
MappingFlags::USER | MappingFlags::READ | MappingFlags::WRITE,
Some(&stack_data),
None,
);
info!(
"[new region] user stack: [{:?}, {:?})",
stack_top,
stack_top + stack_size
);
Ok((entry, stack_bottom.into(), heap_start))
}
/// Records, in the current task's time-accounting state, the transition
/// from kernel mode back to user mode, stamped with the current time (ns).
pub fn time_stat_from_kernel_to_user() {
    let now_ns = current_time_nanos() as usize;
    current().time_stat_from_kernel_to_user(now_ns);
}
/// Records, in the current task's time-accounting state, the transition
/// from user mode into the kernel, stamped with the current time (ns).
/// `#[no_mangle]` keeps the symbol name stable for external callers.
#[no_mangle]
pub fn time_stat_from_user_to_kernel() {
    let now_ns = current_time_nanos() as usize;
    current().time_stat_from_user_to_kernel(now_ns);
}
/// Reports the current task's accumulated user and kernel CPU time as
/// `(utime_s, utime_us, stime_s, stime_us)`.
///
/// NOTE(review): the microsecond fields are *totals* (whole duration in µs),
/// not the remainder beyond the seconds field — callers appear to expect
/// this, so it is preserved as-is.
pub fn time_stat_output() -> (usize, usize, usize, usize) {
    let (utime_ns, stime_ns) = current().time_stat_output();
    let per_sec = NANOS_PER_SEC as usize;
    let per_micro = NANOS_PER_MICROS as usize;
    (
        utime_ns / per_sec,
        utime_ns / per_micro,
        stime_ns / per_sec,
        stime_ns / per_micro,
    )
}
/// Handles a page fault at `addr` with the faulting access `flags`.
///
/// Delegates to the current process's `MemorySet`; on success the TLB is
/// flushed, otherwise SIGSEGV is delivered to the faulting thread.
pub fn handle_page_fault(addr: VirtAddr, flags: MappingFlags) {
    axlog::debug!("'page fault' addr: {:?}, flags: {:?}", addr, flags);
    let current_process = current_process();
    axlog::debug!(
        "memory token : {:#x}",
        current_process.memory_set.lock().lock().page_table_token()
    );
    // memory_set is a Mutex<Arc<Mutex<MemorySet>>> (see init_kernel_process),
    // hence the double lock() to reach the inner MemorySet.
    if current_process
        .memory_set
        .lock()
        .lock()
        .handle_page_fault(addr, flags)
        .is_ok()
    {
        axhal::arch::flush_tlb(None);
    } else {
        // Unrecoverable fault: send SIGSEGV to the faulting thread and
        // deliberately ignore any delivery failure.
        let _ = send_signal_to_thread(current().id().as_u64() as isize, SignalNo::SIGSEGV as isize);
    }
}
/// Non-blocking check for an exited child of the current process.
///
/// * `pid == -1` matches any child; otherwise only the child whose pid
///   equals `pid`.
/// * If a matching child has exited it is removed (reaped) from the
///   children list and its pid returned; its exit code, shifted left by 8
///   (wait-status layout), is written through `exit_code_ptr` when that
///   pointer is non-null.
/// * `Err(WaitStatus::Running)` — a matching child exists but has not
///   exited yet; `Err(WaitStatus::NotExist)` — no child matched.
///
/// # Safety
/// If `exit_code_ptr` is non-null it must be valid for writing an `i32`;
/// it is dereferenced without further validation.
pub unsafe fn wait_pid(pid: isize, exit_code_ptr: *mut i32) -> Result<u64, WaitStatus> {
    let curr_process = current_process();
    // Index within the children vec of the child to reap, if any.
    let mut exit_task_id: usize = 0;
    let mut answer_id: u64 = 0;
    let mut answer_status = WaitStatus::NotExist;
    for (index, child) in curr_process.children.lock().iter().enumerate() {
        if pid == -1 {
            // Any child counts: report Running unless one has exited.
            answer_status = WaitStatus::Running;
            if let Some(exit_code) = child.get_code_if_exit() {
                answer_status = WaitStatus::Exited;
                info!("wait pid _{}_ with code _{}_", child.pid(), exit_code);
                exit_task_id = index;
                if !exit_code_ptr.is_null() {
                    unsafe {
                        *exit_code_ptr = exit_code << 8;
                    }
                }
                answer_id = child.pid();
                break;
            }
        } else if child.pid() == pid as u64 {
            if let Some(exit_code) = child.get_code_if_exit() {
                answer_status = WaitStatus::Exited;
                info!("wait pid _{}_ with code _{:?}_", child.pid(), exit_code);
                exit_task_id = index;
                if !exit_code_ptr.is_null() {
                    unsafe {
                        *exit_code_ptr = exit_code << 8;
                    }
                }
                answer_id = child.pid();
            } else {
                answer_status = WaitStatus::Running;
            }
            // The requested pid was found: stop searching either way.
            break;
        }
    }
    if answer_status == WaitStatus::Exited {
        // Reap: drop our reference to the zombie child.
        curr_process.children.lock().remove(exit_task_id);
        return Ok(answer_id);
    }
    Err(answer_status)
}
/// Voluntarily gives up the CPU so the scheduler can run another task.
pub fn yield_now_task() {
    axtask::yield_now();
}
/// Blocks the current task for at least `dur`.
pub fn sleep_now_task(dur: core::time::Duration) {
    axtask::sleep(dur);
}
/// Returns a handle to the task currently running on this CPU.
pub fn current_task() -> CurrentTask {
    axtask::current()
}
/// Registers `tid` as the current task's clear-child-tid address; it is
/// zeroed when the task exits (see `exit_current_task`).
pub fn set_child_tid(tid: usize) {
    current_task().set_clear_child_tid(tid);
}
pub fn get_task_ref(tid: u64) -> Option<AxTaskRef> {
TID2TASK.lock().get(&tid).cloned()
}