use alloc::collections::VecDeque;
use alloc::sync::Arc;
#[cfg(feature = "monolithic")]
use axhal::KERNEL_PROCESS_ID;
use lazy_init::LazyInit;
use scheduler::BaseScheduler;
use spinlock::SpinNoIrq;
use taskctx::TaskState;
use crate::schedule::notify_wait_for_exit;
use crate::task::{new_init_task, new_task, CurrentTask};
use crate::{AxTaskRef, Scheduler, WaitQueue};
/// The global run queue, protected by an IRQ-disabling spinlock; initialized
/// once in [`init`] via `AxRunQueue::new()`.
pub static RUN_QUEUE: LazyInit<SpinNoIrq<AxRunQueue>> = LazyInit::new();
/// Tasks that have exited but whose resources (e.g. kernel stack) may still be
/// in use; drained by the "gc" task (see `gc_entry`).
pub static EXITED_TASKS: SpinNoIrq<VecDeque<AxTaskRef>> = SpinNoIrq::new(VecDeque::new());
/// Wait queue the "gc" task sleeps on; notified from `exit_current`.
static WAIT_FOR_EXIT: WaitQueue = WaitQueue::new();
/// Per-CPU idle task, used as the fallback in `resched` when the scheduler
/// has no runnable task for this CPU.
#[percpu::def_percpu]
pub static IDLE_TASK: LazyInit<AxTaskRef> = LazyInit::new();
/// The run queue of this system: a thin wrapper around the scheduler that
/// holds all ready tasks. Accessed through `RUN_QUEUE` with its lock held.
pub struct AxRunQueue {
    /// The underlying scheduling-policy implementation.
    scheduler: Scheduler,
}
impl AxRunQueue {
    /// Creates the run queue, pre-populated with the "gc" task that reclaims
    /// the resources of exited tasks (see [`gc_entry`]).
    ///
    /// Returns the queue already wrapped in an IRQ-disabling spinlock.
    pub fn new() -> SpinNoIrq<Self> {
        let gc_task = new_task(
            gc_entry,
            "gc".into(),
            axconfig::TASK_STACK_SIZE,
            #[cfg(feature = "monolithic")]
            KERNEL_PROCESS_ID,
            #[cfg(feature = "monolithic")]
            0,
            #[cfg(feature = "monolithic")]
            false,
        );
        let mut scheduler = Scheduler::new();
        scheduler.add_task(gc_task);
        SpinNoIrq::new(Self { scheduler })
    }

    /// Adds a task to the scheduler. The task must already be `Ready`.
    pub fn add_task(&mut self, task: AxTaskRef) {
        debug!("task spawn: {}", task.id_name());
        assert!(task.is_ready());
        self.scheduler.add_task(task);
    }

    /// Timer-tick hook: informs the scheduler that one tick elapsed for the
    /// current task and, if its time slice is exhausted (and the `preempt`
    /// feature is enabled), marks it as pending preemption.
    #[cfg(feature = "irq")]
    pub fn scheduler_timer_tick(&mut self) {
        let curr = crate::current();
        // The idle task is never time-sliced.
        if !curr.is_idle() && self.scheduler.task_tick(curr.as_task_ref()) {
            #[cfg(feature = "preempt")]
            curr.set_preempt_pending(true);
        }
    }

    /// Voluntarily gives up the CPU: re-queues the current task and picks
    /// another one to run.
    pub fn yield_current(&mut self) {
        let curr = crate::current();
        trace!("task yield: {}", curr.id_name());
        assert!(curr.is_running());
        self.resched(false);
    }

    /// Sets the scheduling priority of the current task; returns whether the
    /// scheduler accepted the new priority.
    pub fn set_current_priority(&mut self, prio: isize) -> bool {
        self.scheduler
            .set_priority(crate::current().as_task_ref(), prio)
    }

    /// Preemptively reschedules: switches away immediately if the current
    /// task allows preemption right now, otherwise records the preemption as
    /// pending so it happens at the next safe point.
    #[cfg(feature = "preempt")]
    pub fn preempt_resched(&mut self) {
        let curr = crate::current();
        assert!(curr.is_running());
        // NOTE(review): the argument `1` presumably accounts for the single
        // run-queue lock held by the caller — confirm against `can_preempt`.
        let can_preempt = curr.can_preempt(1);
        debug!(
            "current task is to be preempted: {}, allow={}",
            curr.id_name(),
            can_preempt
        );
        if can_preempt {
            self.resched(true);
        } else {
            curr.set_preempt_pending(true);
        }
    }

    /// Exits the current task with `exit_code` and switches to another task;
    /// never returns.
    ///
    /// If the current task is the init task, all exited tasks are dropped and
    /// the whole system is terminated instead.
    pub fn exit_current(&mut self, exit_code: i32) -> ! {
        let curr = crate::current();
        debug!("task exit: {}, exit_code={}", curr.id_name(), exit_code);
        assert!(curr.is_running());
        assert!(!curr.is_idle());
        if curr.is_init() {
            EXITED_TASKS.lock().clear();
            axhal::misc::terminate();
        } else {
            curr.set_state(TaskState::Exited);
            curr.set_exit_code(exit_code);
            // Wake any task waiting (join-style) on this task's exit.
            notify_wait_for_exit(curr.as_task_ref(), self);
            // Keep a reference alive in EXITED_TASKS: the gc task must not
            // drop this task (and its stack) until we have switched away.
            EXITED_TASKS.lock().push_back(curr.clone());
            // Wake the gc task so it can reclaim exited tasks.
            WAIT_FOR_EXIT.notify_one_locked(false, self);
            self.resched(false);
        }
        unreachable!("task exited!");
    }

    /// Removes a task from the scheduler, marking it `Exited` and stashing it
    /// in `EXITED_TASKS`. Tasks not in the `Ready` state are left untouched;
    /// running or idle tasks are rejected by the asserts.
    #[cfg(feature = "monolithic")]
    pub fn remove_task(&mut self, task: &AxTaskRef) {
        debug!("task remove: {}", task.id_name());
        assert!(!task.is_running());
        assert!(!task.is_idle());
        if task.is_ready() {
            task.set_state(TaskState::Exited);
            EXITED_TASKS.lock().push_back(task.clone());
            self.scheduler.remove_task(task);
        }
    }

    /// Blocks the current task: marks it `Blocked`, hands a reference to the
    /// caller via `wait_queue_push` (which is expected to enqueue it on some
    /// wait queue), then reschedules.
    pub fn block_current<F>(&mut self, wait_queue_push: F)
    where
        F: FnOnce(AxTaskRef),
    {
        let curr = crate::current();
        debug!("task block: {}", curr.id_name());
        assert!(curr.is_running());
        assert!(!curr.is_idle());
        // NOTE(review): `1` presumably accounts for the run-queue lock held
        // by the caller — confirm against `can_preempt`.
        #[cfg(feature = "preempt")]
        assert!(curr.can_preempt(1));
        // State is changed before the task becomes visible on the wait queue.
        curr.set_state(TaskState::Blocked);
        wait_queue_push(curr.clone());
        self.resched(false);
    }

    /// Makes `task` ready again if it is currently blocked. When `resched` is
    /// true (and the `preempt` feature is enabled), additionally marks the
    /// current task for preemption so the woken task can run soon.
    pub fn unblock_task(&mut self, task: AxTaskRef, resched: bool) {
        debug!("task unblock: {}", task.id_name());
        if task.is_blocked() {
            task.set_state(TaskState::Ready);
            self.scheduler.add_task(task);
            if resched {
                #[cfg(feature = "preempt")]
                crate::current().set_preempt_pending(true);
            }
        }
    }

    /// Blocks the current task until `deadline`, arming a timer alarm to wake
    /// it up. If the deadline has already passed, returns immediately without
    /// blocking.
    #[cfg(feature = "irq")]
    pub fn sleep_until(&mut self, deadline: axhal::time::TimeValue) {
        let curr = crate::current();
        debug!("task sleep: {}, deadline={:?}", curr.id_name(), deadline);
        assert!(curr.is_running());
        assert!(!curr.is_idle());
        let now = axhal::time::current_time();
        if now < deadline {
            crate::timers::set_alarm_wakeup(deadline, curr.clone());
            curr.set_state(TaskState::Blocked);
            self.resched(false);
        }
    }
}
impl AxRunQueue {
    /// Common re-scheduling routine: puts the previous task back into the
    /// scheduler (if it is still runnable), picks the next task, and switches
    /// to it.
    ///
    /// `preempt`: whether this reschedule was caused by preemption of the
    /// current task (forwarded to the scheduler when re-queueing it).
    fn resched(&mut self, preempt: bool) {
        let prev = crate::current();
        if prev.is_running() {
            prev.set_state(TaskState::Ready);
            // The idle task is never stored in the scheduler; it only serves
            // as the fallback below.
            if !prev.is_idle() {
                self.scheduler.put_prev_task(prev.clone(), preempt);
            }
        }
        #[cfg(feature = "monolithic")]
        {
            use alloc::collections::BTreeSet;
            use axhal::cpu::this_cpu_id;
            // IDs of tasks already inspected (and re-queued) in this pass.
            // Seeing a task a second time means we have cycled through the
            // entire queue without finding one runnable on this CPU.
            let mut task_set = BTreeSet::new();
            let next = loop {
                let task = match self.scheduler.pick_next_task() {
                    Some(task) => task,
                    // Run queue empty: fall back to this CPU's idle task.
                    // SAFETY: IDLE_TASK is initialized for this CPU in
                    // init()/init_secondary() before the first reschedule.
                    None => break unsafe {
                        IDLE_TASK.current_ref_raw().get_unchecked().clone()
                    },
                };
                if task_set.contains(&task.id().as_u64()) {
                    // Full cycle: no ready task may run on this CPU. Put the
                    // picked task back first — `pick_next_task` removed it
                    // from the scheduler, and dropping it here would silently
                    // lose it from the run queue forever.
                    self.scheduler.put_prev_task(task, false);
                    // SAFETY: see above.
                    break unsafe {
                        IDLE_TASK.current_ref_raw().get_unchecked().clone()
                    };
                }
                // Honor the task's CPU-affinity mask: bit `cpu` set means the
                // task is allowed to run on that CPU.
                let mask = task.get_cpu_set();
                let curr_cpu = this_cpu_id();
                if mask & (1 << curr_cpu) != 0 {
                    break task;
                }
                // Not allowed on this CPU: remember its id and re-queue it.
                task_set.insert(task.id().as_u64());
                self.scheduler.put_prev_task(task, false);
            };
            self.switch_to(prev, next);
        }
        #[cfg(not(feature = "monolithic"))]
        {
            // SAFETY: IDLE_TASK is initialized for this CPU before the first
            // reschedule.
            let next = self.scheduler.pick_next_task().unwrap_or_else(|| unsafe {
                IDLE_TASK.current_ref_raw().get_unchecked().clone()
            });
            self.switch_to(prev, next);
        }
    }

    /// Switches from `prev_task` to `next_task`: updates task states and the
    /// current-task pointer, then performs the low-level context switch.
    fn switch_to(&mut self, prev_task: CurrentTask, next_task: AxTaskRef) {
        trace!(
            "context switch: {} -> {}",
            prev_task.id_name(),
            next_task.id_name()
        );
        #[cfg(feature = "preempt")]
        next_task.set_preempt_pending(false);
        next_task.set_state(TaskState::Running);
        // Switching to ourselves: the state has been fixed up above, so
        // there is nothing more to do.
        if prev_task.ptr_eq(&next_task) {
            return;
        }
        #[cfg(feature = "monolithic")]
        {
            // Account CPU time to both tasks at the switch boundary.
            let current_timestamp = axhal::time::current_time_nanos() as usize;
            next_task.time_stat_when_switch_to(current_timestamp);
            prev_task.time_stat_when_switch_from(current_timestamp);
        }
        unsafe {
            let prev_ctx_ptr = prev_task.ctx_mut_ptr();
            let next_ctx_ptr = next_task.ctx_mut_ptr();
            // `prev_task` must be referenced elsewhere (run queue, wait queue
            // or EXITED_TASKS); otherwise it would be dropped right here,
            // freeing the kernel stack we are still running on.
            assert!(Arc::strong_count(prev_task.as_task_ref()) > 1);
            assert!(Arc::strong_count(&next_task) >= 1);
            #[cfg(feature = "monolithic")]
            {
                // Switch the user address space if the next task owns one
                // (a zero token means "no private page table").
                let page_table_token = *next_task.page_table_token.get();
                if page_table_token != 0 {
                    axhal::arch::write_page_table_root0(page_table_token.into());
                }
            }
            CurrentTask::set_current(prev_task, next_task);
            axhal::arch::task_context_switch(&mut (*prev_ctx_ptr), &(*next_ctx_ptr))
        }
    }
}
/// Entry point of the "gc" task: reaps exited tasks whose only remaining
/// reference is the one held by `EXITED_TASKS`, then sleeps until the next
/// task exit is announced on `WAIT_FOR_EXIT`.
fn gc_entry() {
    loop {
        // Snapshot the queue length so a task we re-queue below is not
        // examined again in the same pass.
        let pending = EXITED_TASKS.lock().len();
        for _ in 0..pending {
            // Pop under a short-lived lock; binding the result first ensures
            // the guard is released before we may lock again to re-queue.
            let popped = EXITED_TASKS.lock().pop_front();
            match popped {
                // Last reference: safe to free the task (and its stack) now.
                Some(task) if Arc::strong_count(&task) == 1 => drop(task),
                // Still referenced elsewhere: defer reclamation.
                Some(task) => EXITED_TASKS.lock().push_back(task),
                None => {}
            }
        }
        WAIT_FOR_EXIT.wait();
    }
}
/// Initializes the task subsystem on the boot CPU: creates the per-CPU idle
/// task, turns the current boot context into the "main" task, and brings up
/// the global run queue.
pub(crate) fn init() {
    // Stack size (in bytes) for the idle task.
    const IDLE_TASK_STACK_SIZE: usize = 4096;

    // Spawn the idle task for this CPU and stash it in the per-CPU slot.
    let idle = new_task(
        || crate::run_idle(),
        "idle".into(),
        IDLE_TASK_STACK_SIZE,
        #[cfg(feature = "monolithic")]
        KERNEL_PROCESS_ID,
        #[cfg(feature = "monolithic")]
        0,
        #[cfg(feature = "monolithic")]
        false,
    );
    IDLE_TASK.with_current(|slot| slot.init_by(idle.clone()));

    // Promote the currently running boot context into the "main" task.
    let main_task = new_init_task("main".into());
    #[cfg(feature = "monolithic")]
    main_task.set_process_id(KERNEL_PROCESS_ID);
    main_task.set_state(TaskState::Running);

    // Bring up the run queue, then install "main" as the current task.
    RUN_QUEUE.init_by(AxRunQueue::new());
    unsafe { CurrentTask::init_current(main_task) }
}
/// Initializes the task subsystem on a secondary CPU: here the boot context
/// itself becomes that CPU's idle task (no separate stack is allocated).
pub(crate) fn init_secondary() {
    let idle = new_init_task("idle".into());
    #[cfg(feature = "monolithic")]
    idle.set_process_id(KERNEL_PROCESS_ID);
    idle.set_state(TaskState::Running);
    // Register it both as this CPU's idle task and as the current task.
    IDLE_TASK.with_current(|slot| slot.init_by(idle.clone()));
    unsafe { CurrentTask::init_current(idle) }
}