void schedule();
-extern task_union root_task;
+extern task_t root_task;
-extern void load_cr3(task_union *tsk);
+extern void load_cr3(task_t *tsk);
extern list_head_t all_tasks;
extern list_head_t delay_tasks;
    list_head_t pend;        // chains the task onto a list for some wait condition (e.g. delay_tasks)
- list_head_t wait;
+ // list_head_t wait;
    uint32_t sched_cnt;      // number of times the task has been scheduled onto the CPU
    uint32_t sched_keep_cnt; // number of times the time slice expired but the task kept running without being switched out
};
unsigned char stack[TASK_SIZE];
-} task_union;
+} task_t;
-task_union *alloc_task_union();
+task_t *alloc_task_t();
-static inline task_union *get_current() {
- task_union *tsk;
+static inline task_t *get_current() {
+ task_t *tsk;
asm("andl %%esp, %0;" : "=r"(tsk) : "0"(~(TASK_SIZE - 1)));
return tsk;
}
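// Note: the mask trick above relies on each task_t occupying a TASK_SIZE-aligned block
// that also holds that task's kernel stack, so clearing the low bits of %esp recovers the
// block base, i.e. the task_t itself. Rough C equivalent (a sketch, assuming TASK_SIZE is
// a power of two):
//     int anchor;  /* any local variable living on the current kernel stack */
//     return (task_t *)((unsigned long)&anchor & ~(TASK_SIZE - 1));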
static inline pid_t sysc_getpid() { return current->pid; }
-task_union *find_task(pid_t pid);
+task_t *find_task(pid_t pid);
#define ROOT_TSK_PID (0)
#include <irq.h>
#include <list.h>
#include <sched.h>
-union task_union;
typedef struct wait_queue_head {
list_head_t task_list;
} wait_queue_head_t;
typedef struct {
- union task_union *task;
+ task_t *task;
list_head_t task_list;
} wait_queue_t;
// TODO: move this work into a kernel task later; disabling kernel preemption has to be implemented first
const char *task_state(unsigned int state);
void clk_bh_handler(void *arg) {
- task_union *p = 0;
+ task_t *p = 0;
list_head_t *t = 0;
list_head_t *pos = 0;
list_for_each_safe(pos, t, &delay_tasks) {
- p = list_entry(pos, task_union, pend);
+ p = list_entry(pos, task_t, pend);
// printk("%s state: %s\n", p->name, task_state(p->state));
assert(p->state == TASK_WAIT);
assert(p->delay_jiffies != 0);
irq_save(flags);
current->state = TASK_EXITING;
- task_union *t = current;
+ task_t *t = current;
irq_restore(flags);
extern list_head_t all_tasks;
int do_fork(pt_regs_t *regs, unsigned long flags) {
- task_union *tsk;
- tsk = alloc_task_union();
+ task_t *tsk;
+ tsk = alloc_task_t();
printd("fork task %08x flags %08x\n", tsk, flags);
if (tsk == NULL) {
panic("can not malloc PCB");
}
- memcpy(tsk, current, sizeof(task_union));
+ memcpy(tsk, current, sizeof(task_t));
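+    // sizeof(task_t) covers the whole union, PCB fields and kernel stack alike, so the
+    // child starts from a byte-for-byte snapshot of the parent's task block.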
assert(tsk->magic == TASK_MAGIC);
#include "mm.h"
#include "msr.h"
-task_union root_task __attribute__((__aligned__(PAGE_SIZE)));
+task_t root_task __attribute__((__aligned__(PAGE_SIZE)));
// pid wrap-around is not handled for now
pid_t get_next_pid() {
return pid;
}
-void load_cr3(task_union *tsk) { LoadCR3(tsk->cr3); }
+void load_cr3(task_t *tsk) { LoadCR3(tsk->cr3); }
extern pde_t __initdata init_pgd[PDECNT_PER_PAGE] __attribute__((__aligned__(PAGE_SIZE)));
printk("init_root_task tss.esp0 %08x\n", tss.esp0);
}
-kmem_cache_t *task_union_cache;
+kmem_cache_t *task_t_cache;
void setup_tasks() {
INIT_LIST_HEAD(&all_tasks);
init_root_task();
- task_union_cache = kmem_cache_create("task_union", sizeof(task_union), PAGE_SIZE);
- if (0 == task_union_cache) {
+ task_t_cache = kmem_cache_create("task_t", sizeof(task_t), PAGE_SIZE);
+ if (0 == task_t_cache) {
panic("setup tasks failed. out of memory");
}
}
-task_union *alloc_task_union() {
- task_union *task;
- task = (task_union *)kmem_cache_alloc(task_union_cache, 0);
+task_t *alloc_task_t() {
+ task_t *task;
+ task = (task_t *)kmem_cache_alloc(task_t_cache, 0);
return task;
}
#endif
}
-void context_switch(task_union *prev, task_union *next) {
+void context_switch(task_t *prev, task_t *next) {
unsigned long eax, ebx, ecx, edx, esi, edi;
asm volatile(
"pushfl;"
: "memory");
}
-task_union *find_task(pid_t pid) {
- task_union *p = 0;
+task_t *find_task(pid_t pid) {
+ task_t *p = 0;
list_head_t *pos = 0, *tmp = 0;
unsigned long iflags;
irq_save(iflags);
list_for_each_safe(pos, tmp, &all_tasks) {
- p = list_entry(pos, task_union, list);
+ p = list_entry(pos, task_t, list);
if (p->pid == pid) {
break;
}
}
void debug_print_all_tasks() {
- task_union *p = 0;
+ task_t *p = 0;
list_head_t *pos = 0, *t = 0;
printl(MPL_TASK_TITLE, " NAME STATE TK/PI REASON SCHED KEEP");
list_for_each_safe(pos, t, &all_tasks) {
- p = list_entry(pos, task_union, list);
+ p = list_entry(pos, task_t, list);
printl(MPL_TASK_0 + p->pid, "%08x%s%-6s:%u %s %02u/%02u %-10s %-10u %-10u", p,
p->state == TASK_RUNNING ? ">" : " ", p->name, p->pid, task_state(p->state), p->ticks, p->priority,
p->reason, p->sched_cnt, p->sched_keep_cnt);
}
void schedule() {
- task_union *root = &root_task;
- task_union *sel = 0;
- task_union *p = 0;
+ task_t *root = &root_task;
+ task_t *sel = 0;
+ task_t *p = 0;
list_head_t *pos = 0, *t = 0;
assert(current->ticks <= TASK_MAX_PRIORITY);
}
list_for_each_safe(pos, t, &all_tasks) {
- p = list_entry(pos, task_union, list);
+ p = list_entry(pos, task_t, list);
if (p == &root_task) {
continue;
}
}
- task_union *prev = current;
- task_union *next = sel != 0 ? sel : root;
+ task_t *prev = current;
+ task_t *next = sel != 0 ? sel : root;
next->state = TASK_RUNNING;
next->reason = "";
}
void debug_sched() {
- task_union *p = list_entry(current->list.next, task_union, list);
+ task_t *p = list_entry(current->list.next, task_t, list);
p->state = (p->state == TASK_READY) ? TASK_WAIT : TASK_READY;
}
#include <sched.h>
#include <semaphore.h>
+typedef struct semaphore_waiter {
+ list_head_t list;
+ task_t *task;
+
+    // In the Linux kernel this struct also carries an `up` field, whose purpose is to
+    // guard against a task being woken by mistake.
+    // For example, if a task sleeping on a semaphore receives a signal, the kernel may
+    // wake it so it can handle that signal; but if it has not actually been granted the
+    // semaphore (i.e. up is still false), it should keep waiting instead of running on.
+    // This kernel has no use for that field yet.
+
+} semaphore_waiter_t;
+
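+// A sketch of how a Linux-style `up` flag would be used if it were added (illustrative
+// only, not part of this kernel): the sleeper re-checks the flag after every wakeup and
+// only proceeds once up() has set it, so a spurious wakeup simply loops back to sleep.
+//
+//     while (!waiter.up) {              /* hypothetical field, see the comment above */
+//         task->state = TASK_WAIT;
+//         schedule();                   /* woken either by up() or spuriously */
+//     }
+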
void semaphore_init(semaphore_t *s, unsigned int v) {
s->cnt = v;
INIT_LIST_HEAD(&(s->wait_list));
}
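// Typical usage (illustrative sketch): a semaphore initialized to 1 behaves as a mutex,
// which is exactly what mutex_lock()/mutex_unlock() at the bottom of this file rely on:
//     semaphore_t lock;
//     semaphore_init(&lock, 1);
//     mutex_lock(&lock);
//     /* ... critical section ... */
//     mutex_unlock(&lock);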
-#if 1
volatile void down(semaphore_t *s) {
unsigned long iflags;
irq_save(iflags);
if (likely(s->cnt > 0)) {
s->cnt--;
+ irq_restore(iflags);
} else {
- task_union *task = current;
- list_add_tail(&task->wait, &s->wait_list);
+ task_t *task = current;
+ semaphore_waiter_t waiter;
+ waiter.task = task;
+ INIT_LIST_HEAD(&waiter.list);
+ list_add(&waiter.list, &s->wait_list);
+
+ irq_restore(iflags);
task->state = TASK_WAIT;
task->reason = "down";
schedule();
}
-
- irq_restore(iflags);
}
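// Note: `waiter` lives on the sleeping task's kernel stack. That is safe here because
// up() removes the waiter from s->wait_list before marking the task TASK_READY, so
// nothing can still reference the stack slot once down() returns.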
-// volatile bool try_down(semaphore_t *s) {
-// unsigned long iflags;
-// irq_save(iflags);
-
-// // if(s->cnt )
-
-// irq_restore(iflags);
-// }
-
volatile void up(semaphore_t *s) {
unsigned long iflags;
irq_save(iflags);
if (list_empty(&s->wait_list)) {
s->cnt++;
} else {
- task_union *task = list_first_entry(&s->wait_list, task_union, wait);
- list_del(&task->wait);
+ semaphore_waiter_t *waiter = list_first_entry(&s->wait_list, semaphore_waiter_t, list);
+ list_del(&waiter->list);
+ task_t *task = waiter->task;
+
task->state = TASK_READY;
task->reason = "up";
-        // In principle schedule() should be called here to reschedule once more,
-        // because several tasks may be looping while competing for the same lock:
-        // if the current task is not made to yield the CPU and reschedule here, it may
-        // grab the lock again on its next iteration, and if that keeps happening the
-        // other tasks may never get the lock at all.
-        //
-        // So why is schedule() not called here, then?
-        // Because ide_irq_bh_handler currently calls up() to wake the disk task,
+        // schedule() must not be called here to force another reschedule.
+        // For example, ide_irq_bh_handler currently calls up() to wake the disk task,
        // and ide_irq_bh_handler is an interrupt bottom half, which must not switch tasks;
        // otherwise it triggers the re-entry problem in the irq path, after which the
        // bottom half can no longer run and tasks can no longer be switched.
-        // Keep it this way for now; come back to it later.
-        // schedule();
- }
-
- irq_restore(iflags);
-}
-
-#else
-
-typedef struct semaphore_waiter {
- list_head_t list;
- task_union *task;
- int up;
-} semaphore_waiter_t;
-
-#define SEMAPHORE_WAITER_INITIALIZER(name, task) \
- { .list = LIST_HEAD_INIT((name).list), .task = task, .up = 0 }
-
-#define DECLARE_SEMAPHORE_WAITER(name, task) semaphore_waiter_t name = SEMAPHORE_WAITER_INITIALIZER(name, task)
-
-volatile void __down(semaphore_t *s) {
- task_union *task = current;
- DECLARE_SEMAPHORE_WAITER(waiter, task);
- list_add_tail(&waiter.list, &s->wait_list);
-
- while (true) {
- task->state = TASK_WAIT;
- task->reason = "down";
- schedule();
-
- assert(waiter.up == 1);
- if (waiter.up) {
- break;
- }
- }
-}
-
-volatile void down(semaphore_t *s) {
- unsigned long iflags;
- irq_save(iflags);
-
- if (likely(s->cnt > 0)) {
- s->cnt--;
- } else {
- __down(s);
- }
-
- irq_restore(iflags);
-}
-
-volatile void __up(semaphore_t *s) {
- semaphore_waiter_t *waiter = list_first_entry(&s->wait_list, semaphore_waiter_t, list);
- list_del(&waiter->list);
- waiter->up = 1;
-
- waiter->task->state = TASK_READY;
- waiter->task->reason = "up";
-
-    // In principle schedule() should be called here to reschedule once more,
-    // because several tasks may be looping while competing for the same lock:
-    // if the current task is not made to yield the CPU and reschedule here, it may
-    // grab the lock again on its next iteration, and if that keeps happening the
-    // other tasks may never get the lock at all.
-    //
-    // So why is schedule() not called here, then?
-    // Because ide_irq_bh_handler currently calls up() to wake the disk task,
-    // and ide_irq_bh_handler is an interrupt bottom half, which must not switch tasks;
-    // otherwise it triggers the re-entry problem in the irq path, after which the
-    // bottom half can no longer run and tasks can no longer be switched.
-    // Keep it this way for now; come back to it later.
- // schedule();
-}
-
-volatile void up(semaphore_t *s) {
- unsigned long iflags;
- irq_save(iflags);
-
- // if (likely(list_empty(&s->wait_list))) {
- if (list_empty(&s->wait_list)) {
- s->cnt++;
- } else {
- __up(s);
}
irq_restore(iflags);
}
-#endif
void mutex_lock(semaphore_t *s) { down(s); }
void mutex_unlock(semaphore_t *s) { up(s); }