\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
- call cycles_accounting_stop ;\
+ call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
APIC_IRQ_HANDLER(irq) ;\
- jmp restart ;\
+ jmp switch_to_user ;\
\
0: \
pusha ;\
- call cycles_accounting_stop_idle ;\
+ call context_stop_idle ;\
APIC_IRQ_HANDLER(irq) ;\
CLEAR_IF(10*4(%esp)) ;\
popa ;\
\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
- call cycles_accounting_stop ;\
+ call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
LAPIC_INTR_HANDLER(func) ;\
- jmp restart ;\
+ jmp switch_to_user ;\
\
0: \
pusha ;\
- call cycles_accounting_stop_idle ;\
+ call context_stop_idle ;\
LAPIC_INTR_HANDLER(func) ;\
CLEAR_IF(10*4(%esp)) ;\
popa ;\
read_tsc_64(&tsc_ctr_switch);
}
-PUBLIC void cycles_accounting_stop(struct proc * p)
+PUBLIC void context_stop(struct proc * p)
{
u64_t tsc;
tsc_ctr_switch = tsc;
}
-PUBLIC void cycles_accounting_stop_idle(void)
+PUBLIC void context_stop_idle(void)
{
- cycles_accounting_stop(proc_addr(IDLE));
+ context_stop(proc_addr(IDLE));
}
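The hunk above keeps only the renamed entry points; the body of context_stop() is elided. As a rough sketch of the cycle accounting it performs, assuming read_tsc_64() (used above to initialise tsc_ctr_switch), the 64-bit helpers sub64()/add64() from <minix/u64.h>, and a p_cycles accumulator in struct proc (the field name is an assumption here):

PUBLIC void context_stop(struct proc * p)
{
	u64_t tsc, tsc_delta;

	read_tsc_64(&tsc);				/* timestamp at the switch */
	tsc_delta = sub64(tsc, tsc_ctr_switch);		/* cycles since the last switch */
	p->p_cycles = add64(p->p_cycles, tsc_delta);	/* charge them to process p */
	tsc_ctr_switch = tsc;				/* start of the next interval */
}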
do_ipc(proc->p_reg.cx, proc->p_reg.retreg, proc->p_reg.bx);
}
-PUBLIC struct proc * arch_finish_schedcheck(void)
+PUBLIC struct proc * arch_finish_switch_to_user(void)
{
char * stk;
stk = (char *)tss.sp0;
* interrupt handlers. It cooperates with the code in "start.c" to set up a
* good environment for main().
*
- * Every transition to the kernel goes through this file. Transitions to the
- * kernel may be nested. The initial entry may be with a system call (i.e.,
- * send or receive a message), an exception or a hardware interrupt; kernel
- * reentries may only be made by hardware interrupts. The count of reentries
- * is kept in "k_reenter". It is important for deciding whether to switch to
- * the kernel stack and for protecting the message passing code in "proc.c".
- *
- * For the message passing trap, most of the machine state is saved in the
- * proc table. (Some of the registers need not be saved.) Then the stack is
- * switched to "k_stack", and interrupts are reenabled. Finally, the system
- * call handler (in C) is called. When it returns, interrupts are disabled
- * again and the code falls into the restart routine, to finish off held-up
- * interrupts and run the process or task whose pointer is in "proc_ptr".
- *
- * Hardware interrupt handlers do the same, except (1) The entire state must
- * be saved. (2) There are too many handlers to do this inline, so the save
- * routine is called. A few cycles are saved by pushing the address of the
- * appropiate restart routine for a return later. (3) A stack switch is
- * avoided when the stack is already switched. (4) The (master) 8259 interrupt
- * controller is reenabled centrally in save(). (5) Each interrupt handler
- * masks its interrupt line using the 8259 before enabling (other unmasked)
- * interrupts, and unmasks it after servicing the interrupt. This limits the
- * nest level to the number of lines and protects the handler from itself.
+ * The kernel is entered because of a kernel call, an IPC call, an interrupt or
+ * an exception. The TSS is set up so that the kernel stack is loaded. The user
+ * context is saved in the proc table and the handler of the event is called.
+ * Once the handler is done, switch_to_user() is called to pick a new process,
+ * finish whatever needs to be done for that process to run, set its context
+ * and switch to userspace.
*
* For communication with the boot monitor at startup time some constant
* data are compiled into the beginning of the text segment. This facilitates
* the entity.
*/
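In outline, the path described above looks as follows. This is a C-level sketch only, not code from this patch: the real entry code is the assembly macros below, and irq_handle() is assumed to be the C dispatcher behind the *_IRQ_HANDLER macros.

/* sketch of a hardware interrupt that arrives while a user process runs */
void sketch_irq_entry(struct proc * saved, int irq)
{
	/* SAVE_PROCESS_CTX has already stored the user registers in 'saved' */
	context_stop(saved);	/* stop charging CPU cycles to the user process */
	irq_handle(irq);	/* run the registered interrupt handler(s) */
	switch_to_user();	/* pick a process and return to userspace */
	/* not reached: switch_to_user() does not return */
}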
-.globl restart
+.globl restore_user_context
.globl reload_cr3
.globl divide_error
.globl params_size
.globl params_offset
.globl mon_ds
-.globl schedcheck
+.globl switch_to_user
.globl lazy_fpu
.globl hwint00 /* handlers for hardware interrupts */
\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
- call cycles_accounting_stop ;\
+ call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
- jmp restart ;\
+ jmp switch_to_user ;\
\
0: \
pusha ;\
- call cycles_accounting_stop_idle ;\
+ call context_stop_idle ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
\
SAVE_PROCESS_CTX(0) ;\
push %ebp ;\
- call cycles_accounting_stop ;\
+ call context_stop ;\
add $4, %esp ;\
movl $0, %ebp /* for stack trace */ ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
outb $INT2_CTL /* reenable slave 8259 */ ;\
- jmp restart ;\
+ jmp switch_to_user ;\
\
0: \
pusha ;\
- call cycles_accounting_stop_idle ;\
+ call context_stop_idle ;\
PIC_IRQ_HANDLER(irq) ;\
movb $END_OF_INT, %al ;\
outb $INT_CTL /* reenable interrupts in master pic */ ;\
/* stop user process cycles */
push %ebp
- call cycles_accounting_stop
+ call context_stop
add $4, %esp
/* for stack trace */
pop %esi
mov %eax, AXREG(%esi)
- jmp restart
+ jmp switch_to_user
/*
/* stop user process cycles */
push %ebp
- call cycles_accounting_stop
+ call context_stop
add $4, %esp
/* for stack trace */
/* restore the current process pointer and save the return value */
add $8, %esp
- jmp restart
+ jmp switch_to_user
.balign 16
/* stop user process cycles */
push %ebp
- call cycles_accounting_stop
+ call context_stop
add $4, %esp
/* for stack trace clear %ebp */
push $0 /* it's not a nested exception */
call exception_handler
- jmp restart
+ jmp switch_to_user
exception_entry_nested:
/*===========================================================================*/
-/* restart */
+/* restore_user_context */
/*===========================================================================*/
-restart:
- call schedcheck
-
- /* %eax is set by schedcheck() to the process to run */
- mov %eax, %ebp /* will assume P_STACKBASE == 0 */
+restore_user_context:
+ mov 4(%esp), %ebp /* will assume P_STACKBASE == 0 */
/* reconstruct the stack for iret */
movl SSREG(%ebp), %eax
SAVE_PROCESS_CTX_NON_LAZY(0)
/* stop user process cycles */
push %ebp
- call cycles_accounting_stop
+ call context_stop
pop %ebp
lea P_MISC_FLAGS(%ebp), %ebx
movw (%ebx), %cx
frstor (%eax)
copr_return:
orw $MF_USED_FPU, (%ebx) /* fpu was used during last execution */
- jmp restart
+ jmp switch_to_user
copr_not_available_in_kernel:
movl $0, (%esp)
assert(runqueues_ok());
- restart();
+ switch_to_user();
NOT_REACHABLE;
}
*/
/* start accounting for the idle time */
- cycles_accounting_stop(proc_addr(KERNEL));
+ context_stop(proc_addr(KERNEL));
halt_cpu();
/*
* end of accounting for the idle task does not happen here, the kernel
}
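The other half of the idle accounting referred to in the truncated comment above lives in the interrupt macros: when an interrupt wakes the CPU out of halt_cpu(), the nested entry path (the "0:" branch) calls context_stop_idle() first, so the halted cycles are charged to IDLE. Put together, and keeping only the functions this patch touches:

	/* sketch of one idle period; the loop and locking details are omitted */
	context_stop(proc_addr(KERNEL));  /* start accounting for the idle time */
	halt_cpu();                       /* sleep until the next interrupt */
	/*
	 * the interrupt entry (the "0:" path of the macros above) then runs
	 * context_stop_idle(), i.e. context_stop(proc_addr(IDLE)), charging
	 * the halted cycles to the IDLE process before the handler runs
	 */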
/*===========================================================================*
- * schedcheck *
+ * switch_to_user *
*===========================================================================*/
-PUBLIC struct proc * schedcheck(void)
+PUBLIC void switch_to_user(void)
{
/* This function is called an instant before proc_ptr is
* to be scheduled again.
#endif
- proc_ptr = arch_finish_schedcheck();
+ proc_ptr = arch_finish_switch_to_user();
assert(proc_ptr->p_ticks_left > 0);
- cycles_accounting_stop(proc_addr(KERNEL));
+ context_stop(proc_addr(KERNEL));
- return proc_ptr;
+ /*
+ * restore_user_context() carries out the actual mode switch from kernel
+ * to userspace. This function does not return.
+ */
+ restore_user_context(proc_ptr);
+ NOT_REACHABLE;
}
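Assembled from the hunks above (the statements elided between them are not shown), the new exit path reads as follows; only names that appear in this patch are used:

PUBLIC void switch_to_user(void)
{
	/* pick proc_ptr, set up its address space and the kernel stack */
	proc_ptr = arch_finish_switch_to_user();
	assert(proc_ptr->p_ticks_left > 0);

	/* stop charging CPU cycles to the kernel */
	context_stop(proc_addr(KERNEL));

	/*
	 * restore proc_ptr's registers and iret to userspace;
	 * restore_user_context() does not return
	 */
	restore_user_context(proc_ptr);
	NOT_REACHABLE;
}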
/*
/*
* If a scheduler is scheduling itself or has no scheduler, and
* runs out of quantum, we don't send a message. The
- * RTS_NO_QUANTUM flag will be removed by schedcheck in proc.c.
+ * RTS_NO_QUANTUM flag will be removed in switch_to_user.
*/
}
else {
* possible before returning to userspace. These functions are architecture
* dependent
*/
-_PROTOTYPE( void cycles_accounting_stop, (struct proc * p) );
+_PROTOTYPE( void context_stop, (struct proc * p) );
/* this is a wrapper to make calling it from assembly easier */
-_PROTOTYPE( void cycles_accounting_stop_idle, (void) );
+_PROTOTYPE( void context_stop_idle, (void) );
/* main.c */
_PROTOTYPE( void main, (void) );
_PROTOTYPE( int mini_notify, (const struct proc *src, endpoint_t dst) );
_PROTOTYPE( void enqueue, (struct proc *rp) );
_PROTOTYPE( void dequeue, (const struct proc *rp) );
-_PROTOTYPE( struct proc * schedcheck, (void) );
-_PROTOTYPE( struct proc * arch_finish_schedcheck, (void) );
+_PROTOTYPE( void switch_to_user, (void) );
+_PROTOTYPE( struct proc * arch_finish_switch_to_user, (void) );
_PROTOTYPE( struct proc *endpoint_lookup, (endpoint_t ep) );
#if DEBUG_ENABLE_IPC_WARNINGS
_PROTOTYPE( int isokendpt_f, (const char *file, int line, endpoint_t e, int *p, int f));
_PROTOTYPE( void arch_shutdown, (int) );
_PROTOTYPE( void arch_monitor, (void) );
_PROTOTYPE( void arch_get_aout_headers, (int i, struct exec *h) );
-_PROTOTYPE( void restart, (void) );
+_PROTOTYPE( void restore_user_context, (struct proc * p) );
_PROTOTYPE( void read_tsc, (unsigned long *high, unsigned long *low) );
_PROTOTYPE( int arch_init_profile_clock, (u32_t freq) );
_PROTOTYPE( void arch_stop_profile_clock, (void) );
caller->p_delivermsg_vir = (vir_bytes) m_user;
/*
* the ldt and cr3 of the caller process are loaded because it has just trapped
- * into the kernel or was already set in schedcheck() before we resume
+ * into the kernel or was already set in switch_to_user() before we resume
* execution of an interrupted kernel call
*/
if (copy_msg_from_user(caller, m_user, &msg) == 0) {