if(!fpu_presence)
return;
- /* If the process hasn't touched the FPU, there is nothing to do. */
-
- if(!(pr->p_misc_flags & MF_USED_FPU))
- return;
-
/* Save changed FPU context. */
-
if(osfxsr_feature) {
fxsave(pr->p_fpu_state.fpu_save_area_p);
fninit();
} else {
fnsave(pr->p_fpu_state.fpu_save_area_p);
}
-
- /* Clear MF_USED_FPU to signal there is no unsaved FPU state. */
-
- pr->p_misc_flags &= ~MF_USED_FPU;
}
PUBLIC void restore_fpu(struct proc *pr)
{
- /* If the process hasn't touched the FPU, enable the FPU exception
- * and don't restore anything.
- */
- if(!(pr->p_misc_flags & MF_USED_FPU)) {
- write_cr0(read_cr0() | I386_CR0_TS);
- return;
- }
-
- /* If the process has touched the FPU, disable the FPU
- * exception (both for the kernel and for the process once
- * it's scheduled), and initialize or restore the FPU state.
- */
-
- clts();
-
if(!(pr->p_misc_flags & MF_FPU_INITIALIZED)) {
fninit();
pr->p_misc_flags |= MF_FPU_INITIALIZED;
stk = (char *)tss.sp0;
/* set pointer to the process to run on the stack */
*((reg_t *)stk) = (reg_t) proc_ptr;
+
return proc_ptr;
}
#include <assert.h>
#include "kernel/proc.h"
#include "kernel/proto.h"
+#include <machine/vm.h>
extern int catch_pagefaults = 0;
}
printf("\n");
}
+
+PUBLIC void enable_fpu_exception(void)
+{
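+	/* setting CR0.TS makes the next FPU instruction raise the
+	 * "coprocessor not available" exception, triggering the lazy switch
+	 */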
+ write_cr0(read_cr0() | I386_CR0_TS);
+}
+
+PUBLIC void disable_fpu_exception(void)
+{
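+	/* clearing CR0.TS lets FPU instructions execute without trapping */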
+ clts();
+}
+
*/
.globl _restore_user_context
+.globl _copr_not_available_handler
.globl _reload_cr3
.globl _divide_error
_copr_not_available:
TEST_INT_IN_KERNEL(4, copr_not_available_in_kernel)
cld /* set direction flag to a known value */
- SAVE_PROCESS_CTX_NON_LAZY(0)
+ SAVE_PROCESS_CTX(0)
/* stop user process cycles */
push %ebp
mov $0, %ebp
call _context_stop
- pop %ebp
- lea P_MISC_FLAGS(%ebp), %ebx
- orw $MF_USED_FPU, (%ebx)
- mov $0, %ebp
- jmp _switch_to_user
+	jmp	_copr_not_available_handler	/* does not return here */
copr_not_available_in_kernel:
pushl $0
* displ is the stack displacement. In case of an exception, there are two extra
 * values on the stack - the error code and the exception number
*/
-#define SAVE_PROCESS_CTX_NON_LAZY(displ) \
+#define SAVE_PROCESS_CTX(displ) \
\
cld /* set the direction flag to a known state */ ;\
\
RESTORE_KERNEL_SEGS ;\
SAVE_TRAP_CTX(displ, %ebp, %esi) ;
-#define SAVE_PROCESS_CTX(displ) \
- SAVE_PROCESS_CTX_NON_LAZY(displ) ;\
- push %eax ;\
- push %ebx ;\
- push %ecx ;\
- push %edx ;\
- push %ebp ;\
- call _save_fpu ;\
- pop %ebp ;\
- pop %edx ;\
- pop %ecx ;\
- pop %ebx ;\
- pop %eax ;
-
/*
* clear the IF flag in eflags which are stored somewhere in memory, e.g. on
* stack. iret or popf will load the new value later
EXTERN char params_buffer[512]; /* boot monitor parameters */
EXTERN int minix_panicing;
EXTERN char fpu_presence;
+EXTERN struct proc * fpu_owner;	/* process whose state is loaded in the FPU */
EXTERN int verboseboot; /* verbose boot, init'ed in cstart */
#define MAGICTEST 0xC0FFEE23
EXTERN u32_t magictest; /* global magic number */
context_stop(proc_addr(KERNEL));
+	/* If the process isn't the owner of the FPU, enable the FPU exception
+	 * so that its first FPU instruction traps; otherwise make sure it can
+	 * use the FPU without faulting.
+	 */
+ if(fpu_owner != proc_ptr)
+ enable_fpu_exception();
+ else
+ disable_fpu_exception();
/*
* restore_user_context() carries out the actual mode switch from kernel
* to userspace. This function does not return
*/
- restore_fpu(proc_ptr);
restore_user_context(proc_ptr);
NOT_REACHABLE;
}
#endif
}
}
+
+PUBLIC void copr_not_available_handler(void)
+{
+ /*
+ * Disable the FPU exception (both for the kernel and for the process
+ * once it's scheduled), and initialize or restore the FPU state.
+ */
+
+ disable_fpu_exception();
+
+	/* if no process owns the FPU, there is no state to save */
+ if (fpu_owner != NULL) {
+ assert(fpu_owner != proc_ptr);
+ save_fpu(fpu_owner);
+ }
+
+	/*
+	 * restore the current process' FPU state and let it run again; do
+	 * not reschedule.
+	 */
+ restore_fpu(proc_ptr);
+ fpu_owner = proc_ptr;
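+	/* stop accounting the cycles spent in the kernel */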
+ context_stop(proc_addr(KERNEL));
+ restore_user_context(proc_ptr);
+ NOT_REACHABLE;
+}
+
+PUBLIC void release_fpu(void)
+{
+	fpu_owner = NULL;
+}
#define MF_SC_ACTIVE 0x100 /* Syscall tracing: in a system call now */
#define MF_SC_DEFER 0x200 /* Syscall tracing: deferred system call */
#define MF_SC_TRACE 0x400 /* Syscall tracing: trigger syscall events */
-#define MF_USED_FPU 0x800 /* process used fpu during last execution run */
#define MF_FPU_INITIALIZED 0x1000 /* process already used math, so fpu
* regs are significant (initialized)*/
#define MF_SENDING_FROM_KERNEL 0x2000 /* message of this process is from kernel */
_PROTOTYPE(void switch_address_space, (struct proc * p));
_PROTOTYPE(void release_address_space, (struct proc *pr));
+_PROTOTYPE(void enable_fpu_exception, (void));
+_PROTOTYPE(void disable_fpu_exception, (void));
+_PROTOTYPE(void release_fpu, (void));
+
/* utility.c */
_PROTOTYPE( void cpu_print_freq, (unsigned cpu));
#endif /* PROTO_H */
}
#endif
+	/* release the FPU if this process is its owner */
+ if (fpu_owner == rc)
+ release_fpu();
+
return OK;
}
/* Mark fpu_regs contents as not significant, so fpu
* will be initialized, when it's used next time. */
rp->p_misc_flags &= ~MF_FPU_INITIALIZED;
+ /* force reloading FPU if the current process is the owner */
+ if (rp == fpu_owner)
+ release_fpu();
return(OK);
}
#endif /* USE_EXEC */
map_ptr= (struct mem_map *) m_ptr->PR_MEM_PTR;
+	/* make sure that the FPU context is saved in the parent before the copy */
+ if (fpu_owner == rpp) {
+ disable_fpu_exception();
+ save_fpu(rpp);
+ }
/* Copy parent 'proc' struct to child. And reinitialize some fields. */
gen = _ENDPOINT_G(rpc->p_endpoint);
#if (_MINIX_CHIP == _CHIP_INTEL)
/* Copy FPU state */
mc.mc_fpu_flags = 0;
if (rp->p_misc_flags & MF_FPU_INITIALIZED) {
+	/* make sure that the FPU context is saved into the proc structure first */
+ if (fpu_owner == rp) {
+ disable_fpu_exception();
+ save_fpu(rp);
+ }
mc.mc_fpu_flags = 0 | rp->p_misc_flags & MF_FPU_INITIALIZED;
memcpy(&(mc.mc_fpu_state), rp->p_fpu_state.fpu_save_area_p,
FPU_XFP_SIZE);
FPU_XFP_SIZE);
} else
rp->p_misc_flags &= ~MF_FPU_INITIALIZED;
+ /* force reloading FPU in either case */
+ if (fpu_owner == rp)
+ release_fpu();
#endif
return(OK);
memcpy(rp->p_fpu_state.fpu_save_area_p, &sc.sc_fpu_state,
FPU_XFP_SIZE);
rp->p_misc_flags |= MF_FPU_INITIALIZED; /* Restore math usage flag. */
+ /* force reloading FPU */
+ if (fpu_owner == rp)
+ release_fpu();
}
#endif
/* Copy the registers to the sigcontext structure. */
memcpy(&sc.sc_regs, (char *) &rp->p_reg, sizeof(sigregs));
#if (_MINIX_CHIP == _CHIP_INTEL)
- if(rp->p_misc_flags & MF_FPU_INITIALIZED)
+ if(rp->p_misc_flags & MF_FPU_INITIALIZED) {
+	/* make sure the FPU context is saved before copying it into the sigcontext */
+ if (fpu_owner == rp) {
+ disable_fpu_exception();
+ save_fpu(rp);
+ }
memcpy(&sc.sc_fpu_state, rp->p_fpu_state.fpu_save_area_p,
FPU_XFP_SIZE);
+ }
#endif
/* Finish the sigcontext initialization. */