p->p_seg.p_cr3 = 0;
p->p_misc_flags &= ~MF_FULLVM;
}
- RTS_LOCK_UNSET(p, RTS_VMINHIBIT);
+ RTS_UNSET(p, RTS_VMINHIBIT);
return OK;
case VMCTL_INCSP:
/* Increase process SP. */
* Figuring out the exact source is too complicated. CLOCK_IRQ is normally
* not very random.
*/
- lock;
get_randomness(&krandom, CLOCK_IRQ);
- unlock;
return(OK);
}
/* Don't schedule this process until pagefault is handled. */
vmassert(pr->p_seg.p_cr3 == read_cr3());
vmassert(!RTS_ISSET(pr, RTS_PAGEFAULT));
- RTS_LOCK_SET(pr, RTS_PAGEFAULT);
+ RTS_SET(pr, RTS_PAGEFAULT);
/* Save pagefault details, suspend process,
* add process to pagefault chain,
vmassert(!RTS_ISSET(caller, RTS_VMREQUEST));
vmassert(!RTS_ISSET(target, RTS_VMREQUEST));
- RTS_LOCK_SET(caller, RTS_VMREQUEST);
+ RTS_SET(caller, RTS_VMREQUEST);
#if DEBUG_VMASSERT
caller->p_vmrequest.stacktrace[0] = '\0';
if(caller && RTS_ISSET(caller, RTS_VMREQUEST)) {
vmassert(caller->p_vmrequest.vmresult != VMSUSPEND);
- RTS_LOCK_UNSET(caller, RTS_VMREQUEST);
+ RTS_UNSET(caller, RTS_VMREQUEST);
if(caller->p_vmrequest.vmresult != OK) {
#if DEBUG_VMASSERT
printf("virtual_copy: returning VM error %d\n",
PRIVATE void ser_debug(int c)
{
- int u = 0;
-
serial_debug_active = 1;
- /* Disable interrupts so that we get a consistent state. */
- if(!intr_disabled()) { lock; u = 1; };
switch(c)
{
#endif
}
serial_debug_active = 0;
- if(u) { unlock; }
}
PRIVATE void printslot(struct proc *pp, int level)
#define unset_sys_bit(map,bit) \
( MAP_CHUNK(map.chunk,bit) &= ~(1 << CHUNK_OFFSET(bit) )
-#define reallock
-
-#define realunlock
-
-/* Disable/ enable hardware interrupts. The parameters of lock() and unlock()
- * are used when debugging is enabled. See debug.h for more information.
- */
-#define lock reallock
-#define unlock realunlock
-
#ifdef CONFIG_IDLE_TSC
#define IDLE_STOP if(idle_active) { read_tsc_64(&idle_stop); idle_active = 0; }
#else
#define NOREC_ENTER(varname) \
static int varname = NOTENTERED; \
- int mustunlock = 0; \
- if(!intr_disabled()) { lock; mustunlock = 1; } \
vmassert(varname == ENTERED || varname == NOTENTERED); \
vmassert(magictest == MAGICTEST); \
vmassert(varname != ENTERED); \
vmassert(magictest == MAGICTEST); \
vmassert(varname == ENTERED || varname == NOTENTERED); \
varname = NOTENTERED; \
- if(mustunlock) { unlock; } \
return v; \
} while(0)
*
* sys_call: a system call, i.e., the kernel is trapped with an INT
*
- * As well as several entry points used from the interrupt and task level:
- *
- * lock_send: send a message to a process
- *
* Changes:
* Aug 19, 2005 rewrote scheduling code (Jorrit N. Herder)
* Jul 25, 2005 rewrote system call handling (Jorrit N. Herder)
#include "proc.h"
#include "vm.h"
-/* Scheduling and message passing functions. The functions are available to
- * other parts of the kernel through lock_...(). The lock temporarily disables
- * interrupts to prevent race conditions.
- */
+/* Scheduling and message passing functions. */
FORWARD _PROTOTYPE( void idle, (void));
FORWARD _PROTOTYPE( int mini_send, (struct proc *caller_ptr, int dst_e,
message *m_ptr, int flags));
return EAGAIN;
}
- /*===========================================================================*
- * lock_notify *
- *===========================================================================*/
-PUBLIC int lock_notify(src_e, dst_e)
-int src_e; /* (endpoint) sender of the notification */
-int dst_e; /* (endpoint) who is to be notified */
-{
-/* Safe gateway to mini_notify() for tasks and interrupt handlers. The sender
- * is explicitely given to prevent confusion where the call comes from. MINIX
- * kernel is not reentrant, which means to interrupts are disabled after
- * the first kernel entry (hardware interrupt, trap, or exception). Locking
- * is done by temporarily disabling interrupts.
- */
- int result, src_p;
-
- vmassert(!intr_disabled());
-
- if (!isokendpt(src_e, &src_p)) {
- kprintf("lock_notify: bogus src: %d\n", src_e);
- return EDEADSRCDST;
- }
-
- lock;
- vmassert(intr_disabled());
- result = mini_notify(proc_addr(src_p), dst_e);
- vmassert(intr_disabled());
- unlock;
- vmassert(!intr_disabled());
-
- return(result);
-}
-
/*===========================================================================*
* enqueue *
*===========================================================================*/
vmassert(!intr_disabled());
- lock;
for (rp=BEG_PROC_ADDR; rp<END_PROC_ADDR; rp++) {
if (! isemptyp(rp)) { /* check slot use */
if (rp->p_priority > rp->p_max_priority) { /* update priority? */
}
}
}
- unlock;
/* Now schedule a new watchdog timer to balance the queues again. The
* period depends on the total amount of quantum ticks added.
set_timer(&queue_timer, get_uptime() + next_period, balance_queues);
}
-/*===========================================================================*
- * lock_send *
- *===========================================================================*/
-PUBLIC int lock_send(dst_e, m_ptr)
-int dst_e; /* to whom is message being sent? */
-message *m_ptr; /* pointer to message buffer */
-{
-/* Safe gateway to mini_send() for tasks. */
- int result;
- lock;
- result = mini_send(proc_ptr, dst_e, m_ptr, 0);
- unlock;
- return(result);
-}
-
/*===========================================================================*
* endpoint_lookup *
*===========================================================================*/
vmassert(intr_disabled()); \
} while(0)
-/* Set flag and dequeue if the process was runnable. */
-#define RTS_LOCK_SET(rp, f) \
- do { \
- int u = 0; \
- if(!intr_disabled()) { u = 1; lock; } \
- if(proc_is_runnable(rp)) { dequeue(rp); } \
- (rp)->p_rts_flags |= (f); \
- if(u) { unlock; } \
- } while(0)
-
-/* Clear flag and enqueue if the process was not runnable but is now. */
-#define RTS_LOCK_UNSET(rp, f) \
- do { \
- int rts; \
- int u = 0; \
- if(!intr_disabled()) { u = 1; lock; } \
- rts = (rp)->p_rts_flags; \
- (rp)->p_rts_flags &= ~(f); \
- if(!rts_f_is_runnable(rts) && proc_is_runnable(rp)) { \
- enqueue(rp); \
- } \
- if(u) { unlock; } \
- } while(0)
-
/* Set flags to this value. */
-#define RTS_LOCK_SETFLAGS(rp, f) \
+#define RTS_SETFLAGS(rp, f) \
do { \
- int u = 0; \
- if(!intr_disabled()) { u = 1; lock; } \
if(proc_is_runnable(rp) && (f)) { dequeue(rp); } \
(rp)->p_rts_flags = (f); \
- if(u) { unlock; } \
} while(0)
/* Misc flags */
/* proc.c */
_PROTOTYPE( int do_ipc, (int call_nr, int src_dst,
message *m_ptr, long bit_map) );
-_PROTOTYPE( int lock_notify, (int src, int dst) );
_PROTOTYPE( int mini_notify, (struct proc *src, endpoint_t dst) );
-_PROTOTYPE( int lock_send, (int dst, message *m_ptr) );
_PROTOTYPE( void enqueue, (struct proc *rp) );
_PROTOTYPE( void dequeue, (struct proc *rp) );
_PROTOTYPE( void balance_queues, (struct timer *tp) );
rp = proc_addr(proc_nr);
sigaddset(&priv(rp)->s_sig_pending, sig_nr);
- if(!intr_disabled()) {
- lock_notify(SYSTEM, rp->p_endpoint);
- } else {
- mini_notify(proc_addr(SYSTEM), rp->p_endpoint);
- }
+ mini_notify(proc_addr(SYSTEM), rp->p_endpoint);
}
/*===========================================================================*
if (! sigismember(&rp->p_pending, sig_nr)) {
sigaddset(&rp->p_pending, sig_nr);
if (! (RTS_ISSET(rp, RTS_SIGNALED))) { /* other pending */
- RTS_LOCK_SET(rp, RTS_SIGNALED | RTS_SIG_PENDING);
+ RTS_SET(rp, RTS_SIGNALED | RTS_SIG_PENDING);
send_sig(PM_PROC_NR, SIGKSIG);
}
}
}
/* Make sure that the exiting process is no longer scheduled. */
- RTS_LOCK_SET(rc, RTS_NO_ENDPOINT);
+ RTS_SET(rc, RTS_NO_ENDPOINT);
if (priv(rc)->s_flags & SYS_PROC)
{
if (priv(rc)->s_asynsize) {
/* Check if process is receiving from exiting process. */
if (RTS_ISSET(rp, RTS_RECEIVING) && rp->p_getfrom_e == rc->p_endpoint) {
rp->p_reg.retreg = ESRCDIED; /* report source died */
- RTS_LOCK_UNSET(rp, RTS_RECEIVING); /* no longer receiving */
+ RTS_UNSET(rp, RTS_RECEIVING); /* no longer receiving */
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("endpoint %d / %s receiving from dead src ep %d / %s\n",
rp->p_endpoint, rp->p_name, rc->p_endpoint, rc->p_name);
if (RTS_ISSET(rp, RTS_SENDING) &&
rp->p_sendto_e == rc->p_endpoint) {
rp->p_reg.retreg = EDSTDIED; /* report destination died */
- RTS_LOCK_UNSET(rp, RTS_SENDING);
+ RTS_UNSET(rp, RTS_SENDING);
#if DEBUG_ENABLE_IPC_WARNINGS
kprintf("endpoint %d / %s send to dying dst ep %d (%s)\n",
rp->p_endpoint, rp->p_name, rc->p_endpoint, rc->p_name);
/* PM has finished one kernel signal. Perhaps process is ready now? */
if (!RTS_ISSET(rp, RTS_SIGNALED)) /* new signal arrived */
- RTS_LOCK_UNSET(rp, RTS_SIG_PENDING); /* remove pending flag */
+ RTS_UNSET(rp, RTS_SIG_PENDING); /* remove pending flag */
return(OK);
}
arch_pre_exec(rp, (u32_t) m_ptr->PR_IP_PTR, (u32_t) m_ptr->PR_STACK_PTR);
/* No reply to EXEC call */
- RTS_LOCK_UNSET(rp, RTS_RECEIVING);
+ RTS_UNSET(rp, RTS_RECEIVING);
/* Mark fpu_regs contents as not significant, so fpu
* will be initialized, when it's used next time. */
/* Make sure that the exiting process is no longer scheduled,
* and mark slot as FREE. Also mark saved fpu contents as not significant.
*/
- RTS_LOCK_SETFLAGS(rc, RTS_SLOT_FREE);
+ RTS_SETFLAGS(rc, RTS_SLOT_FREE);
rc->p_misc_flags &= ~MF_FPU_INITIALIZED;
/* Release the process table slot. If this is a system process, also
/* Don't schedule process in VM mode until it has a new pagetable. */
if(m_ptr->PR_FORK_FLAGS & PFF_VMINHIBIT) {
- RTS_LOCK_SET(rpc, RTS_VMINHIBIT);
+ RTS_SET(rpc, RTS_VMINHIBIT);
}
/*
* Only one in group should have RTS_SIGNALED, child doesn't inherit tracing.
*/
- RTS_LOCK_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
+ RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
sigemptyset(&rpc->p_pending);
return r;
m_ptr->SIG_ENDPT = rp->p_endpoint;
m_ptr->SIG_MAP = rp->p_pending; /* pending signals map */
sigemptyset(&rp->p_pending); /* ball is in PM's court */
- RTS_LOCK_UNSET(rp, RTS_SIGNALED); /* blocked by SIG_PENDING */
+ RTS_UNSET(rp, RTS_SIGNALED); /* blocked by SIG_PENDING */
return(OK);
}
}
if (new_q < MAX_USER_Q) new_q = MAX_USER_Q; /* shouldn't happen */
if (new_q > MIN_USER_Q) new_q = MIN_USER_Q; /* shouldn't happen */
- /* Make sure the process is not running while changing its priority.
- * Put the process back in its new queue if it is runnable.
- */
- RTS_LOCK_SET(rp, RTS_SYS_LOCK);
+ /* Dequeue the process and put it in its new queue if it is runnable. */
+ RTS_SET(rp, RTS_SYS_LOCK);
rp->p_max_priority = rp->p_priority = new_q;
- RTS_LOCK_UNSET(rp, RTS_SYS_LOCK);
+ RTS_UNSET(rp, RTS_SYS_LOCK);
return(OK);
}
if (!RTS_ISSET(rp, RTS_NO_PRIV) || priv(rp)->s_proc_nr == NONE) {
return(EPERM);
}
- RTS_LOCK_UNSET(rp, RTS_NO_PRIV);
+ RTS_UNSET(rp, RTS_NO_PRIV);
return(OK);
case SYS_PRIV_DISALLOW:
/* Disallow process from running. */
if (RTS_ISSET(rp, RTS_NO_PRIV)) return(EPERM);
- RTS_LOCK_SET(rp, RTS_NO_PRIV);
+ RTS_SET(rp, RTS_NO_PRIV);
return(OK);
case SYS_PRIV_SET_SYS:
* should not also install signal handlers *and* expect POSIX compliance.
*/
if (action == RC_STOP && (flags & RC_DELAY)) {
- RTS_LOCK_SET(rp, RTS_SYS_LOCK);
+ RTS_SET(rp, RTS_SYS_LOCK);
if (RTS_ISSET(rp, RTS_SENDING) || (rp->p_misc_flags & MF_SC_DEFER))
rp->p_misc_flags |= MF_SIG_DELAY;
delayed = (rp->p_misc_flags & MF_SIG_DELAY);
- RTS_LOCK_UNSET(rp, RTS_SYS_LOCK);
+ RTS_UNSET(rp, RTS_SYS_LOCK);
if (delayed) return(EBUSY);
}
/* Either set or clear the stop flag. */
switch (action) {
case RC_STOP:
- RTS_LOCK_SET(rp, RTS_PROC_STOP);
+ RTS_SET(rp, RTS_PROC_STOP);
break;
case RC_RESUME:
- RTS_LOCK_UNSET(rp, RTS_PROC_STOP);
+ RTS_UNSET(rp, RTS_PROC_STOP);
break;
default:
return(EINVAL);
vmassert(!RTS_ISSET(caller, RTS_VMREQTARGET));
vmassert(!RTS_ISSET(dst, RTS_VMREQUEST));
vmassert(!RTS_ISSET(dst, RTS_VMREQTARGET));
- RTS_LOCK_SET(caller, RTS_VMREQUEST);
- RTS_LOCK_SET(dst, RTS_VMREQTARGET);
+ RTS_SET(caller, RTS_VMREQUEST);
+ RTS_SET(dst, RTS_VMREQTARGET);
/* Map to the destination. */
caller->p_vmrequest.req_type = req_type;
/* Connect caller on vmrequest wait queue. */
if(!(caller->p_vmrequest.nextrequestor = vmrequest))
- lock_notify(SYSTEM, VM_PROC_NR);
+ mini_notify(proc_addr(SYSTEM), VM_PROC_NR);
vmrequest = caller;
return OK;
* process with a notification message from CLOCK.
*/
int proc_nr_e = tmr_arg(tp)->ta_int; /* get process number */
- lock_notify(CLOCK, proc_nr_e); /* notify process */
+ mini_notify(proc_addr(CLOCK), proc_nr_e); /* notify process */
}
#endif /* USE_SETALARM */
if (isemptyp(rp)) return(EINVAL);
switch (tr_request) {
case T_STOP: /* stop process */
- RTS_LOCK_SET(rp, RTS_P_STOP);
+ RTS_SET(rp, RTS_P_STOP);
rp->p_reg.psw &= ~TRACEBIT; /* clear trace bit */
rp->p_misc_flags &= ~MF_SC_TRACE; /* clear syscall trace flag */
return(OK);
/* fall through */
case T_RESUME: /* resume execution */
- RTS_LOCK_UNSET(rp, RTS_P_STOP);
+ RTS_UNSET(rp, RTS_P_STOP);
m_ptr->CTL_DATA = 0;
break;
case T_STEP: /* set trace bit */
rp->p_reg.psw |= TRACEBIT;
- RTS_LOCK_UNSET(rp, RTS_P_STOP);
+ RTS_UNSET(rp, RTS_P_STOP);
m_ptr->CTL_DATA = 0;
break;
case T_SYSCALL: /* trace system call */
rp->p_misc_flags |= MF_SC_TRACE;
- RTS_LOCK_UNSET(rp, RTS_P_STOP);
+ RTS_UNSET(rp, RTS_P_STOP);
m_ptr->CTL_DATA = 0;
break;
}
}
- /* Perform actual device I/O for byte, word, and long values. Note that
- * the entire switch is wrapped in lock() and unlock() to prevent the I/O
- * batch from being interrupted.
- */
- lock;
+ /* Perform actual device I/O for byte, word, and long values. */
switch (io_type) {
case _DIO_BYTE: /* byte values */
if (io_in) for (i=0; i<vec_size; i++)
}
}
}
- unlock;
/* Almost done, copy back results for input requests. */
if (io_in)
switch(m_ptr->SVMCTL_PARAM) {
case VMCTL_CLEAR_PAGEFAULT:
- RTS_LOCK_UNSET(p, RTS_PAGEFAULT);
+ RTS_UNSET(p, RTS_PAGEFAULT);
return OK;
case VMCTL_MEMREQ_GET:
/* Send VM the information about the memory request. */
#endif
vmassert(RTS_ISSET(target, RTS_VMREQTARGET));
- RTS_LOCK_UNSET(target, RTS_VMREQTARGET);
+ RTS_UNSET(target, RTS_VMREQTARGET);
switch(p->p_vmrequest.type) {
case VMSTYPE_KERNELCALL:
p->p_vmrequest.type);
}
- RTS_LOCK_UNSET(p, RTS_VMREQUEST);
+ RTS_UNSET(p, RTS_VMREQUEST);
return OK;
case VMCTL_ENABLE_PAGING:
- /*
- * system task must not get preempted while switching to paging,
- * interrupt handling is not safe
- */
- lock;
if(vm_running)
minix_panic("do_vmctl: paging already enabled", NO_NUM);
vm_init(p);
vmassert(p->p_delivermsg_lin ==
umap_local(p, D, p->p_delivermsg_vir, sizeof(message)));
if ((err = arch_enable_paging()) != OK) {
- unlock;
return err;
}
if(newmap(caller, p, (struct mem_map *) m_ptr->SVMCTL_VALUE) != OK)
minix_panic("do_vmctl: newmap failed", NO_NUM);
FIXLINMSG(p);
vmassert(p->p_delivermsg_lin);
- unlock;
return OK;
case VMCTL_KERN_PHYSMAP:
{