*/
if (is_nested) {
/*
- * if a problem occured while copying a message from userspace because
+ * if a problem occurred while copying a message from userspace because
* of a wrong pointer supplied by userland, handle it the only way we
* can handle it ...
*/
/*
* struct used internally by memory.c to keep a list of
- * items to map. These should be staticaly allocated
+ * items to map. These should be statically allocated
* in the individual files and passed as argument.
* The data doesn't need to be initialized. See omap_serial for
- * and example usage.
+ * an example usage.
/*
- * Request a physical mapping and put the result in the given prt
+ * Request a physical mapping and put the result in the given ptr
- * Note that ptr will only be valid once the callback happend.
+ * Note that ptr will only be valid once the callback happened.
*/
int kern_phys_map_ptr( phys_bytes base_address, vir_bytes io_size,
kern_phys_map * priv, vir_bytes ptr);
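As a usage illustration (not from the source): per the comment above, the kern_phys_map struct must be statically allocated, and the resulting pointer is only valid once the callback has happened. A minimal sketch, where UART_PHYS_BASE and UART_IO_SIZE are made-up example values:

static kern_phys_map serial_map;        /* statically allocated, as required */
static vir_bytes serial_base;           /* filled in once VM completes the mapping */

static void serial_mmio_init(void)
{
        /* request the mapping; the result lands in serial_base later */
        kern_phys_map_ptr(UART_PHYS_BASE, UART_IO_SIZE,
                &serial_map, (vir_bytes) &serial_base);
        /* serial_base must not be used until the callback has happened */
}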
* The target (i.e. in-kernel) mapping area is one of the freepdes[]
- * VM has earlier already told the kernel about that is available. It is
+ * that VM has earlier told the kernel is available. It is
* identified as the 'pde' parameter. This value can be chosen freely
- * by the caller, as long as it is in range (i.e. 0 or higher and corresonds
+ * by the caller, as long as it is in range (i.e. 0 or higher and corresponds
* to a known freepde slot). It is up to the caller to keep track of which
* freepde's are in use, and to determine which ones are free to use.
*
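Since the comment leaves freepde bookkeeping entirely to the caller, a minimal sketch of that bookkeeping might look as follows (NR_FREEPDES and the helper names are assumptions, not the kernel's identifiers):

static int freepde_busy[NR_FREEPDES];   /* nonzero while a slot is in use */

static int alloc_freepde(void)
{
        int i;
        for (i = 0; i < NR_FREEPDES; i++) {
                if (!freepde_busy[i]) {
                        freepde_busy[i] = 1;
                        return i;       /* a valid 'pde' parameter */
                }
        }
        return -1;                      /* all slots taken */
}

static void free_freepde(int pde)
{
        freepde_busy[pde] = 0;
}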
/*
- * Request a physical mapping and put the result in the given prt
+ * Request a physical mapping and put the result in the given ptr
- * Note that ptr will only be valid once the callback happend.
+ * Note that ptr will only be valid once the callback happened.
*/
int kern_phys_map_ptr(
phys_bytes base_address,
}
/* pre_init gets executed at the memory location where the kernel was loaded by the boot loader.
- * at that stage we only have a minium set of functionality present (all symbols gets renamed to
+ * at that stage we only have a minimum set of functionality present (all symbols get renamed to
* ensure this). The following methods are used in that context. Once we jump to kmain they are no
* longer used and the "real" implementations are visible
*/
struct io_apic {
unsigned id;
vir_bytes addr; /* presently used address */
- phys_bytes paddr; /* where is it inphys space */
- vir_bytes vaddr; /* adress after paging s on */
+ phys_bytes paddr; /* where is it in phys space */
+ vir_bytes vaddr; /* address after paging is on */
unsigned pins;
unsigned gsi_base;
};
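To illustrate the paddr/vaddr pair above: before paging is enabled the I/O APIC is reached through its physical address, afterwards through the mapped virtual one. A hedged sketch (the function name is an assumption):

static void io_apic_switch_to_vaddr(struct io_apic *a)
{
        /* once paging is on, the presently used address becomes the
         * mapped virtual address rather than the physical one */
        a->addr = a->vaddr;
}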
* The target (i.e. in-kernel) mapping area is one of the freepdes[]
- * VM has earlier already told the kernel about that is available. It is
+ * that VM has earlier told the kernel is available. It is
* identified as the 'pde' parameter. This value can be chosen freely
- * by the caller, as long as it is in range (i.e. 0 or higher and corresonds
+ * by the caller, as long as it is in range (i.e. 0 or higher and corresponds
* to a known freepde slot). It is up to the caller to keep track of which
* freepde's are in use, and to determine which ones are free to use.
*
movl tmp, STREG(pptr)
/*
- * restore kernel segments. %cs is aready set and %fs, %gs are not used */
+ * restore kernel segments. %cs is already set and %fs, %gs are not used */
#define RESTORE_KERNEL_SEGS \
mov $KERN_DS_SELECTOR, %si ;\
mov %si, %ds ;\
int timer_int_handler(void);
int init_local_timer(unsigned freq);
-/* sto p the local timer ticking */
+/* stop the local timer ticking */
void stop_local_timer(void);
/* let the time tick again with the original settings after it was stopped */
void restart_local_timer(void);
#endif
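Given the declarations above, the stop/restart pair is meant to bracket work during which the tick must stay silent; a minimal sketch of that pairing (the function and the work inside it are illustrative):

static void tickless_section(void)
{
        stop_local_timer();             /* silence the local timer */
        /* ... work that must not be preempted by the tick ... */
        restart_local_timer();          /* resume with the original settings */
}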
#ifdef USE_APIC
-EXTERN int config_no_apic; /* optionaly turn off apic */
+EXTERN int config_no_apic; /* optionally turn off apic */
EXTERN int config_apic_timer_x; /* apic timer slowdown factor */
#endif
#define cpu_get_freq(cpu) cpu_hz[cpu]
#ifdef CONFIG_SMP
-EXTERN int config_no_smp; /* optionaly turn off SMP */
+EXTERN int config_no_smp; /* optionally turn off SMP */
#endif
/* VM */
* rm_irq_handler: deregister an interrupt handler.
* irq_handle: handle a hardware interrupt.
* called by the system dependent part when an
- * external interrupt occures.
+ * external interrupt occurs.
* enable_irq: enable hook for IRQ.
* disable_irq: disable hook for IRQ.
*/
/* Assign privilege structure. Force a static privilege id. */
(void) get_priv(rp, static_priv_id(proc_nr));
- /* Priviliges for kernel tasks. */
+ /* Privileges for kernel tasks. */
if(proc_nr == VM_PROC_NR) {
priv(rp)->s_flags = VM_F;
priv(rp)->s_trap_mask = SRV_T;
ipc_to_m = TSK_M; /* allowed targets */
kcalls = TSK_KC; /* allowed kernel calls */
}
- /* Priviliges for the root system process. */
+ /* Privileges for the root system process. */
else {
assert(isrootsysn(proc_nr));
priv(rp)->s_flags= RSYS_F; /* privilege flags */
struct priv *sp;
int i;
- /* Clear the process table. Anounce each slot as empty and set up
+ /* Clear the process table. Announce each slot as empty and set up
* mappings for proc_addr() and proc_nr() macros. Do the same for the
* table with privilege structures for the system processes.
*/
/* This function is called whenever there is no work to do.
* Halt the CPU, and measure how many timestamp counter ticks are
* spent not doing anything. This allows test setups to measure
- * the CPU utiliziation of certain workloads with high precision.
+ * the CPU utilization of certain workloads with high precision.
*/
p = get_cpulocal_var(proc_ptr) = get_cpulocal_var_ptr(idle_proc);
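A sketch of the measurement the comment describes, reading the timestamp counter around the halt (read_tsc_64 and halt_cpu stand in for the arch primitives here and should be treated as assumptions):

static u64_t idle_tsc;                  /* accumulated idle ticks */

static void measured_halt(void)
{
        u64_t before, after;
        read_tsc_64(&before);
        halt_cpu();                     /* returns on the next interrupt */
        read_tsc_64(&after);
        idle_tsc += after - before;     /* charge the halted period to idle */
}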
{
/* Check for deadlock. This can happen if 'caller_ptr' and 'src_dst' have
* a cyclic dependency of blocking send and receive calls. The only cyclic
- * depency that is not fatal is if the caller and target directly SEND(REC)
+ * dependency that is not fatal is if the caller and target directly SEND(REC)
* and RECEIVE to each other. If a deadlock is found, the group size is
* returned. Otherwise zero is returned.
*/
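The walk the comment describes can be sketched as follows: follow the chain of blocked IPC partners starting from the target; if it leads back to the caller, the group is deadlocked. The blocked_on() helper is an assumption, and the real check additionally exempts the direct SEND(REC)/RECEIVE pair mentioned above:

static int deadlock_sketch(struct proc *caller_ptr, struct proc *src_dst)
{
        int group_size = 1;             /* the caller itself */
        struct proc *p;

        for (p = src_dst; p != NULL; p = blocked_on(p)) {
                group_size++;
                if (p == caller_ptr)
                        return group_size;      /* cyclic dependency found */
        }
        return 0;                       /* no deadlock */
}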
IPC_STATUS_ADD_CALL(caller_ptr, call);
/*
- * if the message is originaly from the kernel on behalf of this
+ * if the message is originally from the kernel on behalf of this
* process, we must send the status flags accordingly
*/
if (sender->p_misc_flags & MF_SENDING_FROM_KERNEL) {
struct proc *p_scheduler; /* who should get out of quantum msg */
unsigned p_cpu; /* what CPU is the process running on */
#ifdef CONFIG_SMP
- bitchunk_t p_cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is hte
+ bitchunk_t p_cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is the
process allowed to
run on */
bitchunk_t p_stale_tlb[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* On which cpu are
* memory that isn't present, VM has to fix it. Until it has asked
* what needs to be done and fixed it, save necessary state here.
*
- * The requestor gets a copy of its request message in reqmsg and gets
+ * The requester gets a copy of its request message in reqmsg and gets
* VMREQUEST set.
*/
struct {
BKL_UNLOCK();
while (ap_cpus_booted != (n - 1))
arch_pause();
- /* now we have to take the lock again as we continu execution */
+ /* now we have to take the lock again as we continue execution */
BKL_LOCK();
}
EXTERN unsigned ncpus;
/* Number of virtual strands per physical core */
EXTERN unsigned ht_per_core;
-/* which cpu is bootstraping */
+/* which cpu is bootstrapping */
EXTERN unsigned bsp_cpu_id;
#define cpu_is_bsp(cpu) (bsp_cpu_id == cpu)
void smp_schedule(unsigned cpu);
-/* stop a processes on a different cpu */
+/* stop a process on a different cpu */
void smp_schedule_stop_proc(struct proc * p);
-/* stop a process on a different cpu because its adress space is being changed */
+/* stop a process on a different cpu because its address space is being changed */
void smp_schedule_vminhibit(struct proc * p);
-/* stop the process and for saving its full context */
+/* stop the process and save its full context */
void smp_schedule_stop_proc_save_ctx(struct proc * p);
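As an illustration of how these hooks fit together (the function name and the omitted locking are assumptions): before the kernel touches the address space of a process that may be running on another CPU, it must first be parked there:

static void modify_remote_aspace(struct proc *p)
{
        smp_schedule_vminhibit(p);      /* stop p on its remote cpu first */
        /* ... it is now safe to change p's address space ... */
}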
}
/* Initialize the call vector to a safe default handler. Some system calls
- * may be disabled or nonexistant. Then explicitely map known calls to their
+ * may be disabled or nonexistent. Then explicitly map known calls to their
* handler functions. This is done with a macro that gives a compile error
* if an illegal call number is used. The ordering is not important here.
*/
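The compile-error trick the comment alludes to can be sketched with a negative array size; the identifiers here (call_vec, NR_SYS_CALLS, do_unused, SYS_EXAMPLE) mirror the idea rather than the kernel's exact names:

static int (*call_vec[NR_SYS_CALLS])(struct proc *caller, message *m_ptr);

/* map_check's size turns negative, forcing a compile error, when
 * call_nr is out of range */
#define map(call_nr, handler) do {                                      \
        extern int map_check[((call_nr) >= 0 &&                         \
                (call_nr) < NR_SYS_CALLS) ? 1 : -1];                    \
        call_vec[(call_nr)] = (handler);                                \
} while (0)

static void initialize_calls(void)
{
        int i;
        for (i = 0; i < NR_SYS_CALLS; i++)
                call_vec[i] = do_unused;        /* safe default handler */
        map(SYS_EXAMPLE, do_example);           /* hypothetical call */
}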
*/
for (i=_SRC_; i<=_DST_; i++) {
int p;
- /* Check if process number was given implictly with SELF and is valid. */
+ /* Check if process number was given implicitly with SELF and is valid. */
if (vir_addr[i].proc_nr_e == SELF)
vir_addr[i].proc_nr_e = caller->p_endpoint;
if (vir_addr[i].proc_nr_e != NONE) {
/* If the parent is a privileged process, take away the privileges from the
* child process and inhibit it from running by setting the NO_PRIV flag.
- * The caller should explicitely set the new privileges before executing.
+ * The caller should explicitly set the new privileges before executing.
*/
if (priv(rpp)->s_flags & SYS_PROC) {
rpc->p_priv = priv_addr(USER_PRIV_ID);
struct priv priv;
int irq;
- /* Check whether caller is allowed to make this call. Privileged proceses
+ /* Check whether caller is allowed to make this call. Privileged processes
* can only update the privileges of processes that are inhibited from
* running by the RTS_NO_PRIV flag. This flag is set when a privileged process
* forks.
return r;
priv(rp)->s_flags |= CHECK_MEM; /* Check memory mappings */
- /* When restarting a driver, check if it already has the premission */
+ /* When restarting a driver, check if it already has the permission */
for (i = 0; i < priv(rp)->s_nr_mem_range; i++) {
if (priv(rp)->s_mem_tab[i].mr_base == mem_range.mr_base &&
priv(rp)->s_mem_tab[i].mr_limit == mem_range.mr_limit)
KERNEL, (vir_bytes) &irq, sizeof(irq));
priv(rp)->s_flags |= CHECK_IRQ; /* Check IRQs */
- /* When restarting a driver, check if it already has the premission */
+ /* When restarting a driver, check if it already has the permission */
for (i = 0; i < priv(rp)->s_nr_irq; i++) {
if (priv(rp)->s_irq_tab[i] == irq)
return OK;
/* Perform a series of device I/O on behalf of a non-kernel process. The
* I/O addresses and I/O values are fetched from and returned to some buffer
* in user space. The actual I/O is wrapped by lock() and unlock() to prevent
- * that I/O batch from being interrrupted.
+ * that I/O batch from being interrupted.
* This is the counterpart of do_devio, which performs a single device I/O.
*/
int vec_size; /* size of vector */
/*
- * This is arch independent NMI watchdog implementaion part. It is used to
+ * This is arch independent NMI watchdog implementation part. It is used to
- * detect kernel lockups and help debugging. each architecture must add its own
+ * detect kernel lockups and help debugging. Each architecture must add its own
* low level code that triggers periodic checks
*/
-/* if the watchdog detects lockup, let the arch code to handle it */
+/* if the watchdog detects a lockup, let the arch code handle it */
void arch_watchdog_lockup(const struct nmi_frame * frame);
-/* generic NMI handler. Takes one agument which points to where the arch
+/* generic NMI handler. Takes one argument which points to where the arch
* specific low level handler dumped CPU information and can be inspected by the
- * arch specific code of the watchdog implementaion */
+ * arch specific code of the watchdog implementation */
void nmi_watchdog_handler(struct nmi_frame * frame);
#endif
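To show how the pieces above connect (the entry-point name is an assumption): each architecture's low-level NMI path dumps CPU state into a struct nmi_frame and forwards it to the generic handler, which calls arch_watchdog_lockup() back if it decides the CPU is stuck:

void arch_nmi_entry(struct nmi_frame *frame)
{
        /* frame was filled by the arch's low level dump code */
        nmi_watchdog_handler(frame);
}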
unsigned priority; /* the process' current priority */
unsigned time_slice; /* this process's time slice */
unsigned cpu; /* what CPU is the process running on */
- bitchunk_t cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is hte
+ bitchunk_t cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* what CPUs is the
process allowed
to run on */
} schedproc[NR_PROCS];
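For illustration, allowing a process to run on one more CPU means setting a bit in the cpu_mask bitmap above; a hedged sketch using plain shifts rather than any particular kernel macro:

#define CHUNK_BITS      (sizeof(bitchunk_t) * 8)

static void allow_cpu(bitchunk_t *mask, unsigned cpu)
{
        mask[cpu / CHUNK_BITS] |= (bitchunk_t) 1 << (cpu % CHUNK_BITS);
}

/* e.g.: allow_cpu(schedproc[i].cpu_mask, 2); */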