ARM support for kernel and vm
author    Arun Thomas <arun@minix3.org>
Mon, 8 Oct 2012 01:38:03 +0000 (21:38 -0400)
committer Arun Thomas <arun@minix3.org>
Mon, 8 Oct 2012 01:38:03 +0000 (21:38 -0400)
57 files changed:
include/arch/arm/include/vm.h
kernel/arch/arm/Makefile.inc [new file with mode: 0644]
kernel/arch/arm/arch_clock.c [new file with mode: 0644]
kernel/arch/arm/arch_do_vmctl.c [new file with mode: 0644]
kernel/arch/arm/arch_reset.c [new file with mode: 0644]
kernel/arch/arm/arch_system.c [new file with mode: 0644]
kernel/arch/arm/direct_tty_utils.c [new file with mode: 0644]
kernel/arch/arm/exc.S [new file with mode: 0644]
kernel/arch/arm/exception.c [new file with mode: 0644]
kernel/arch/arm/glo.h [new file with mode: 0644]
kernel/arch/arm/head.S [new file with mode: 0644]
kernel/arch/arm/include/arch_clock.h [new file with mode: 0644]
kernel/arch/arm/include/arch_proto.h [new file with mode: 0644]
kernel/arch/arm/include/arch_watchdog.h [new file with mode: 0644]
kernel/arch/arm/include/archconst.h [new file with mode: 0644]
kernel/arch/arm/include/cpufunc.h [new file with mode: 0644]
kernel/arch/arm/include/direct_utils.h [new file with mode: 0644]
kernel/arch/arm/include/hw_intr.h [new file with mode: 0644]
kernel/arch/arm/include/io.h [new file with mode: 0644]
kernel/arch/arm/io_intr.S [new file with mode: 0644]
kernel/arch/arm/kernel.lds [new file with mode: 0644]
kernel/arch/arm/klib.S [new file with mode: 0644]
kernel/arch/arm/memory.c [new file with mode: 0644]
kernel/arch/arm/mpx.S [new file with mode: 0644]
kernel/arch/arm/omap_intr.c [new file with mode: 0644]
kernel/arch/arm/omap_intr.h [new file with mode: 0644]
kernel/arch/arm/omap_serial.c [new file with mode: 0644]
kernel/arch/arm/omap_serial.h [new file with mode: 0644]
kernel/arch/arm/omap_timer.c [new file with mode: 0644]
kernel/arch/arm/omap_timer.h [new file with mode: 0644]
kernel/arch/arm/pg_utils.c [new file with mode: 0644]
kernel/arch/arm/phys_copy.S [new file with mode: 0644]
kernel/arch/arm/phys_memset.S [new file with mode: 0644]
kernel/arch/arm/pre_init.c [new file with mode: 0644]
kernel/arch/arm/procoffsets.cf [new file with mode: 0644]
kernel/arch/arm/protect.c [new file with mode: 0644]
kernel/arch/arm/sconst.h [new file with mode: 0644]
kernel/arch/arm/serial.h [new file with mode: 0644]
kernel/arch/arm/timer.h [new file with mode: 0644]
kernel/arch/i386/arch_clock.c
kernel/arch/i386/include/arch_clock.h
kernel/clock.c
kernel/debug.c
kernel/proc.c
kernel/system.c
kernel/system/Makefile.inc
kernel/system/do_fork.c
kernel/system/do_sigreturn.c
kernel/system/do_trace.c
lib/libc/arch/arm/sys-minix/_ipc.S
lib/libc/arch/arm/sys-minix/_senda.S
servers/vm/arch/arm/Makefile.inc [new file with mode: 0644]
servers/vm/arch/arm/memory.h [new file with mode: 0644]
servers/vm/arch/arm/pagefaults.h [new file with mode: 0644]
servers/vm/arch/arm/pagetable.c [new file with mode: 0644]
servers/vm/arch/arm/pagetable.h [new file with mode: 0644]
servers/vm/proto.h

index 974ce4743b10dbaf0f05c6b1f3a9b752163fa148..3800d907b68b69a347cabcff2637b1b3e20b2c96 100644 (file)
@@ -116,7 +116,14 @@ arm/vm.h
                                        */
 #define ARM_VM_PFE_TLB_CONFLICT   0x10 /* Caused by TLB conflict abort
                                        */
-#define ARM_VM_PFE_W   (1<<11) /* Caused by write (otherwise read) */
+
+#define ARM_VM_PFE_W     (1<<11)  /* Caused by write (otherwise read) */
+#define ARM_VM_PFE_FS4    (1<<10)  /* Fault status (bit 4) */
+#define ARM_VM_PFE_FS3_0   0xf     /* Fault status (bits 3:0) */
+
+/* Fault status */
+#define ARM_VM_PFE_FS(s) \
+    ((((s) & ARM_VM_PFE_FS4) >> 6) | ((s) & ARM_VM_PFE_FS3_0))
 
 #ifndef __ASSEMBLY__
 
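The new FS macros fold the DFSR's split fault-status field (bit 10 plus bits 3:0) into a single 5-bit value. A minimal decode sketch, assuming the ARMv7 short-descriptor DFSR layout (read_dfsr() is added in cpufunc.h later in this commit):

    u32_t fsr = read_dfsr();                /* raw Data Fault Status Register */
    int is_write = !!(fsr & ARM_VM_PFE_W);  /* write access, otherwise read */
    u32_t fs = ARM_VM_PFE_FS(fsr);          /* e.g. 0x7 = translation fault, 2nd level */
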
diff --git a/kernel/arch/arm/Makefile.inc b/kernel/arch/arm/Makefile.inc
new file mode 100644 (file)
index 0000000..dca10e4
--- /dev/null
@@ -0,0 +1,58 @@
+
+# Makefile for arch-dependent kernel code
+.include <bsd.own.mk>
+
+HERE=${.CURDIR}/arch/${MACHINE_ARCH}
+.PATH: ${HERE}
+
+# objects we want unpaged from -lminlib, -lminc
+MINLIB_OBJS_UNPAGED=get_bp.o
+MINC_OBJS_UNPAGED=strcat.o strlen.o memcpy.o strcpy.o strncmp.o memset.o \
+       memmove.o strcmp.o atoi.o ctype_.o _stdfile.o strtol.o _errno.o errno.o
+#      udivdi3.o umoddi3.o qdivrem.o
+SYS_OBJS_UNPAGED=kprintf.o vprintf.o assert.o stacktrace.o
+LIBGCC_OBJS_UNPAGED=_divsi3.o _udivsi3.o _divdi3.o  _udivdi3.o _umoddi3.o \
+       _dvmd_tls.o _aeabi_uldivmod.o _clzsi2.o bpabi.o
+
+# We give some object files a symbol prefix (or namespace) of __k_unpaged_,
+#      as they must live in their own unique namespace.
+#
+.for UNPAGED_OBJ in head.o pre_init.o direct_tty_utils.o \
+       pg_utils.o klib.o omap_serial.o utility.o arch_reset.o \
+       $(MINLIB_OBJS_UNPAGED) $(MINC_OBJS_UNPAGED) $(SYS_OBJS_UNPAGED) $(LIBGCC_OBJS_UNPAGED)
+unpaged_${UNPAGED_OBJ}: ${UNPAGED_OBJ}
+       ${OBJCOPY} --prefix-symbols=__k_unpaged_ ${UNPAGED_OBJ} unpaged_${UNPAGED_OBJ}
+UNPAGED_OBJS += unpaged_${UNPAGED_OBJ}
+ORIG_UNPAGED_OBJS += ${UNPAGED_OBJ}
+.endfor
+
+# we have to extract some object files from libminc.a and libminlib.a
+$(MINLIB_OBJS_UNPAGED) $(MINC_OBJS_UNPAGED) $(SYS_OBJS_UNPAGED) $(LIBGCC_OBJS_UNPAGED): $(LIBMINLIB) $(LIBMINC) $(LIBSYS) $(LIBGCC)
+       ${AR} x $(LIBMINLIB) $(MINLIB_OBJS_UNPAGED)
+       ${AR} x $(LIBMINC) $(MINC_OBJS_UNPAGED)
+       ${AR} x $(LIBSYS) $(SYS_OBJS_UNPAGED)
+       ${AR} x $(LIBGCC) $(LIBGCC_OBJS_UNPAGED)
+
+CLEANFILES+= $(ORIG_UNPAGED_OBJS)
+
+SRCS+= mpx.S arch_clock.c arch_do_vmctl.c arch_system.c \
+       omap_serial.c omap_timer.c omap_intr.c exception.c \
+       io_intr.S klib.S memory.c \
+       protect.c direct_tty_utils.c arch_reset.c \
+       pg_utils.c phys_copy.S phys_memset.S exc.S
+OBJS.kernel+=  ${UNPAGED_OBJS}
+
+klib.d mpx.d head.d: procoffsets.h
+
+# It's OK to hardcode the arch as arm here as this and procoffsets.cf
+# are arm-specific.
+TMP=procoffsets.h.tmp
+INCLS=../include/arch/arm/include/
+PROCOFFSETSCF=procoffsets.cf
+
+procoffsets.h: $(PROCOFFSETSCF) kernel.h proc.h $(INCLS)/stackframe.h $(INCLS)/archtypes.h
+       ${_MKTARGET_CREATE}
+       cat ${HERE}/$(PROCOFFSETSCF) | \
+         ${TOOL_GENASSYM} -- ${CC} ${CFLAGS} ${CPPFLAGS} ${PROF} >${TMP} && \
+         mv -f ${TMP} $@
+
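The unpaged objects above run before paging is enabled, from the kernel's load address. Prefixing every symbol with __k_unpaged_ lets the same sources be linked into the image twice without collisions, and lets kernel.lds (below) place the two copies in separate sections. A sketch of the effect, with illustrative names:

    /* after objcopy --prefix-symbols=__k_unpaged_ on the unpaged copy: */
    extern void kputc(int c);              /* paged copy, mapped high */
    extern void __k_unpaged_kputc(int c);  /* unpaged copy, at the load address */
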
diff --git a/kernel/arch/arm/arch_clock.c b/kernel/arch/arm/arch_clock.c
new file mode 100644 (file)
index 0000000..0375963
--- /dev/null
@@ -0,0 +1,141 @@
+
+/* ARM-specific clock functions. */
+
+#include "kernel/kernel.h"
+
+#include "kernel/clock.h"
+#include "kernel/proc.h"
+#include "kernel/interrupt.h"
+#include <minix/u64.h>
+#include "glo.h"
+#include "profile.h"
+
+
+#include "spinlock.h"
+
+#ifdef CONFIG_SMP
+#include "kernel/smp.h"
+#endif
+
+#include "omap_timer.h"
+#include "omap_intr.h"
+
+static unsigned tsc_per_ms[CONFIG_MAX_CPUS];
+
+int init_local_timer(unsigned freq)
+{
+       omap3_timer_init(freq);
+       /* always only 1 cpu in the system */
+       tsc_per_ms[0] = 1;
+
+       return 0;
+}
+
+void stop_local_timer(void)
+{
+       omap3_timer_stop();
+}
+
+void arch_timer_int_handler(void)
+{
+       omap3_timer_int_handler();
+}
+
+void cycles_accounting_init(void)
+{
+       read_tsc_64(get_cpu_var_ptr(cpu, tsc_ctr_switch));
+
+       make_zero64(get_cpu_var(cpu, cpu_last_tsc));
+       make_zero64(get_cpu_var(cpu, cpu_last_idle));
+}
+
+void context_stop(struct proc * p)
+{
+       u64_t tsc, tsc_delta;
+       u64_t * __tsc_ctr_switch = get_cpulocal_var_ptr(tsc_ctr_switch);
+
+       read_tsc_64(&tsc);
+       tsc_delta = sub64(tsc, *__tsc_ctr_switch);
+       p->p_cycles = add64(p->p_cycles, tsc_delta);
+
+       if(kbill_ipc) {
+               kbill_ipc->p_kipc_cycles =
+                       add64(kbill_ipc->p_kipc_cycles, tsc_delta);
+               kbill_ipc = NULL;
+       }
+
+       if(kbill_kcall) {
+               kbill_kcall->p_kcall_cycles =
+                       add64(kbill_kcall->p_kcall_cycles, tsc_delta);
+               kbill_kcall = NULL;
+       }
+
+       /*
+        * deduct the just consumed cpu cycles from the cpu time left for this
+        * process during its current quantum. Skip IDLE and other pseudo kernel
+        * tasks
+        */
+       if (p->p_endpoint >= 0) {
+#if DEBUG_RACE
+               make_zero64(p->p_cpu_time_left);
+#else
+               /* if (tsc_delta < p->p_cpu_time_left) in 64bit */
+               if (ex64hi(tsc_delta) < ex64hi(p->p_cpu_time_left) ||
+                               (ex64hi(tsc_delta) == ex64hi(p->p_cpu_time_left) &&
+                                ex64lo(tsc_delta) < ex64lo(p->p_cpu_time_left)))
+               {
+                       p->p_cpu_time_left = sub64(p->p_cpu_time_left, tsc_delta);
+               }
+               else {
+                       make_zero64(p->p_cpu_time_left);
+               }
+#endif
+       }
+
+       *__tsc_ctr_switch = tsc;
+}
+
+void context_stop_idle(void)
+{
+       int is_idle;
+#ifdef CONFIG_SMP
+       unsigned cpu = cpuid;
+#endif
+
+       is_idle = get_cpu_var(cpu, cpu_is_idle);
+       get_cpu_var(cpu, cpu_is_idle) = 0;
+
+       context_stop(get_cpulocal_var_ptr(idle_proc));
+
+       if (is_idle)
+               restart_local_timer();
+#if SPROFILE
+       if (sprofiling)
+               get_cpulocal_var(idle_interrupted) = 1;
+#endif
+}
+
+void restart_local_timer(void)
+{
+}
+
+int register_local_timer_handler(const irq_handler_t handler)
+{
+       return omap3_register_timer_handler(handler);
+}
+
+u64_t ms_2_cpu_time(unsigned ms)
+{
+       return mul64u(tsc_per_ms[cpuid], ms);
+}
+
+unsigned cpu_time_2_ms(u64_t cpu_time)
+{
+       return div64u(cpu_time, tsc_per_ms[cpuid]);
+}
+
+short cpu_load(void)
+{
+       return 0;
+}
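The conversion helpers scale by the per-CPU tsc_per_ms factor using the 64-bit helpers from <minix/u64.h>. A round-trip sketch, given that init_local_timer() above sets tsc_per_ms[0] = 1:

    u64_t t = ms_2_cpu_time(100);    /* mul64u(tsc_per_ms[0], 100) */
    unsigned ms = cpu_time_2_ms(t);  /* div64u(t, tsc_per_ms[0]) == 100 */
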
diff --git a/kernel/arch/arm/arch_do_vmctl.c b/kernel/arch/arm/arch_do_vmctl.c
new file mode 100644 (file)
index 0000000..26f563d
--- /dev/null
@@ -0,0 +1,58 @@
+/* The kernel call implemented in this file:
+ *   m_type:   SYS_VMCTL
+ *
+ * The parameters for this kernel call are:
+ *     SVMCTL_WHO      which process
+ *     SVMCTL_PARAM    set this setting (VMCTL_*)
+ *     SVMCTL_VALUE    to this value
+ */
+
+#include "kernel/system.h"
+#include <assert.h>
+#include <minix/type.h>
+
+#include "arch_proto.h"
+
+static void set_ttbr(struct proc *p, u32_t ttbr, u32_t *v)
+{
+       /* Set process TTBR. */
+       p->p_seg.p_ttbr = ttbr;
+       assert(p->p_seg.p_ttbr);
+       p->p_seg.p_ttbr_v = v;
+       if(p == get_cpulocal_var(ptproc)) {
+               write_ttbr0(p->p_seg.p_ttbr);
+       }
+       if(p->p_nr == VM_PROC_NR) {
+               if (arch_enable_paging(p) != OK)
+                       panic("arch_enable_paging failed");
+       }
+       RTS_UNSET(p, RTS_VMINHIBIT);
+}
+
+/*===========================================================================*
+ *                             arch_do_vmctl                                *
+ *===========================================================================*/
+int arch_do_vmctl(
+  register message *m_ptr,     /* pointer to request message */
+  struct proc *p)
+{
+  switch(m_ptr->SVMCTL_PARAM) {
+       case VMCTL_GET_PDBR:
+               /* Get process page directory base reg (TTBR). */
+               m_ptr->SVMCTL_VALUE = p->p_seg.p_ttbr;
+               return OK;
+       case VMCTL_SETADDRSPACE:
+               set_ttbr(p, m_ptr->SVMCTL_PTROOT, (u32_t *) m_ptr->SVMCTL_PTROOT_V);
+               return OK;
+       case VMCTL_FLUSHTLB:
+       {
+               reload_ttbr0();
+               return OK;
+       }
+  }
+
+  printf("arch_do_vmctl: strange param %d\n", m_ptr->SVMCTL_PARAM);
+  return EINVAL;
+}
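A hedged sketch of the caller's side of this kernel call, using the field names from the header comment; the user-space wrapper itself is not part of this diff, and proc_ep is an assumed endpoint variable:

    message m;
    memset(&m, 0, sizeof(m));
    m.SVMCTL_WHO   = proc_ep;          /* which process */
    m.SVMCTL_PARAM = VMCTL_FLUSHTLB;   /* set this setting */
    m.SVMCTL_VALUE = 0;                /* to this value */
    /* delivered to the kernel as a SYS_VMCTL call, e.g. _kernel_call(SYS_VMCTL, &m) */
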
diff --git a/kernel/arch/arm/arch_reset.c b/kernel/arch/arm/arch_reset.c
new file mode 100644 (file)
index 0000000..66232ba
--- /dev/null
@@ -0,0 +1,45 @@
+
+#include "kernel/kernel.h"
+
+#include <unistd.h>
+#include <ctype.h>
+#include <string.h>
+#include <machine/cpu.h>
+#include <assert.h>
+#include <signal.h>
+#include <machine/vm.h>
+
+#include <minix/u64.h>
+
+#include "archconst.h"
+#include "arch_proto.h"
+#include "serial.h"
+#include "kernel/proc.h"
+#include "kernel/debug.h"
+#include "direct_utils.h"
+#include <machine/multiboot.h>
+
+void halt_cpu(void)
+{
+    asm volatile("dsb");
+    asm volatile("wfi");
+}
+
+void
+reset(void)
+{
+    while (1);
+}
+
+__dead void arch_shutdown(int how)
+{
+    while (1);
+}
+
+#ifdef DEBUG_SERIAL
+void ser_putc(char c)
+{
+    omap3_ser_putc(c);
+}
+
+#endif
diff --git a/kernel/arch/arm/arch_system.c b/kernel/arch/arm/arch_system.c
new file mode 100644 (file)
index 0000000..7e19224
--- /dev/null
@@ -0,0 +1,182 @@
+/* system dependent functions for use inside the whole kernel. */
+
+#include "kernel/kernel.h"
+
+#include <unistd.h>
+#include <ctype.h>
+#include <string.h>
+#include <minix/cpufeature.h>
+#include <assert.h>
+#include <signal.h>
+#include <machine/vm.h>
+
+#include <minix/u64.h>
+
+#include "archconst.h"
+#include "arch_proto.h"
+#include "serial.h"
+#include "kernel/proc.h"
+#include "kernel/debug.h"
+
+#include "glo.h"
+
+void * k_stacks;
+
+static void ser_init(void);
+
+void fpu_init(void)
+{
+}
+
+void save_local_fpu(struct proc *pr, int retain)
+{
+}
+
+void save_fpu(struct proc *pr)
+{
+}
+
+void arch_proc_reset(struct proc *pr)
+{
+       assert(pr->p_nr < NR_PROCS);
+
+       /* Clear process state. */
+        memset(&pr->p_reg, 0, sizeof(pr->p_reg));
+        if(iskerneln(pr->p_nr))
+               pr->p_reg.psr = INIT_TASK_PSR;
+        else
+               pr->p_reg.psr = INIT_PSR;
+}
+
+void arch_proc_setcontext(struct proc *p, struct stackframe_s *state, int isuser)
+{
+}
+
+void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
+{
+       p->p_reg.r1 = val;
+}
+
+int restore_fpu(struct proc *pr)
+{
+       return 0;
+}
+
+void cpu_identify(void)
+{
+       u32_t midr;
+       unsigned cpu = cpuid;
+
+       asm volatile("mrc p15, 0, %[midr], c0, c0, 0 @ read MIDR\n\t"
+                    : [midr] "=r" (midr));
+
+       cpu_info[cpu].implementer = midr >> 24;
+       cpu_info[cpu].variant = (midr >> 20) & 0xF;
+       cpu_info[cpu].arch = (midr >> 16) & 0xF;
+       cpu_info[cpu].part = (midr >> 4) & 0xFFF;
+       cpu_info[cpu].revision = midr & 0xF;
+}
+
+void arch_init(void)
+{
+       k_stacks = (void*) &k_stacks_start;
+       assert(!((vir_bytes) k_stacks % K_STACK_SIZE));
+
+#ifndef CONFIG_SMP
+       /*
+        * use stack 0 and cpu id 0 on a single processor machine, SMP
+        * configuration does this in smp_init() for all cpus at once
+        */
+       tss_init(0, get_k_stack_top(0));
+#endif
+
+       ser_init();
+}
+
+/*===========================================================================*
+ *                             do_ser_debug                                 * 
+ *===========================================================================*/
+void do_ser_debug()
+{
+}
+
+void arch_do_syscall(struct proc *proc)
+{
+  /* do_ipc assumes that it's running because of the current process */
+  assert(proc == get_cpulocal_var(proc_ptr));
+  /* Make the system call, for real this time. */
+  proc->p_reg.retreg =
+         do_ipc(proc->p_reg.retreg, proc->p_reg.r1, proc->p_reg.r2);
+}
+
+reg_t svc_stack;
+
+struct proc * arch_finish_switch_to_user(void)
+{
+       char * stk;
+       struct proc * p;
+
+#ifdef CONFIG_SMP
+       stk = (char *)tss[cpuid].sp0;
+#else
+       stk = (char *)tss[0].sp0;
+#endif
+       svc_stack = (reg_t)stk;
+       /* store a pointer to the process to run at the top of the stack */
+       p = get_cpulocal_var(proc_ptr);
+       *((reg_t *)stk) = (reg_t) p;
+
+       /* Make sure the I bit is clear in the PSR so that interrupts won't be
+        * disabled once p's context is restored; the I bit being set here
+        * should not be possible.
+        */
+        assert(!(p->p_reg.psr & PSR_I));
+
+       return p;
+}
+
+void fpu_sigcontext(struct proc *pr, struct sigframe *fr, struct sigcontext *sc)
+{
+}
+
+reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }
+
+void get_randomness(struct k_randomness *rand, int source)
+{
+}
+
+static void ser_init(void)
+{
+}
+
+/*===========================================================================*/
+/*                           __switch_address_space                         */
+/*===========================================================================*/
+/*
+ * Set the ttbr register to the supplied value, unless it already holds that
+ * value, in which case rewriting it would only cause an extra, undesirable
+ * TLB flush.
+ */
+void __switch_address_space(struct proc *p, struct proc **__ptproc)
+{
+       reg_t orig_ttbr, new_ttbr;
+
+       new_ttbr = p->p_seg.p_ttbr;
+       if (new_ttbr == 0)
+           return;
+
+       orig_ttbr = read_ttbr0();
+
+       /*
+        * test if ttbr is loaded with the current value to avoid unnecessary
+        * TLB flushes
+        */
+       if (new_ttbr == orig_ttbr)
+           return;
+
+       refresh_tlb();
+       write_ttbr0(new_ttbr);
+
+       *__ptproc = p;
+
+       return;
+}
diff --git a/kernel/arch/arm/direct_tty_utils.c b/kernel/arch/arm/direct_tty_utils.c
new file mode 100644 (file)
index 0000000..9862cf2
--- /dev/null
@@ -0,0 +1,42 @@
+
+#include "kernel.h"
+#include <minix/minlib.h>
+#include <minix/const.h>
+#include <minix/cpufeature.h>
+#include <minix/types.h>
+#include <minix/type.h>
+#include <minix/com.h>
+#include <sys/param.h>
+#include <libexec.h>
+#include <string.h>
+#include "arch_proto.h"
+#include "direct_utils.h"
+#include "serial.h"
+#include "glo.h"
+#include <machine/multiboot.h>
+
+void direct_cls(void)
+{
+    /* Do nothing */
+}
+
+void direct_print_char(char c)
+{
+       if(c == '\n')
+               ser_putc('\r');
+       ser_putc(c);
+}
+
+void direct_print(const char *str)
+{
+       while (*str) {
+               direct_print_char(*str);
+               str++;
+       }
+}
+
+int direct_read_char(unsigned char *ch)
+{
+       return 0;
+}
diff --git a/kernel/arch/arm/exc.S b/kernel/arch/arm/exc.S
new file mode 100644 (file)
index 0000000..743102c
--- /dev/null
@@ -0,0 +1,22 @@
+#include <machine/asm.h>
+
+IMPORT(undefined_inst_entry)
+IMPORT(svc_entry)
+IMPORT(prefetch_abort_entry)
+IMPORT(data_abort_entry)
+IMPORT(irq_entry)
+
+.text
+.balign        4096
+LABEL(exc_vector_table)
+    ldr pc, =invalid_exc           /* Reset */
+    ldr pc, =undefined_inst_entry  /* Undefined Instruction */
+    ldr pc, =svc_entry             /* Supervisor Call */
+    ldr pc, =prefetch_abort_entry  /* Prefetch Abort */
+    ldr pc, =data_abort_entry      /* Data Abort */
+    ldr pc, =invalid_exc           /* Hypervisor Call */
+    ldr pc, =irq_entry             /* Interrupt */
+    ldr pc, =invalid_exc           /* Fast Interrupt */
+
+ENTRY(invalid_exc)
+       b .
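The table uses literal-pool loads (ldr pc, =label) rather than plain branches, so each entry can reach a handler anywhere in the 32-bit address space. A sketch of how such a table is installed on ARMv7, using write_vbar() from cpufunc.h below; the actual installation site is not shown in this excerpt, and the .balign 4096 above comfortably satisfies VBAR's alignment requirement:

    extern char exc_vector_table;            /* label defined above */
    write_vbar((u32_t) &exc_vector_table);   /* point exception entry at the table */
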
diff --git a/kernel/arch/arm/exception.c b/kernel/arch/arm/exception.c
new file mode 100644 (file)
index 0000000..416dacb
--- /dev/null
@@ -0,0 +1,223 @@
+/* This file contains a simple exception handler.  Exceptions in user
+ * processes are converted to signals. Exceptions in a kernel task cause
+ * a panic.
+ */
+
+#include "kernel/kernel.h"
+#include "arch_proto.h"
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include "kernel/proc.h"
+#include "kernel/proto.h"
+#include <machine/vm.h>
+
+struct ex_s {
+       char *msg;
+       int signum;
+};
+
+static struct ex_s ex_data[] = {
+       { "Reset", 0},
+       { "Undefined instruction", SIGILL},
+       { "Supervisor call", 0},
+       { "Prefetch Abort", SIGILL},
+       { "Data Abort", SIGSEGV},
+       { "Hypervisor call", 0},
+       { "Interrupt", 0},
+       { "Fast Interrupt", 0},
+};
+
+static void inkernel_disaster(struct proc *saved_proc,
+       reg_t *saved_lr, struct ex_s *ep, int is_nested);
+
+extern int catch_pagefaults;
+
+static void proc_stacktrace_execute(struct proc *whichproc, reg_t v_bp, reg_t pc);
+
+static void pagefault( struct proc *pr,
+                       reg_t *saved_lr,
+                       int is_nested)
+{
+       int in_physcopy = 0, in_memset = 0;
+
+       reg_t pagefault_addr, pagefault_status;
+       message m_pagefault;
+       int err;
+
+       pagefault_addr = read_dfar();
+       pagefault_status = read_dfsr();
+
+#if 0
+       printf("kernel: pagefault in pr %d, addr 0x%lx, its ttbr 0x%lx, actual ttbr 0x%lx\n",
+               pr->p_endpoint, pagefault_addr, pr->p_seg.p_ttbr, read_ttbr0());
+#endif
+
+       in_physcopy = (*saved_lr > (vir_bytes) phys_copy) &&
+          (*saved_lr < (vir_bytes) phys_copy_fault);
+
+       in_memset = (*saved_lr > (vir_bytes) phys_memset) &&
+          (*saved_lr < (vir_bytes) memset_fault);
+
+       if((is_nested || iskernelp(pr)) &&
+               catch_pagefaults && (in_physcopy || in_memset)) {
+#if 0
+               printf("pf caught! addr 0x%lx\n", pagefault_addr);
+#endif
+               if (is_nested) {
+                       if(in_physcopy) {
+                               assert(!in_memset);
+                               *saved_lr = (reg_t) phys_copy_fault_in_kernel;
+                       } else {
+                               *saved_lr = (reg_t) memset_fault_in_kernel;
+                       }
+               }
+               else {
+                       pr->p_reg.pc = (reg_t) phys_copy_fault;
+                       pr->p_reg.retreg = pagefault_addr;
+               }
+
+               return;
+       }
+
+       if(is_nested) {
+               printf("pagefault in kernel at pc 0x%lx address 0x%lx\n",
+                       *saved_lr, pagefault_addr);
+               inkernel_disaster(pr, saved_lr, NULL, is_nested);
+       }
+
+       /* VM can't handle page faults. */
+       if(pr->p_endpoint == VM_PROC_NR) {
+               /* Page fault we can't / don't want to
+                * handle.
+                */
+               printf("pagefault for VM on CPU %d, "
+                       "pc = 0x%x, addr = 0x%x, flags = 0x%x, is_nested %d\n",
+                       cpuid, pr->p_reg.pc, pagefault_addr, pagefault_status,
+                       is_nested);
+               proc_stacktrace(pr);
+               printf("pc of pagefault: 0x%lx\n", pr->p_reg.pc);
+               panic("pagefault in VM");
+
+               return;
+       }
+
+       /* Don't schedule this process until pagefault is handled. */
+       RTS_SET(pr, RTS_PAGEFAULT);
+
+       /* Tell VM about the pagefault. */
+       m_pagefault.m_source = pr->p_endpoint;
+       m_pagefault.m_type   = VM_PAGEFAULT;
+       m_pagefault.VPF_ADDR = pagefault_addr;
+       m_pagefault.VPF_FLAGS = pagefault_status;
+
+       if ((err = mini_send(pr, VM_PROC_NR,
+                                       &m_pagefault, FROM_KERNEL))) {
+               panic("WARNING: pagefault: mini_send returned %d\n", err);
+       }
+
+       return;
+}
+
+static void inkernel_disaster(struct proc *saved_proc,
+       reg_t *saved_lr, struct ex_s *ep,
+       int is_nested)
+{
+#if USE_SYSDEBUG
+  if(ep)
+       printf("\n%s\n", ep->msg);
+
+  printf("cpu %d is_nested = %d ", cpuid, is_nested);
+
+  if (saved_proc) {
+         printf("scheduled was: process %d (%s), ", saved_proc->p_endpoint, saved_proc->p_name);
+         printf("pc = 0x%x\n", (unsigned) saved_proc->p_reg.pc);
+         proc_stacktrace(saved_proc);
+
+         panic("Unhandled kernel exception");
+  }
+
+  /* In an early stage of the boot process we don't have processes yet. */
+  panic("exception in kernel while booting, no saved_proc yet");
+
+#endif /* USE_SYSDEBUG */
+}
+
+void exception_handler(int is_nested, reg_t *saved_lr, int vector)
+{
+/* An exception or unexpected interrupt has occurred. */
+  struct ex_s *ep;
+  struct proc *saved_proc;
+
+  saved_proc = get_cpulocal_var(proc_ptr);
+
+  ep = &ex_data[vector];
+
+  /*
+   * Handle special cases for nested problems, as they can be tricky;
+   * filter them out quickly if the traps are not nested.
+   */
+  if (is_nested) {
+       /*
+        * If a problem occurred while copying a message from userspace because
+        * of a bad pointer supplied by userland, handle it the only way we
+        * can handle it ...
+        */
+       if (((void*)*saved_lr >= (void*)copy_msg_to_user &&
+                       (void*)*saved_lr <= (void*)__copy_msg_to_user_end) ||
+                       ((void*)*saved_lr >= (void*)copy_msg_from_user &&
+                       (void*)*saved_lr <= (void*)__copy_msg_from_user_end)) {
+               switch(vector) {
+               /* these errors are expected */
+               case DATA_ABORT_VECTOR:
+                       *saved_lr = (reg_t) __user_copy_msg_pointer_failure;
+                       return;
+               default:
+                       panic("Copy involving a user pointer failed unexpectedly!");
+               }
+       }
+  }
+
+  if (vector == DATA_ABORT_VECTOR) {
+       pagefault(saved_proc, saved_lr, is_nested);
+       return;
+  }
+
+  /* If an exception occurs while running a process, the is_nested variable
+   * will be zero. Exceptions in interrupt handlers or system traps will make
+   * is_nested non-zero.
+   */
+  if (is_nested == 0 && ! iskernelp(saved_proc)) {
+       cause_sig(proc_nr(saved_proc), ep->signum);
+       return;
+  }
+
+  /* Exception in system code. This is not supposed to happen. */
+  inkernel_disaster(saved_proc, saved_lr, ep, is_nested);
+
+  panic("return from inkernel_disaster");
+}
+
+#if USE_SYSDEBUG
+/*===========================================================================*
+ *                             proc_stacktrace_execute                      *
+ *===========================================================================*/
+static void proc_stacktrace_execute(struct proc *whichproc, reg_t v_bp, reg_t pc)
+{
+}
+#endif
+
+void proc_stacktrace(struct proc *whichproc)
+{
+#if USE_SYSDEBUG
+       proc_stacktrace_execute(whichproc, whichproc->p_reg.fp, whichproc->p_reg.pc);
+#endif /* USE_SYSDEBUG */
+}
+
+void enable_fpu_exception(void)
+{
+}
+
+void disable_fpu_exception(void)
+{
+}
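For reference, a sketch of the receiving side in VM (not part of this file): the fault address and raw DFSR arrive in the message fields that pagefault() fills in above:

    vir_bytes addr = msg.VPF_ADDR;            /* faulting address */
    u32_t fsr      = msg.VPF_FLAGS;           /* raw DFSR value */
    int wrflag     = !!(fsr & ARM_VM_PFE_W);  /* decode per arm/vm.h above */
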
diff --git a/kernel/arch/arm/glo.h b/kernel/arch/arm/glo.h
new file mode 100644 (file)
index 0000000..d12da42
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef __GLO_ARM_H__
+#define __GLO_ARM_H__
+
+#include "kernel/kernel.h"
+#include "arch_proto.h"
+
+EXTERN struct tss_s tss[CONFIG_MAX_CPUS];
+
+#endif /* __GLO_ARM_H__ */
diff --git a/kernel/arch/arm/head.S b/kernel/arch/arm/head.S
new file mode 100644 (file)
index 0000000..5f8bd04
--- /dev/null
@@ -0,0 +1,53 @@
+#include "kernel/kernel.h" /* configures the kernel */
+
+/* sections */
+
+#include <machine/vm.h>
+#include <minix/config.h>
+#include <minix/const.h>
+#include <minix/com.h>
+#include <machine/asm.h>
+#include <machine/interrupt.h>
+#include "archconst.h"
+#include "kernel/const.h"
+#include "kernel/proc.h"
+#include "sconst.h"
+#include <machine/multiboot.h>
+#include <machine/cpu.h>
+
+#include "arch_proto.h" /* K_STACK_SIZE */
+
+.text
+/*===========================================================================*/
+/*                             MINIX                                */
+/*===========================================================================*/
+.global MINIX
+MINIX:
+/* this is the entry point for the MINIX kernel */
+       b multiboot_init
+
+multiboot_init:
+       ldr     sp, =load_stack_start /* make usable stack */
+       mov     fp, #0
+       bl      _C_LABEL(pre_init)
+
+       /* Kernel is mapped high now and ready to go, with
+        * the boot info pointer returned in r0. Set the
+        * highly mapped stack, initialize it, push the boot
+        * info pointer and jump to the highly mapped kernel.
+        */
+       ldr     sp, =k_initial_stktop
+       mov     r1, #0
+       push    {r1}    /* Terminate stack */
+       /* r0 holds kinfo_t ptr */
+       bl      _C_LABEL(kmain)
+
+       /* not reached */
+hang:
+       b hang
+
+.data
+load_stack:
+       .space 4096
+load_stack_start:
diff --git a/kernel/arch/arm/include/arch_clock.h b/kernel/arch/arm/include/arch_clock.h
new file mode 100644 (file)
index 0000000..21574aa
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __CLOCK_ARM_H__
+#define __CLOCK_ARM_H__
+
+void arch_timer_int_handler(void);
+
+#endif /* __CLOCK_ARM_H__ */
diff --git a/kernel/arch/arm/include/arch_proto.h b/kernel/arch/arm/include/arch_proto.h
new file mode 100644 (file)
index 0000000..f0ae853
--- /dev/null
@@ -0,0 +1,61 @@
+
+#ifndef _ARM_PROTO_H
+#define _ARM_PROTO_H
+
+#include <machine/vm.h>
+
+#define K_STACK_SIZE   ARM_PAGE_SIZE
+
+
+#ifndef __ASSEMBLY__
+
+#include "cpufunc.h"
+
+/* klib */
+__dead void reset(void);
+phys_bytes vir2phys(void *);
+vir_bytes phys_memset(phys_bytes ph, u32_t c, phys_bytes bytes);
+
+void __switch_address_space(struct proc *p, struct proc **__ptproc);
+#define switch_address_space(proc)     \
+       __switch_address_space(proc, get_cpulocal_var_ptr(ptproc))
+
+void __copy_msg_from_user_end(void);
+void __copy_msg_to_user_end(void);
+void __user_copy_msg_pointer_failure(void);
+
+/* multiboot.c */
+void multiboot_init(void);
+
+/* protect.c */
+struct tss_s {
+  reg_t sp0;                    /* stack pointer to use during interrupt */
+} __attribute__((packed));
+int tss_init(unsigned cpu, void * kernel_stack);
+
+void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len);
+phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len);
+void vm_enable_paging(void);
+void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end);
+phys_bytes pg_roundup(phys_bytes b);
+void pg_info(reg_t *, u32_t **);
+void pg_clear(void);
+void pg_identity(kinfo_t *);
+phys_bytes pg_load(void);
+void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end, kinfo_t *cbi);
+int pg_mapkernel(void);
+void pg_mapproc(struct proc *p, struct boot_image *ip, kinfo_t *cbi);
+
+EXTERN void * k_stacks_start;
+extern void * k_stacks;
+
+#define get_k_stack_top(cpu)   ((void *)(((char*)(k_stacks)) \
+                                       + 2 * ((cpu) + 1) * K_STACK_SIZE))
+
+
+/* functions defined in architecture-independent kernel source. */
+#include "kernel/proto.h"
+
+#endif /* __ASSEMBLY__ */
+
+#endif
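get_k_stack_top() spaces the per-CPU stack tops 2 * K_STACK_SIZE apart, so each CPU owns a two-page slot below its top. A sketch of the invariant:

    /* with K_STACK_SIZE == ARM_PAGE_SIZE, cpu 0's top is k_stacks + 8 KiB */
    assert((char *) get_k_stack_top(1) - (char *) get_k_stack_top(0)
            == 2 * K_STACK_SIZE);
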
diff --git a/kernel/arch/arm/include/arch_watchdog.h b/kernel/arch/arm/include/arch_watchdog.h
new file mode 100644 (file)
index 0000000..2e6f733
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __ARM_WATCHDOG_H__
+#define __ARM_WATCHDOG_H__
+
+#include "kernel/kernel.h"
+
+#endif /* __ARM_WATCHDOG_H__ */
diff --git a/kernel/arch/arm/include/archconst.h b/kernel/arch/arm/include/archconst.h
new file mode 100644 (file)
index 0000000..4830763
--- /dev/null
@@ -0,0 +1,31 @@
+
+#ifndef _ARM_ACONST_H
+#define _ARM_ACONST_H
+
+#include <machine/interrupt.h>
+#include <machine/memory.h>
+#include <machine/cpu.h>
+
+/* Program stack words and masks. */
+#define INIT_PSR      (MODE_USR | PSR_F)    /* initial psr */
+#define INIT_TASK_PSR (MODE_SVC | PSR_F)    /* initial psr for tasks */
+
+/* Exception vector numbers */
+#define RESET_VECTOR                  0
+#define UNDEFINED_INST_VECTOR         1
+#define SUPERVISOR_CALL_VECTOR        2
+#define PREFETCH_ABORT_VECTOR         3
+#define DATA_ABORT_VECTOR             4
+#define HYPERVISOR_CALL_VECTOR        5
+#define INTERRUPT_VECTOR              6
+#define FAST_INTERRUPT_VECTOR         7
+
+/*
+ * Defines how many bytes are reserved at the top of the kernel stack for
+ * global information, such as the currently scheduled process and the
+ * current CPU id.
+ */
+#define ARM_STACK_TOP_RESERVED (2 * sizeof(reg_t))
+
+#define PG_ALLOCATEME ((phys_bytes)-1)
+
+#endif /* _ARM_ACONST_H */
diff --git a/kernel/arch/arm/include/cpufunc.h b/kernel/arch/arm/include/cpufunc.h
new file mode 100644 (file)
index 0000000..3736620
--- /dev/null
@@ -0,0 +1,273 @@
+#ifndef _ARM_CPUFUNC_H
+#define _ARM_CPUFUNC_H
+
+/* Data memory barrier */
+static inline void dmb(void)
+{
+       asm volatile("dmb" : : : "memory");
+}
+
+/* Data synchronization barrier */
+static inline void dsb(void)
+{
+       asm volatile("dsb" : : : "memory");
+}
+
+/* Instruction synchronization barrier */
+static inline void isb(void)
+{
+       asm volatile("isb" : : : "memory");
+}
+
+static inline void barrier(void)
+{
+       dsb();
+       isb();
+}
+
+static inline void refresh_tlb(void)
+{
+       dsb();
+       /* Invalidate entire unified TLB */
+       asm volatile("mcr p15, 0, r0, c8, c7, 0 @ TLBIALL\n\t");
+       dsb();
+       isb();
+}
+
+
+/* Read System Control Register */
+static inline u32_t read_sctlr()
+{
+    u32_t ctl;
+
+    asm volatile("mrc p15, 0, %[ctl], c1, c0, 0 @ Read SCTLR\n\t"
+                : [ctl] "=r" (ctl));
+    return ctl;
+}
+
+/* Write System Control Register */
+static inline void write_sctlr(u32_t ctl)
+{
+    asm volatile("mcr p15, 0, %[ctl], c1, c0, 0 @ Write SCTLR\n\t"
+                : : [ctl] "r" (ctl));
+}
+
+/* Read Translation Table Base Register 0 */
+static inline u32_t read_ttbr0()
+{
+    u32_t bar;
+
+    asm volatile("mrc p15, 0, %[bar], c2, c0, 0 @ Read TTBR0\n\t"
+                : [bar] "=r" (bar));
+    return bar;
+}
+
+/* Write Translation Table Base Register 0 */
+static inline void write_ttbr0(u32_t bar)
+{
+    barrier();
+    asm volatile("mcr p15, 0, %[bar], c2, c0, 0 @ Write TTBR0\n\t"
+                : : [bar] "r" (bar));
+    refresh_tlb();
+}
+
+/* Reload Translation Table Base Register 0 */
+static inline void reload_ttbr0(void)
+{
+    reg_t ttbr = read_ttbr0();
+    write_ttbr0(ttbr);
+    refresh_tlb();
+}
+
+/* Read Translation Table Base Register 1 */
+static inline u32_t read_ttbr1()
+{
+    u32_t bar;
+
+    asm volatile("mrc p15, 0, %[bar], c2, c0, 1 @ Read TTBR1\n\t"
+                : [bar] "=r" (bar));
+    return bar;
+}
+
+/* Write Translation Table Base Register 1 */
+static inline void write_ttbr1(u32_t bar)
+{
+    barrier();
+    asm volatile("mcr p15, 0, %[bar], c2, c0, 1 @ Write TTBR1\n\t"
+                : : [bar] "r" (bar));
+    refresh_tlb();
+}
+
+/* Reload Translation Table Base Register 1 */
+static inline void reload_ttbr1(void)
+{
+    reg_t ttbr = read_ttbr1();
+    write_ttbr1(ttbr);
+    refresh_tlb();
+}
+
+/* Read Translation Table Base Control Register */
+static inline u32_t read_ttbcr()
+{
+    u32_t bcr;
+
+    asm volatile("mrc p15, 0, %[bcr], c2, c0, 2 @ Read TTBCR\n\t"
+                : [bcr] "=r" (bcr));
+    return bcr;
+}
+
+/* Write Translation Table Base Control Register */
+static inline void write_ttbcr(u32_t bcr)
+{
+    asm volatile("mcr p15, 0, %[bcr], c2, c0, 2 @ Write TTBCR\n\t"
+                : : [bcr] "r" (bcr));
+}
+
+/* Read Domain Access Control Register */
+static inline u32_t read_dacr()
+{
+    u32_t dacr;
+
+    asm volatile("mrc p15, 0, %[dacr], c3, c0, 0 @ Read DACR\n\t"
+                : [dacr] "=r" (dacr));
+    return dacr;
+}
+
+/* Write Domain Access Control Register */
+static inline void write_dacr(u32_t dacr)
+{
+    asm volatile("mcr p15, 0, %[dacr], c3, c0, 0 @ Write DACR\n\t"
+                : : [dacr] "r" (dacr));
+}
+
+/* Read Data Fault Status Register */
+static inline u32_t read_dfsr()
+{
+    u32_t fsr;
+
+    asm volatile("mrc p15, 0, %[fsr], c5, c0, 0 @ Read DFSR\n\t"
+                : [fsr] "=r" (fsr));
+    return fsr;
+}
+
+/* Write Data Fault Status Register */
+static inline void write_dfsr(u32_t fsr)
+{
+    asm volatile("mcr p15, 0, %[fsr], c5, c0, 0 @ Write DFSR\n\t"
+                : : [fsr] "r" (fsr));
+}
+
+/* Read Instruction Fault Status Register */
+static inline u32_t read_ifsr()
+{
+    u32_t fsr;
+
+    asm volatile("mrc p15, 0, %[fsr], c5, c0, 1 @ Read IFSR\n\t"
+                : [fsr] "=r" (fsr));
+    return fsr;
+}
+
+/* Write Instruction Fault Status Register */
+static inline void write_ifsr(u32_t fsr)
+{
+    asm volatile("mcr p15, 0, %[fsr], c5, c0, 1 @ Write IFSR\n\t"
+                : : [fsr] "r" (fsr));
+}
+
+/* Read Data Fault Address Register */
+static inline u32_t read_dfar()
+{
+    u32_t far;
+
+    asm volatile("mrc p15, 0, %[far], c6, c0, 0 @ Read DFAR\n\t"
+                : [far] "=r" (far));
+    return far;
+}
+
+/* Write Data Fault Address Register */
+static inline void write_dfar(u32_t far)
+{
+    asm volatile("mcr p15, 0, %[far], c6, c0, 0 @ Write DFAR\n\t"
+                : : [far] "r" (far));
+}
+
+/* Read Instruction Fault Address Register */
+static inline u32_t read_ifar()
+{
+    u32_t far;
+
+    asm volatile("mrc p15, 0, %[far], c6, c0, 2 @ Read IFAR\n\t"
+                : [far] "=r" (far));
+    return far;
+}
+
+/* Write Instruction Fault Address Register */
+static inline void write_ifar(u32_t far)
+{
+    asm volatile("mcr p15, 0, %[far], c6, c0, 2 @ Write IFAR\n\t"
+                : : [far] "r" (far));
+}
+
+/* Read Vector Base Address Register */
+static inline u32_t read_vbar()
+{
+    u32_t vbar;
+
+    asm volatile("mrc p15, 0, %[vbar], c12, c0, 0 @ Read VBAR\n\t"
+                : [vbar] "=r" (vbar));
+    return vbar;
+}
+
+/* Write Vector Base Address Register */
+static inline void write_vbar(u32_t vbar)
+{
+    asm volatile("mcr p15, 0, %[vbar], c12, c0, 0 @ Write VBAR\n\t"
+                : : [vbar] "r" (vbar));
+    asm volatile("dsb");
+}
+
+/* Read the Main ID Register  */
+static inline u32_t read_midr()
+{
+    u32_t id;
+
+    asm volatile("mrc p15, 0, %[id], c0, c0, 0 @ read MIDR\n\t"
+                : [id] "=r" (id));
+    return id;
+}
+
+/* Read Auxiliary Control Register */
+static inline u32_t read_actlr()
+{
+    u32_t ctl;
+
+    asm volatile("mrc p15, 0, %[ctl], c1, c0, 1 @ Read ACTLR\n\t"
+                : [ctl] "=r" (ctl));
+    return ctl;
+}
+
+/* Write Auxiliary Control Register */
+static inline void write_actlr(u32_t ctl)
+{
+    asm volatile("mcr p15, 0, %[ctl], c1, c0, 1 @ Write ACTLR\n\t"
+                : : [ctl] "r" (ctl));
+}
+
+/* Read Current Program Status Register */
+static inline u32_t read_cpsr()
+{
+    u32_t status;
+
+    asm volatile("mrs %[status], cpsr @ read CPSR"
+                : [status] "=r" (status));
+    return status;
+}
+
+/* Write Current Program Status Register */
+static inline void write_cpsr(u32_t status)
+{
+    asm volatile("msr cpsr_c, %[status] @ write CPSR"
+                : : [status] "r" (status));
+}
+
+#endif /* _ARM_CPUFUNC_H */
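All accessors above follow the same MRC/MCR idiom with a named asm operand. For illustration only (this accessor is not part of the commit), a reader for the ARMv7 Coprocessor Access Control Register would follow the same pattern:

    /* CPACR: p15, opc1=0, CRn=c1, CRm=c0, opc2=2 on ARMv7 */
    static inline u32_t read_cpacr(void)
    {
        u32_t cpacr;

        asm volatile("mrc p15, 0, %[cpacr], c1, c0, 2 @ Read CPACR\n\t"
                     : [cpacr] "=r" (cpacr));
        return cpacr;
    }
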
diff --git a/kernel/arch/arm/include/direct_utils.h b/kernel/arch/arm/include/direct_utils.h
new file mode 100644 (file)
index 0000000..5b7943d
--- /dev/null
@@ -0,0 +1,11 @@
+#ifndef MB_UTILS_H
+#define MB_UTILS_H
+
+#include "kernel/kernel.h"
+
+void direct_cls(void);
+void direct_print(const char*);
+void direct_print_char(char);
+int direct_read_char(unsigned char*);
+
+#endif
diff --git a/kernel/arch/arm/include/hw_intr.h b/kernel/arch/arm/include/hw_intr.h
new file mode 100644 (file)
index 0000000..0e747c5
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __HW_INTR_ARM_H__
+#define __HW_INTR_ARM_H__
+
+#include "kernel/kernel.h"
+void irq_handle(int irq);
+
+#define hw_intr_mask(irq)      omap3_irq_mask(irq)
+#define hw_intr_unmask(irq)    omap3_irq_unmask(irq)
+#define hw_intr_ack(irq)
+#define hw_intr_used(irq)
+#define hw_intr_not_used(irq)
+#define hw_intr_disable_all()
+
+#endif /* __HW_INTR_ARM_H__ */
diff --git a/kernel/arch/arm/include/io.h b/kernel/arch/arm/include/io.h
new file mode 100644 (file)
index 0000000..7678148
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ARM_IO_H_
+#define _ARM_IO_H_
+
+#ifndef __ASSEMBLY__
+
+#include <sys/types.h>
+
+/* Access memory-mapped I/O devices */
+#define mmio_read(a)    (*(volatile u32_t *)(a))
+#define mmio_write(a,v) (*(volatile u32_t *)(a) = (v))
+#define mmio_set(a,v)   mmio_write((a), mmio_read((a)) | (v))
+#define mmio_clear(a,v) mmio_write((a), mmio_read((a)) & ~(v))
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ARM_IO_H_ */
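Typical usage of these macros; the address below is purely illustrative (the real device addresses live in the omap_*.h headers added by this commit):

    #define DEVREG 0x48000000u              /* hypothetical mapped register */

    u32_t v = mmio_read(DEVREG);            /* read current value */
    mmio_set(DEVREG, 1u << 1);              /* read-modify-write: set a bit */
    mmio_clear(DEVREG, 1u << 1);            /* ... and clear it again */
    mmio_write(DEVREG, v);                  /* restore original value */
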
diff --git a/kernel/arch/arm/io_intr.S b/kernel/arch/arm/io_intr.S
new file mode 100644 (file)
index 0000000..60b420e
--- /dev/null
@@ -0,0 +1,14 @@
+/*     intr_disable(), intr_enable - Disable/Enable hardware interrupts. */
+/*     void intr_disable(void); */
+/*     void intr_enable(void); */
+#include <machine/asm.h>
+
+ENTRY(intr_disable)
+       dsb
+       cpsid i
+       bx lr
+
+ENTRY(intr_enable)
+       dsb
+       cpsie i
+       bx lr
diff --git a/kernel/arch/arm/kernel.lds b/kernel/arch/arm/kernel.lds
new file mode 100644 (file)
index 0000000..b14cba4
--- /dev/null
@@ -0,0 +1,49 @@
+OUTPUT_ARCH("arm")
+ENTRY(__k_unpaged_MINIX)
+
+_kern_phys_base = 0x80200000;  /* phys 4MB aligned for convenient remapping */
+_kern_vir_base =  0xF0400000;  /* map kernel high for max. user vir space */
+_kern_offset = (_kern_vir_base - _kern_phys_base);
+
+__k_unpaged__kern_offset = _kern_offset;
+__k_unpaged__kern_vir_base = _kern_vir_base;
+__k_unpaged__kern_phys_base = _kern_phys_base;
+
+SECTIONS
+{
+       . = _kern_phys_base;
+       __k_unpaged__kern_unpaged_start = .;
+
+       .unpaged_text ALIGN(4096) : { unpaged_*.o(.text) }
+       .unpaged_data ALIGN(4096) : { unpaged_*.o(.data .rodata*) }
+       __k_unpaged__kern_unpaged__edata = .;
+       __k_unpaged__edata = .;
+       .unpaged_bss  ALIGN(4096) : { unpaged_*.o(.bss COMMON) }
+       __k_unpaged__kern_unpaged_end = .;
+       __k_unpaged__end = .;
+
+       . += _kern_offset;
+
+       . = ALIGN(4096); usermapped_start = .;
+       .usermapped_glo : AT(ADDR(.usermapped_glo) - _kern_offset) { usermapped_glo*.o }
+       . = ALIGN(4096); usermapped_nonglo_start = .;
+       .usermapped : AT(ADDR(.usermapped) - _kern_offset) { usermapped_*.o }
+       . = ALIGN(4096); usermapped_end = .;
+       .text             : AT(ADDR(.text) - _kern_offset) { *(.text*) }
+       _etext = .;
+       .data ALIGN(4096) : AT(ADDR(.data) - _kern_offset) { *(.data .rodata* ) }
+       . = ALIGN(4096);
+       _edata = .;
+       .bss ALIGN(4096)  : AT(ADDR(.bss) - _kern_offset) { *(.bss* COMMON)
+               __k_unpaged__kern_size = . - _kern_vir_base;
+               _kern_size = __k_unpaged__kern_size;
+               . += 4096;
+       }
+       _end = .;
+
+      /DISCARD/ :
+       {
+               *(.ARM.exidx*)
+       }
+
+}
diff --git a/kernel/arch/arm/klib.S b/kernel/arch/arm/klib.S
new file mode 100644 (file)
index 0000000..95bfa79
--- /dev/null
@@ -0,0 +1,93 @@
+/* sections */
+
+
+#include <minix/config.h>
+#include <minix/const.h>
+#include <machine/asm.h>
+#include <machine/interrupt.h>
+#include <machine/vm.h>
+#include "archconst.h"
+#include "kernel/const.h"
+#include "sconst.h"
+#include <machine/multiboot.h>
+
+
+/*===========================================================================*/
+/*                             copy_msg_from_user                           */
+/*===========================================================================*/
+/*
+ * int copy_msg_from_user(message * user_mbuf, message * dst);
+ *
+ * Copies a message of 36 bytes from user process space to a kernel buffer. This
+ * function assumes that the process address space is installed (ttbr loaded).
+ *
+ * From the caller's point of view, this function either succeeds or returns an
+ * error, which gives the caller a chance to respond accordingly. In fact, it
+ * either succeeds or, if it generates a pagefault or other exception, the trap
+ * handler redirects execution to __user_copy_msg_pointer_failure, where the
+ * error is reported to the caller without resolving the pagefault. It is not
+ * the kernel's problem to deal with bad pointers from userspace, and the
+ * caller should return an error to userspace as if wrong values or requests
+ * were passed to the kernel.
+ */
+ENTRY(copy_msg_from_user)
+       push    {r4-r10, lr}
+       /* load the source pointer */
+       mov     r9, r0
+       /* load the destination pointer */
+       mov     r10, r1
+       /* do the copy */
+       ldm     r9,  {r0-r8}
+       stm     r10, {r0-r8}
+
+LABEL(__copy_msg_from_user_end)
+       pop     {r4-r10, lr}
+       mov     r0, #0
+       bx      lr
+
+/*===========================================================================*/
+/*                             copy_msg_to_user                             */
+/*===========================================================================*/
+/*
+ * void copy_msg_to_user(message * src, message * user_mbuf);
+ *
+ * Copies a message of 36 bytes to user process space from a kernel buffer.
+ *
+ * All the other copy_msg_from_user() comments apply here as well!
+ */
+ENTRY(copy_msg_to_user)
+       push    {r4-r10, lr}
+       /* load the source pointer */
+       mov     r9, r0
+       /* load the destination pointer */
+       mov     r10, r1
+       /* do the copy */
+       ldm     r9,  {r0-r8}
+       stm     r10, {r0-r8}
+
+LABEL(__copy_msg_to_user_end)
+       pop     {r4-r10, lr}
+       mov     r0, #0
+       bx      lr
+
+/*
+ * If a function from a selected set of copies from or to userspace fails, it
+ * is because of a bad pointer supplied by userspace. We have to clean up and
+ * return -1 to indicate that something went wrong. The place it was called
+ * from has to handle this situation. The exception handler redirects us here
+ * to continue, clean up and report the error.
+ */
+ENTRY(__user_copy_msg_pointer_failure)
+       pop     {r4-r10, lr}
+       mov     r0, #-1
+       bx      lr
+
+ENTRY(interrupts_enable)
+       dsb
+       cpsie i
+       bx lr
+
+ENTRY(interrupts_disable)
+       dsb
+       cpsid i
+       bx lr
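The ldm/stm pairs above copy the 36-byte message as nine 32-bit words in a single burst. A C-level sketch of the same copy (illustrative; the real code is assembly so the exception handler can redirect execution at the labels above):

    int copy_msg_sketch(const message *src, message *dst)
    {
        const u32_t *s = (const u32_t *) src;
        u32_t *d = (u32_t *) dst;
        int i;

        for (i = 0; i < 9; i++)    /* 9 words x 4 bytes = 36 bytes */
                d[i] = s[i];
        return 0;                  /* the fault path returns -1 instead */
    }
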
diff --git a/kernel/arch/arm/memory.c b/kernel/arch/arm/memory.c
new file mode 100644 (file)
index 0000000..2f22bb5
--- /dev/null
@@ -0,0 +1,775 @@
+
+#include "kernel/kernel.h"
+#include "kernel/proc.h"
+#include "kernel/vm.h"
+
+#include <machine/vm.h>
+
+#include <minix/type.h>
+#include <minix/syslib.h>
+#include <minix/cpufeature.h>
+#include <string.h>
+#include <assert.h>
+#include <signal.h>
+#include <stdlib.h>
+
+#include <machine/vm.h>
+
+#include "arch_proto.h"
+#include "kernel/proto.h"
+#include "kernel/debug.h"
+
+phys_bytes device_mem_vaddr = 0;
+
+#define HASPT(procptr) ((procptr)->p_seg.p_ttbr != 0)
+static int nfreepdes = 0;
+#define MAXFREEPDES    2
+static int freepdes[MAXFREEPDES];
+
+static u32_t phys_get32(phys_bytes v);
+
+void mem_clear_mapcache(void)
+{
+       int i;
+       for(i = 0; i < nfreepdes; i++) {
+               struct proc *ptproc = get_cpulocal_var(ptproc);
+               int pde = freepdes[i];
+               u32_t *ptv;
+               assert(ptproc);
+               ptv = ptproc->p_seg.p_ttbr_v;
+               assert(ptv);
+               ptv[pde] = 0;
+       }
+}
+
+/* This function sets up a mapping from within the kernel's address
+ * space to any other area of memory, either straight physical
+ * memory (pr == NULL) or a process view of memory, in 1MB windows.
+ * I.e., it maps in 1MB chunks of virtual (or physical) address space
+ * to 1MB chunks of kernel virtual address space.
+ *
+ * It recognizes pr already being in memory as a special case (no
+ * mapping required).
+ *
+ * The target (i.e. in-kernel) mapping area is one of the freepdes[]
+ * slots that VM has earlier told the kernel are available. It is
+ * identified by the 'pde' parameter. This value can be chosen freely
+ * by the caller, as long as it is in range (i.e. 0 or higher and corresponds
+ * to a known freepde slot). It is up to the caller to keep track of which
+ * freepdes are in use, and to determine which ones are free to use.
+ *
+ * The logical number supplied by the caller is translated into an actual
+ * pde number to be used, and a pointer to it (linear address) is returned
+ * for actual use by phys_copy or memset.
+ */
+static phys_bytes createpde(
+       const struct proc *pr,  /* Requested process, NULL for physical. */
+       const phys_bytes linaddr,/* Address after segment translation. */
+       phys_bytes *bytes,      /* Size of chunk, function may truncate it. */
+       int free_pde_idx,       /* index of the free slot to use */
+       int *changed            /* If mapping is made, this is set to 1. */
+       )
+{
+       u32_t pdeval;
+       phys_bytes offset;
+       int pde;
+
+       assert(free_pde_idx >= 0 && free_pde_idx < nfreepdes);
+       pde = freepdes[free_pde_idx];
+       assert(pde >= 0 && pde < 4096);
+
+       if(pr && ((pr == get_cpulocal_var(ptproc)) || iskernelp(pr))) {
+               /* Process memory is requested, and
+                * it's a process that is already in current page table, or
+                * the kernel, which is always there.
+                * Therefore linaddr is valid directly, with the requested
+                * size.
+                */
+               return linaddr;
+       }
+
+       if(pr) {
+               /* Requested address is in a process that is not currently
+                * accessible directly. Grab the PDE entry of that process'
+                * page table that corresponds to the requested address.
+                */
+               assert(pr->p_seg.p_ttbr_v);
+               pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
+       } else {
+               /* Requested address is physical. Make up the PDE entry. */
+               pdeval = (linaddr & ARM_VM_SECTION_MASK) |
+                       ARM_VM_SECTION |
+                       ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
+       }
+
+       /* Write the pde value that we need into a pde that the kernel
+        * can access, into the currently loaded page table so it becomes
+        * visible.
+        */
+       assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
+       if(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] != pdeval) {
+               get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] = pdeval;
+               *changed = 1;
+       }
+
+       /* Memory is now available, but only the 1MB window of virtual
+        * address space that we have mapped; calculate how much of
+        * the requested range is visible and return that in *bytes,
+        * if that is less than the requested range.
+        */
+       offset = linaddr & ARM_VM_OFFSET_MASK_1MB; /* Offset in 1MB window. */
+       *bytes = MIN(*bytes, ARM_BIG_PAGE_SIZE - offset);
+
+       /* Return the linear address of the start of the new mapping. */
+       return ARM_BIG_PAGE_SIZE*pde + offset;
+}
+
+
+/*===========================================================================*
+ *                           check_resumed_caller                            *
+ *===========================================================================*/
+static int check_resumed_caller(struct proc *caller)
+{
+       /* Returns the result from VM if caller was resumed, otherwise OK. */
+       if (caller && (caller->p_misc_flags & MF_KCALL_RESUME)) {
+               assert(caller->p_vmrequest.vmresult != VMSUSPEND);
+               return caller->p_vmrequest.vmresult;
+       }
+
+       return OK;
+}
+  
+/*===========================================================================*
+ *                             lin_lin_copy                                 *
+ *===========================================================================*/
+static int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr, 
+       struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
+{
+       u32_t addr;
+       proc_nr_t procslot;
+
+       assert(get_cpulocal_var(ptproc));
+       assert(get_cpulocal_var(proc_ptr));
+       assert(read_ttbr0() == get_cpulocal_var(ptproc)->p_seg.p_ttbr);
+
+       procslot = get_cpulocal_var(ptproc)->p_nr;
+
+       assert(procslot >= 0 && procslot < ARM_VM_DIR_ENTRIES);
+
+       if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
+       if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
+       assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
+       assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
+       if(srcproc) assert(!RTS_ISSET(srcproc, RTS_VMINHIBIT));
+       if(dstproc) assert(!RTS_ISSET(dstproc, RTS_VMINHIBIT));
+
+       while(bytes > 0) {
+               phys_bytes srcptr, dstptr;
+               vir_bytes chunk = bytes;
+               int changed = 0;
+
+#ifdef CONFIG_SMP
+               unsigned cpu = cpuid;
+
+               if (srcproc && GET_BIT(srcproc->p_stale_tlb, cpu)) {
+                       changed = 1;
+                       UNSET_BIT(srcproc->p_stale_tlb, cpu);
+               }
+               if (dstproc && GET_BIT(dstproc->p_stale_tlb, cpu)) {
+                       changed = 1;
+                       UNSET_BIT(dstproc->p_stale_tlb, cpu);
+               }
+#endif
+
+               /* Set up 1MB ranges. */
+               srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
+               dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
+               if(changed) {
+                       reload_ttbr0();
+                       refresh_tlb();
+               }
+               /* Copy pages. */
+               PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);
+
+               if(addr) {
+                       /* If addr is nonzero, a page fault was caught. */
+
+                       if(addr >= srcptr && addr < (srcptr + chunk)) {
+                               return EFAULT_SRC;
+                       }
+                       if(addr >= dstptr && addr < (dstptr + chunk)) {
+                               return EFAULT_DST;
+                       }
+
+                       panic("lin_lin_copy fault out of range");
+
+                       /* Not reached. */
+                       return EFAULT;
+               }
+
+               /* Update counter and addresses for next iteration, if any. */
+               bytes -= chunk;
+               srclinaddr += chunk;
+               dstlinaddr += chunk;
+       }
+
+       if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
+       if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
+       assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
+       assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
+
+       return OK;
+}
+
+static u32_t phys_get32(phys_bytes addr)
+{
+       u32_t v;
+       int r;
+
+       if((r=lin_lin_copy(NULL, addr, 
+               proc_addr(SYSTEM), (phys_bytes) &v, sizeof(v))) != OK) {
+               panic("lin_lin_copy for phys_get32 failed: %d",  r);
+       }
+
+       return v;
+}
+
+/*===========================================================================*
+ *                              umap_virtual                                 *
+ *===========================================================================*/
+phys_bytes umap_virtual(
+  register struct proc *rp,     /* pointer to proc table entry for process */
+  int seg,                      /* T, D, or S segment */
+  vir_bytes vir_addr,           /* virtual address in bytes within the seg */
+  vir_bytes bytes)              /* # of bytes to be copied */
+{
+       phys_bytes phys = 0;
+
+       if(vm_lookup(rp, vir_addr, &phys, NULL) != OK) {
+               printf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%x: 0x%lx failed\n", rp->p_name, seg, vir_addr);
+               phys = 0;
+       } else {
+               if(phys == 0)
+                       panic("vm_lookup returned phys: %d",  phys);
+       }
+
+       if(phys == 0) {
+               printf("SYSTEM:umap_virtual: lookup failed\n");
+               return 0;
+       }
+
+       /* Now make sure addresses are contiguous in physical memory
+        * so that the umap makes sense.
+        */
+       if(bytes > 0 && vm_lookup_range(rp, vir_addr, NULL, bytes) != bytes) {
+               printf("umap_virtual: %s: %lu at 0x%lx (vir 0x%lx) not contiguous\n",
+                       rp->p_name, bytes, vir_addr, vir_addr);
+               return 0;
+       }
+
+       /* phys must be larger than 0 (or the caller will think the call
+        * failed), and address must not cross a page boundary.
+        */
+       assert(phys);
+
+       return phys;
+}
+
+
+/*===========================================================================*
+ *                              vm_lookup                                    *
+ *===========================================================================*/
+int vm_lookup(const struct proc *proc, const vir_bytes virtual,
+ phys_bytes *physical, u32_t *ptent)
+{
+       u32_t *root, *pt;
+       int pde, pte;
+       u32_t pde_v, pte_v;
+
+       assert(proc);
+       assert(physical);
+       assert(!isemptyp(proc));
+       assert(HASPT(proc));
+
+       /* Retrieve page directory entry. */
+       root = (u32_t *) proc->p_seg.p_ttbr;
+       assert(!((u32_t) root % ARM_PAGEDIR_SIZE));
+       pde = ARM_VM_PDE(virtual);
+       assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
+       pde_v = phys_get32((u32_t) (root + pde));
+
+       if(!(pde_v & ARM_VM_PDE_PRESENT)) {
+               return EFAULT;
+       }
+
+       /* We don't expect to ever see this. */
+       if(pde_v & ARM_VM_BIGPAGE) {
+               *physical = pde_v & ARM_VM_SECTION_MASK;
+               if(ptent) *ptent = pde_v;
+               *physical += virtual & ARM_VM_OFFSET_MASK_1MB;
+       } else {
+               /* Retrieve page table entry. */
+               pt = (u32_t *) (pde_v & ARM_VM_PDE_MASK);
+               assert(!((u32_t) pt % ARM_PAGETABLE_SIZE));
+               pte = ARM_VM_PTE(virtual);
+               assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
+               pte_v = phys_get32((u32_t) (pt + pte));
+               if(!(pte_v & ARM_VM_PTE_PRESENT)) {
+                       return EFAULT;
+               }
+
+               if(ptent) *ptent = pte_v;
+
+               /* Actual address now known; retrieve it and add page offset. */
+               *physical = pte_v & ARM_VM_PTE_MASK;
+               *physical += virtual % ARM_PAGE_SIZE;
+       }
+
+       return OK;
+}
+
+/*===========================================================================*
+ *                             vm_lookup_range                              *
+ *===========================================================================*/
+size_t vm_lookup_range(const struct proc *proc, vir_bytes vir_addr,
+       phys_bytes *phys_addr, size_t bytes)
+{
+       /* Look up the physical address corresponding to linear virtual address
+        * 'vir_addr' for process 'proc'. Return the size of the range covered
+        * by contiguous physical memory starting from that address; this may
+        * be anywhere between 0 and 'bytes' inclusive. If the return value is
+        * nonzero, and 'phys_addr' is non-NULL, 'phys_addr' will be set to the
+        * base physical address of the range. 'vir_addr' and 'bytes' need not
+        * be page-aligned, but the caller must already have verified that
+        * the given linear range is valid for the given process.
+        */
+       phys_bytes phys, next_phys;
+       size_t len;
+
+       assert(proc);
+       assert(bytes > 0);
+       assert(HASPT(proc));
+
+       /* Look up the first page. */
+       if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
+               return 0;
+
+       if (phys_addr != NULL)
+               *phys_addr = phys;
+
+       len = ARM_PAGE_SIZE - (vir_addr % ARM_PAGE_SIZE);
+       vir_addr += len;
+       next_phys = phys + len;
+
+       /* Look up any next pages and test physical contiguity. */
+       while (len < bytes) {
+               if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
+                       break;
+
+               if (next_phys != phys)
+                       break;
+
+               len += ARM_PAGE_SIZE;
+               vir_addr += ARM_PAGE_SIZE;
+               next_phys += ARM_PAGE_SIZE;
+       }
+
+       /* We might now have overshot the requested length somewhat. */
+       return MIN(bytes, len);
+}
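+
+/* A worked example of the arithmetic above, assuming ARM_PAGE_SIZE is
+ * 4 KiB (0x1000): for vir_addr = 0x1234 and bytes = 0x2000, the first
+ * lookup covers the rest of the first page, len = 0x1000 - 0x234 = 0xdcc.
+ * Each further physically contiguous page adds 0x1000, and the final
+ * MIN(bytes, len) clips the overshoot back to the requested 0x2000.
+ */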
+
+/*===========================================================================*
+ *                              vm_suspend                                *
+ *===========================================================================*/
+static void vm_suspend(struct proc *caller, const struct proc *target,
+       const vir_bytes linaddr, const vir_bytes len, const int type)
+{
+       /* This range is not OK for this process. Set parameters  
+        * of the request and notify VM about the pending request. 
+        */                                                             
+       assert(!RTS_ISSET(caller, RTS_VMREQUEST));
+       assert(!RTS_ISSET(target, RTS_VMREQUEST));
+
+       RTS_SET(caller, RTS_VMREQUEST);
+
+       caller->p_vmrequest.req_type = VMPTYPE_CHECK;
+       caller->p_vmrequest.target = target->p_endpoint;
+       caller->p_vmrequest.params.check.start = linaddr;
+       caller->p_vmrequest.params.check.length = len;
+       caller->p_vmrequest.params.check.writeflag = 1;
+       caller->p_vmrequest.type = type;
+                                                       
+       /* Connect caller on vmrequest wait queue. */   
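+       /* The queue head lives in 'vmrequest'; VM need only be signalled
+        * on the empty-to-nonempty transition, since one SIGKMEM serves
+        * all queued requests.
+        */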
+       if(!(caller->p_vmrequest.nextrequestor = vmrequest))
+               if(OK != send_sig(VM_PROC_NR, SIGKMEM))
+                       panic("send_sig failed");
+       vmrequest = caller;
+}
+
+/*===========================================================================*
+ *                             vm_check_range                               *
+ *===========================================================================*/
+int vm_check_range(struct proc *caller, struct proc *target,
+       vir_bytes vir_addr, size_t bytes)
+{
+       /* Public interface to vm_suspend(), for use by kernel calls. On behalf
+        * of 'caller', call into VM to check linear virtual address range of
+        * process 'target', starting at 'vir_addr', for 'bytes' bytes. This
+        * function assumes that it will be called twice if VM returned an error
+        * the first time (since nothing has changed in that case), and will
+        * then return the error code resulting from the first call. Upon the
+        * first call, a non-success error code is returned as well.
+        */
+       int r;
+
+       if ((caller->p_misc_flags & MF_KCALL_RESUME) &&
+                       (r = caller->p_vmrequest.vmresult) != OK)
+               return r;
+
+       vm_suspend(caller, target, vir_addr, bytes, VMSTYPE_KERNELCALL);
+
+       return VMSUSPEND;
+}
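+
+/* An illustrative calling pattern (a sketch, not code from this commit):
+ * a kernel call handler does
+ *
+ *     if ((r = vm_check_range(caller, target, addr, len)) != OK)
+ *             return r;       (first call: returns VMSUSPEND)
+ *
+ * and is re-executed with MF_KCALL_RESUME set once VM has handled the
+ * request; the second call then returns VM's verdict from
+ * p_vmrequest.vmresult.
+ */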
+
+/*===========================================================================*
+ *                              delivermsg                                *
+ *===========================================================================*/
+void delivermsg(struct proc *rp)
+{
+       int r = OK;
+
+       assert(rp->p_misc_flags & MF_DELIVERMSG);
+       assert(rp->p_delivermsg.m_source != NONE);
+
+       if (copy_msg_to_user(&rp->p_delivermsg,
+                               (message *) rp->p_delivermsg_vir)) {
+               printf("WARNING wrong user pointer 0x%08lx from "
+                               "process %s / %d\n",
+                               rp->p_delivermsg_vir,
+                               rp->p_name,
+                               rp->p_endpoint);
+               r = EFAULT;
+       }
+
+       /* Indicate message has been delivered; address is 'used'. */
+       rp->p_delivermsg.m_source = NONE;
+       rp->p_misc_flags &= ~MF_DELIVERMSG;
+
+       if(!(rp->p_misc_flags & MF_CONTEXT_SET)) {
+               rp->p_reg.retreg = r;
+       }
+}
+
+/*===========================================================================*
+ *                                 vm_memset                                 *
+ *===========================================================================*/
+int vm_memset(struct proc* caller, endpoint_t who, phys_bytes ph, int c,
+       phys_bytes count)
+{
+       u32_t pattern;
+       struct proc *whoptr = NULL;
+       phys_bytes cur_ph = ph;
+       phys_bytes left = count;
+       phys_bytes ptr, chunk, pfa = 0;
+       int new_ttbr, r = OK;
+
+       if ((r = check_resumed_caller(caller)) != OK)
+               return r;
+
+       /* NONE for physical, otherwise virtual */
+       if (who != NONE && !(whoptr = endpoint_lookup(who)))
+               return ESRCH;
+
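+       /* Replicate the fill byte into all four byte lanes of a word, e.g.
+        * c = 0x5a yields pattern = 0x5a5a5a5a, so phys_memset() can store
+        * a whole word at a time.
+        */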
+       c &= 0xFF;
+       pattern = c | (c << 8) | (c << 16) | (c << 24);
+
+       assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
+       assert(!catch_pagefaults);
+       catch_pagefaults = 1;
+
+       /* We can memset as many bytes as we have remaining,
+        * or as many as remain in the 1MB chunk we mapped in.
+        */
+       while (left > 0) {
+               new_ttbr = 0;
+               chunk = left;
+               ptr = createpde(whoptr, cur_ph, &chunk, 0, &new_ttbr);
+
+               if (new_ttbr) {
+                       reload_ttbr0();
+                       refresh_tlb();
+               }
+               /* If a page fault happens, pfa is nonzero */
+               if ((pfa = phys_memset(ptr, pattern, chunk))) {
+
+                       /* If a process pagefaults, VM may help out */
+                       if (whoptr) {
+                               vm_suspend(caller, whoptr, ph, count,
+                                                  VMSTYPE_KERNELCALL);
+                               assert(catch_pagefaults);
+                               catch_pagefaults = 0;
+                               return VMSUSPEND;
+                       }
+
+                       /* Page fault during a physical copy?! */
+                       panic("vm_memset: pf %lx addr=%lx len=%lu\n",
+                                               pfa, ptr, chunk);
+               }
+
+               cur_ph += chunk;
+               left -= chunk;
+       }
+
+       assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
+       assert(catch_pagefaults);
+       catch_pagefaults = 0;
+
+       return OK;
+}
+
+/*===========================================================================*
+ *                             virtual_copy_f                               *
+ *===========================================================================*/
+int virtual_copy_f(struct proc * caller,
+       struct vir_addr *src_addr,      /* source virtual address */
+       struct vir_addr *dst_addr,      /* destination virtual address */
+       vir_bytes bytes,                /* # of bytes to copy */
+       int vmcheck)                    /* if nonzero, can return VMSUSPEND */
+{
+/* Copy bytes from virtual address src_addr to virtual address dst_addr. */
+  struct vir_addr *vir_addr[2];        /* virtual source and destination address */
+  int i, r;
+  struct proc *procs[2];
+
+  assert((vmcheck && caller) || (!vmcheck && !caller));
+
+  /* Check copy count. */
+  if (bytes <= 0) return(EDOM);
+
+  /* Do some more checks and map virtual addresses to physical addresses. */
+  vir_addr[_SRC_] = src_addr;
+  vir_addr[_DST_] = dst_addr;
+
+  for (i=_SRC_; i<=_DST_; i++) {
+       endpoint_t proc_e = vir_addr[i]->proc_nr_e;
+       int proc_nr;
+       struct proc *p;
+
+       if(proc_e == NONE) {
+               p = NULL;
+       } else {
+               if(!isokendpt(proc_e, &proc_nr)) {
+                       printf("virtual_copy: no reasonable endpoint\n");
+                       return ESRCH;
+               }
+               p = proc_addr(proc_nr);
+       }
+
+       procs[i] = p;
+  }
+
+  if ((r = check_resumed_caller(caller)) != OK)
+       return r;
+
+  if((r=lin_lin_copy(procs[_SRC_], vir_addr[_SRC_]->offset,
+       procs[_DST_], vir_addr[_DST_]->offset, bytes)) != OK) {
+       struct proc *target = NULL;
+       phys_bytes lin;
+       if(r != EFAULT_SRC && r != EFAULT_DST)
+               panic("lin_lin_copy failed: %d",  r);
+       if(!vmcheck || !caller) {
+               return r;
+       }
+
+       if(r == EFAULT_SRC) {
+               lin = vir_addr[_SRC_]->offset;
+               target = procs[_SRC_];
+       } else if(r == EFAULT_DST) {
+               lin = vir_addr[_DST_]->offset;
+               target = procs[_DST_];
+       } else {
+               panic("r strange: %d",  r);
+       }
+
+       assert(caller);
+       assert(target);
+
+       vm_suspend(caller, target, lin, bytes, VMSTYPE_KERNELCALL);
+       return VMSUSPEND;
+  }
+
+  return OK;
+}
+
+/*===========================================================================*
+ *                             data_copy                                    *
+ *===========================================================================*/
+int data_copy(const endpoint_t from_proc, const vir_bytes from_addr,
+       const endpoint_t to_proc, const vir_bytes to_addr,
+       size_t bytes)
+{
+  struct vir_addr src, dst;
+
+  src.offset = from_addr;
+  dst.offset = to_addr;
+  src.proc_nr_e = from_proc;
+  dst.proc_nr_e = to_proc;
+  assert(src.proc_nr_e != NONE);
+  assert(dst.proc_nr_e != NONE);
+
+  return virtual_copy(&src, &dst, bytes);
+}
+
+/*===========================================================================*
+ *                             data_copy_vmcheck                            *
+ *===========================================================================*/
+int data_copy_vmcheck(struct proc * caller,
+       const endpoint_t from_proc, const vir_bytes from_addr,
+       const endpoint_t to_proc, const vir_bytes to_addr,
+       size_t bytes)
+{
+  struct vir_addr src, dst;
+
+  src.offset = from_addr;
+  dst.offset = to_addr;
+  src.proc_nr_e = from_proc;
+  dst.proc_nr_e = to_proc;
+  assert(src.proc_nr_e != NONE);
+  assert(dst.proc_nr_e != NONE);
+
+  return virtual_copy_vmcheck(caller, &src, &dst, bytes);
+}
+
+void memory_init(void)
+{
+       assert(nfreepdes == 0);
+
+       freepdes[nfreepdes++] = kinfo.freepde_start++;
+       freepdes[nfreepdes++] = kinfo.freepde_start++;
+
+       assert(kinfo.freepde_start < ARM_VM_DIR_ENTRIES);
+       assert(nfreepdes == 2);
+       assert(nfreepdes <= MAXFREEPDES);
+}
+
+/*===========================================================================*
+ *                             arch_proc_init                               *
+ *===========================================================================*/
+void arch_proc_init(struct proc *pr, const u32_t ip, const u32_t sp, char *name)
+{
+       arch_proc_reset(pr);
+       strcpy(pr->p_name, name);
+
+       /* set custom state we know */
+       pr->p_reg.pc = ip;
+       pr->p_reg.sp = sp;
+}
+
+static int device_mem_mapping_index = -1,
+       usermapped_glo_index = -1,
+       usermapped_index = -1, first_um_idx = -1;
+
+char *device_mem;
+
+extern char usermapped_start, usermapped_end, usermapped_nonglo_start;
+
+int arch_phys_map(const int index,
+                       phys_bytes *addr,
+                       phys_bytes *len,
+                       int *flags)
+{
+       static int first = 1;
+       int freeidx = 0;
+       u32_t glo_len = (u32_t) &usermapped_nonglo_start -
+                       (u32_t) &usermapped_start;
+
+       if(first) {
+               device_mem_mapping_index = freeidx++;
+               if(glo_len > 0) {
+                       usermapped_glo_index = freeidx++;
+               }
+
+               usermapped_index = freeidx++;
+               first_um_idx = usermapped_index;
+               if(usermapped_glo_index != -1)
+                       first_um_idx = usermapped_glo_index;
+               first = 0;
+       }
+
+       if(index == usermapped_glo_index) {
+               *addr = vir2phys(&usermapped_start);
+               *len = glo_len;
+               *flags = VMMF_USER | VMMF_GLO;
+               return OK;
+       }
+       else if(index == usermapped_index) {
+               *addr = vir2phys(&usermapped_nonglo_start);
+               *len = (u32_t) &usermapped_end -
+                       (u32_t) &usermapped_nonglo_start;
+               *flags = VMMF_USER;
+               return OK;
+       }
+       else if (index == device_mem_mapping_index) {
+               /* map device memory */
+               *addr = 0x48000000;
+               *len =  0x02000000;
+               *flags = VMMF_UNCACHED | VMMF_WRITE;
+               return OK;
+       }
+
+       return EINVAL;
+}
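+
+/* Illustrative use (a sketch; the actual caller is VM, not shown in this
+ * commit): mappings are discovered by probing indices from 0 upward until
+ * EINVAL, e.g.
+ *
+ *     for (i = 0; arch_phys_map(i, &addr, &len, &flags) == OK; i++)
+ *             ... map [addr, addr+len) with the given flags ...
+ *
+ * which on ARM yields the device-memory window plus the one or two
+ * usermapped kernel-info ranges set up above.
+ */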
+
+int arch_phys_map_reply(const int index, const vir_bytes addr)
+{
+       if(index == first_um_idx) {
+               u32_t usermapped_offset;
+               assert(addr > (u32_t) &usermapped_start);
+               usermapped_offset = addr - (u32_t) &usermapped_start;
+               memset(&minix_kerninfo, 0, sizeof(minix_kerninfo));
+#define FIXEDPTR(ptr) (void *) ((u32_t)ptr + usermapped_offset)
+#define FIXPTR(ptr) ptr = FIXEDPTR(ptr)
+#define ASSIGN(minixstruct) minix_kerninfo.minixstruct = FIXEDPTR(&minixstruct)
+               ASSIGN(kinfo);
+               ASSIGN(machine);
+               ASSIGN(kmessages);
+               ASSIGN(loadinfo);
+
+               /* adjust the pointers of the functions and the struct
+                * itself to the user-accessible mapping
+                */
+               minix_kerninfo.kerninfo_magic = KERNINFO_MAGIC;
+               minix_kerninfo.minix_feature_flags = minix_feature_flags;
+               minix_kerninfo_user = (vir_bytes) FIXEDPTR(&minix_kerninfo);
+
+               return OK;
+       }
+
+       if(index == usermapped_index) return OK;
+
+       if (index == device_mem_mapping_index) {
+               device_mem_vaddr =  addr;
+               return OK;
+       }
+
+       return EINVAL;
+}
+
+int arch_enable_paging(struct proc * caller)
+{
+       assert(caller->p_seg.p_ttbr);
+
+       /* load caller's page table */
+       switch_address_space(caller);
+
+       device_mem = (char *) device_mem_vaddr;
+
+       return OK;
+}
+
+void release_address_space(struct proc *pr)
+{
+       pr->p_seg.p_ttbr_v = NULL;
+       refresh_tlb();
+}
diff --git a/kernel/arch/arm/mpx.S b/kernel/arch/arm/mpx.S
new file mode 100644 (file)
index 0000000..d40a1fd
--- /dev/null
@@ -0,0 +1,295 @@
+/* This file is part of the lowest layer of the MINIX kernel.  (The other part 
+ * is "proc.c".)  The lowest layer does process switching and message handling. 
+ *
+ * The kernel is entered because of kernel calls, IPC calls, interrupts or
+ * exceptions. On entry the CPU is switched to SVC mode, so the kernel stack
+ * is used. The user context is saved to the proc table and the handler of
+ * the event is called. Once the handler is done, switch_to_user() is called
+ * to pick a new process, finish what needs to be done for the next process
+ * to run, set its context, and switch to userspace.
+ */
+
+#include "kernel/kernel.h" /* configures the kernel */
+
+/* sections */
+
+#include <machine/vm.h>
+#include "../../kernel.h"
+#include <minix/config.h>
+#include <minix/const.h>
+#include <minix/com.h>
+#include <machine/asm.h>
+#include <machine/interrupt.h>
+#include "archconst.h"
+#include "kernel/const.h"
+#include "kernel/proc.h"
+#include "sconst.h"
+#include <machine/multiboot.h>
+#include <machine/ipcconst.h>
+#include <machine/cpu.h>
+#include "omap_intr.h"
+
+#include "arch_proto.h" /* K_STACK_SIZE */
+
+IMPORT(svc_stack)
+
+/*
+ * Adjust lr, switch to SVC mode, and push pc/psr when exception triggered
+ * The 'lr_offset' argument holds the adjustment. It differs based on
+ * which mode the CPU is in.
+ */
+.macro switch_to_svc lr_offset
+       sub     lr, lr, #\lr_offset
+       srsdb   #MODE_SVC!
+       cps     #MODE_SVC
+.endm
+
+/*
+ * Test if the exception/interrupt occurred in the kernel.
+ * Jump to 'label' argument if it occurred in the kernel.
+ *
+ * NOTE: switch_to_svc must be called first
+ */
+.macro test_int_in_kernel, label
+       push    {r3}
+       ldr     r3, [sp, #8] /* spsr */
+       and     r3, r3, #PSR_MODE_MASK
+       cmp     r3, #MODE_USR
+       pop     {r3}
+       bne     \label  /* In-kernel handling */
+.endm
+
+/* Save the register context to the proc structure */
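+/* Assumed SVC stack layout while the macro runs, i.e. after the srsdb in
+ * switch_to_svc (or svc_entry) and the push of lr below: [sp] = saved svc
+ * lr, [sp, #4] = exception return address, [sp, #8] = saved spsr, and
+ * [sp, #12] = pointer to the current proc structure, kept in the word the
+ * banked SVC sp points at between kernel entries.
+ */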
+.macro save_process_ctx
+       push    {lr}
+       ldr     lr, [sp, #12] /* proc ptr */
+       stm     lr, {r0-r14}^
+       ldr     r12, [sp, #8]
+       str     r12, [lr, #PSREG]
+       ldr     r12, [sp, #4]
+       str     r12, [lr, #PCREG]
+       pop     {lr}
+       add     sp, sp, #8
+.endm
+
+.macro exception_handler exc_name, exc_num, lr_offset
+ENTRY(\exc_name\()_entry)
+       switch_to_svc \lr_offset
+       test_int_in_kernel \exc_name\()_entry_nested
+
+\exc_name\()_entry_from_user:
+       save_process_ctx
+
+       /* save the pointer to the current process */
+       ldr     fp, [sp]
+       /* save the exception pc (saved lr_user) */
+       ldr     r4, [fp, #PCREG]
+
+       /* stop user process cycles */
+       mov     r0, fp  /* first param: caller proc ptr */
+       mov     fp, #0  /* for stack trace */
+       bl      _C_LABEL(context_stop)
+
+       /*
+        * push a pointer to the interrupt state pushed by the cpu and the
+        * vector number pushed by the vector handler just before calling
+        * exception_entry and call the exception handler.
+        */
+       mov     r0, #0  /* it's not a nested exception */
+       mov     r1, r4          /* saved lr */
+       mov     r2, #\exc_num   /* vector number */
+       bl      _C_LABEL(exception_handler)
+
+       b       _C_LABEL(switch_to_user)
+
+\exc_name\()_entry_nested:
+       push    {r0-r12, lr}
+       mov     r0, #1  /* it's a nested exception */
+       add     r1, sp, #56     /* saved lr */
+       mov     r2, #\exc_num   /* vector number */
+       bl      _C_LABEL(exception_handler)
+       pop     {r0-r12, lr}
+       rfeia   sp!
+.endm
+
+
+/* Exception handlers */
+exception_handler data_abort DATA_ABORT_VECTOR 8
+exception_handler prefetch_abort PREFETCH_ABORT_VECTOR 4
+exception_handler undefined_inst UNDEFINED_INST_VECTOR 4
+
+
+ENTRY(irq_entry)
+       switch_to_svc 4
+       test_int_in_kernel irq_entry_from_kernel
+
+irq_entry_from_user:
+       save_process_ctx
+
+       /* save the pointer to the current process */
+       ldr     fp, [sp]
+
+       push    {fp}    /* save caller proc ptr */
+
+       /* stop user process cycles */
+       mov     r0, fp  /* first param: caller proc ptr */
+       mov     fp, #0  /* for stack trace */
+       bl      _C_LABEL(context_stop)
+
+       /* get irq num */
+       ldr     r3, =OMAP3_INTR_SIR_IRQ
+       ldr     r0, [r3]
+       and     r0, r0, #OMAP3_INTR_ACTIVEIRQ_MASK /* irq */
+       /* call handler */
+       bl      _C_LABEL(irq_handle)    /* irq_handle(irq) */
+
+       pop     {fp}    /* caller proc ptr */
+
+       /* allow new interrupts */
+       mov     r1, #OMAP3_INTR_NEWIRQAGR
+       ldr     r3, =OMAP3_INTR_CONTROL
+       str     r1, [r3]
+
+       /* data synchronization barrier */
+       dsb
+
+       b       _C_LABEL(switch_to_user)
+
+irq_entry_from_kernel:
+       push    {r0-r12, lr}
+       bl      _C_LABEL(context_stop_idle)
+
+       /* get irq num */
+       ldr     r3, =OMAP3_INTR_SIR_IRQ
+       ldr     r0, [r3]
+       and     r0, r0, #OMAP3_INTR_ACTIVEIRQ_MASK /* irq */
+       /* call handler */
+       bl      _C_LABEL(irq_handle)    /* irq_handle(irq) */
+
+       /* allow new interrupts */
+       mov     r1, #OMAP3_INTR_NEWIRQAGR
+       ldr     r3, =OMAP3_INTR_CONTROL
+       str     r1, [r3]
+
+       /* data synchronization barrier */
+       dsb
+
+       pop     {r0-r12, lr}
+       rfeia   sp!
+
+
+/*
+ * supervisor call (SVC) kernel entry point
+ */
+ENTRY(svc_entry)
+       srsdb   #MODE_SVC!
+       save_process_ctx
+
+       /* save the pointer to the current process */
+       ldr     fp, [sp]
+
+       cmp     r3, #KERVEC
+       beq     kernel_call_entry
+       cmp     r3, #IPCVEC
+       beq     ipc_entry
+       b       invalid_svc
+
+/*
+ * kernel call is only from a process to kernel
+ */
+ENTRY(kernel_call_entry)
+       /*
+        * pass the syscall arguments from userspace to the handler.
+        * save_process_ctx() does not clobber these registers; they still
+        * hold the values userspace set.
+        */
+       push    {fp}    /* save caller proc ptr */
+       push    {r0}    /* save msg ptr so it's not clobbered */
+
+       /* stop user process cycles */
+       mov     r0, fp  /* first param: caller proc ptr */
+       mov     fp, #0  /* for stack trace */
+       bl      _C_LABEL(context_stop)
+
+       pop     {r0} /* first param: msg ptr */
+       pop     {r1} /* second param: caller proc ptr */
+       bl      _C_LABEL(kernel_call)
+
+       b       _C_LABEL(switch_to_user)
+
+/*
+ * IPC is only from a process to kernel
+ */
+ENTRY(ipc_entry)
+       /*
+        * pass the syscall arguments from userspace to the handler.
+        * save_process_ctx() does not clobber these registers; they still
+        * hold the values userspace set.
+        */
+       push    {fp}    /* save caller proc ptr */
+       push    {r0-r2} /* save regs so they're not clobbered */
+
+       /* stop user process cycles */
+       mov     r0, fp  /* first param: caller proc ptr */
+       mov     fp, #0  /* for stack trace */
+       bl      _C_LABEL(context_stop)
+
+       pop     {r0-r2} /* restore regs */
+       bl      _C_LABEL(do_ipc)
+
+       /* restore the current process pointer and save the return value */
+       pop     {fp}    /* caller proc ptr */
+       str     r0, [fp, #REG0]
+
+       b       _C_LABEL(switch_to_user)
+
+ENTRY(invalid_svc)
+       b .
+
+ENTRY(restore_user_context)
+       /* sp holds the proc ptr */
+       mov sp, r0
+
+       /* Set SPSR and LR for return */
+       ldr r0, [sp, #PSREG]
+       msr spsr_fsxc, r0
+       ldr lr, [sp, #PCREG]
+
+       /* Restore user-mode registers from proc struct */
+       ldm sp, {r0-r14}^
+
+       ldr sp, =_C_LABEL(svc_stack)
+       ldr sp, [sp]
+
+       /* To user mode! */
+       movs pc, lr
+
+/*===========================================================================*/
+/*                             data                                         */
+/*===========================================================================*/
+
+.data
+.short 0x526F  /* this must be the first data entry (magic #) */
+.bss
+.data
+.balign 4
+k_initial_stack:
+.space K_STACK_SIZE
+LABEL(__k_unpaged_k_initial_stktop)
+
+/*
+ * the kernel stack
+ */
+k_boot_stack:
+.space K_STACK_SIZE    /* kernel stack */ /* FIXME use macro here */
+LABEL(k_boot_stktop)   /* top of kernel stack */
+
+.balign K_STACK_SIZE
+LABEL(k_stacks_start)
+
+/* two pages for each stack, one for data, other as a sandbox */
+.space 2 * (K_STACK_SIZE * CONFIG_MAX_CPUS)
+
+LABEL(k_stacks_end)
+
+/* top of kernel stack */
diff --git a/kernel/arch/arm/omap_intr.c b/kernel/arch/arm/omap_intr.c
new file mode 100644 (file)
index 0000000..14c0b32
--- /dev/null
@@ -0,0 +1,19 @@
+#include <sys/types.h>
+#include <machine/cpu.h>
+#include <io.h>
+#include "omap_intr.h"
+
+int intr_init(const int auto_eoi)
+{
+    return 0;
+}
+
+void omap3_irq_unmask(int irq)
+{
+    mmio_write(OMAP3_INTR_MIR_CLEAR(irq >> 5), 1 << (irq & 0x1f));
+}
+
+void omap3_irq_mask(const int irq)
+{
+    mmio_write(OMAP3_INTR_MIR_SET(irq >> 5), 1 << (irq & 0x1f));
+}
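+
+/* Worked example of the bank/bit arithmetic: for the GPTIMER1 interrupt
+ * (irq 37), irq >> 5 = 1 selects register bank 1 and 1 << (37 & 0x1f) =
+ * 1 << 5 = 0x20 selects its bit, so unmasking writes 0x20 to
+ * OMAP3_INTR_MIR_CLEAR(1) = 0x48200000 + 0x88 + 0x20 * 1 = 0x482000a8.
+ */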
diff --git a/kernel/arch/arm/omap_intr.h b/kernel/arch/arm/omap_intr.h
new file mode 100644 (file)
index 0000000..98f6e5c
--- /dev/null
@@ -0,0 +1,146 @@
+#ifndef _OMAP_INTR_H
+#define _OMAP_INTR_H
+
+/* Interrupt controller memory map */
+#define OMAP3_INTR_BASE 0x48200000 /* INTCPS physical address */
+
+/* Interrupt controller registers */
+#define OMAP3_INTCPS_REVISION     0x000 /* IP revision code */
+#define OMAP3_INTCPS_SYSCONFIG    0x010 /* Controls params */
+#define OMAP3_INTCPS_SYSSTATUS    0x014 /* Status */
+#define OMAP3_INTCPS_SIR_IRQ      0x040 /* Active IRQ number */
+#define OMAP3_INTCPS_SIR_FIQ      0x044 /* Active FIQ number */
+#define OMAP3_INTCPS_CONTROL      0x048 /* New int agreement bits */
+#define OMAP3_INTCPS_PROTECTION   0x04C /* Protection for other regs */
+#define OMAP3_INTCPS_IDLE         0x050 /* Clock auto-idle/gating */
+#define OMAP3_INTCPS_IRQ_PRIORITY 0x060 /* Active IRQ priority level */
+#define OMAP3_INTCPS_FIQ_PRIORITY 0x064 /* Active FIQ priority level */
+#define OMAP3_INTCPS_THRESHOLD    0x068 /* Priority threshold */
+#define OMAP3_INTCPS_ITR0         0x080 /* Raw pre-masking interrupt status */
+#define OMAP3_INTCPS_MIR0         0x084 /* Interrupt mask */
+#define OMAP3_INTCPS_MIR_CLEAR0   0x088 /* Clear interrupt mask bits */
+#define OMAP3_INTCPS_MIR_SET0     0x08C /* Set interrupt mask bits */
+#define OMAP3_INTCPS_ISR_SET0     0x090 /* Set software int bits */
+#define OMAP3_INTCPS_ISR_CLEAR0   0x094 /* Clear software int bits */
+#define OMAP3_INTCPS_PENDING_IRQ0 0x098 /* IRQ status post-masking */
+#define OMAP3_INTCPS_PENDING_FIQ0 0x09C /* FIQ status post-masking */
+#define OMAP3_INTCPS_ILR0         0x100 /* Priority for interrupts */
+
+
+#define OMAP3_INTR_REVISION     (OMAP3_INTR_BASE + OMAP3_INTCPS_REVISION)
+#define OMAP3_INTR_SYSCONFIG    (OMAP3_INTR_BASE + OMAP3_INTCPS_SYSCONFIG)
+#define OMAP3_INTR_SYSSTATUS    (OMAP3_INTR_BASE + OMAP3_INTCPS_SYSSTATUS)
+#define OMAP3_INTR_SIR_IRQ      (OMAP3_INTR_BASE + OMAP3_INTCPS_SIR_IRQ)
+#define OMAP3_INTR_SIR_FIQ      (OMAP3_INTR_BASE + OMAP3_INTCPS_SIR_FIQ)
+#define OMAP3_INTR_CONTROL      (OMAP3_INTR_BASE + OMAP3_INTCPS_CONTROL)
+#define OMAP3_INTR_PROTECTION   (OMAP3_INTR_BASE + OMAP3_INTCPS_PROTECTION)
+#define OMAP3_INTR_IDLE         (OMAP3_INTR_BASE + OMAP3_INTCPS_IDLE)
+#define OMAP3_INTR_IRQ_PRIORITY (OMAP3_INTR_BASE + OMAP3_INTCPS_IRQ_PRIORITY)
+#define OMAP3_INTR_FIQ_PRIORITY (OMAP3_INTR_BASE + OMAP3_INTCPS_FIQ_PRIORITY)
+#define OMAP3_INTR_THRESHOLD    (OMAP3_INTR_BASE + OMAP3_INTCPS_THRESHOLD)
+
+#define OMAP3_INTR_ITR(n) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_ITR0 + 0x20 * (n))
+#define OMAP3_INTR_MIR(n) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_MIR0 + 0x20 * (n))
+#define OMAP3_INTR_MIR_CLEAR(n)        \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_MIR_CLEAR0 + 0x20 * (n))
+#define OMAP3_INTR_MIR_SET(n) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_MIR_SET0 + 0x20 * (n))
+#define OMAP3_INTR_ISR_SET(n) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_ISR_SET0 + 0x20 * (n))
+#define OMAP3_INTR_ISR_CLEAR(n) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_ISR_CLEAR0 + 0x20 * (n))
+#define OMAP3_INTR_PENDING_IRQ(n) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_PENDING_IRQ0 + 0x20 * (n))
+#define OMAP3_INTR_PENDING_FIQ(n) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_PENDING_FIQ0 + 0x20 * (n))
+#define OMAP3_INTR_ILR(m) \
+    (OMAP3_INTR_BASE + OMAP3_INTCPS_ILR0 + 0x4 * (m))
+
+#define OMAP3_INTR_ACTIVEIRQ_MASK 0x7f /* Active IRQ mask for SIR_IRQ */
+#define OMAP3_INTR_NEWIRQAGR      0x1  /* New IRQ Generation */
+
+#define OMAP3_NR_IRQ_VECTORS    96
+
+/* Interrupt mappings */
+#define OMAP3_MCBSP2_ST_IRQ  4  /* Sidetone McBSP2 overflow */
+#define OMAP3_MCBSP3_ST_IRQ  5  /* Sidetone McBSP3 overflow */
+#define OMAP3_SYS_NIRQ       7  /* External source (active low) */
+#define OMAP3_SMX_DBG_IRQ    9  /* L3 interconnect error for debug */
+#define OMAP3_SMX_APP_IRQ   10  /* L3 interconnect error for application */
+#define OMAP3_PRCM_IRQ      11  /* PRCM module */
+#define OMAP3_SDMA0_IRQ     12  /* System DMA request 0 */
+#define OMAP3_SDMA1_IRQ     13  /* System DMA request 1 */
+#define OMAP3_SDMA2_IRQ     14  /* System DMA request 2 */
+#define OMAP3_SDMA3_IRQ     15  /* System DMA request 3 */
+#define OMAP3_MCBSP1_IRQ    16  /* McBSP module 1 */
+#define OMAP3_MCBSP2_IRQ    17  /* McBSP module 2 */
+#define OMAP3_GPMC_IRQ      20  /* General-purpose memory controller */
+#define OMAP3_SGX_IRQ       21  /* 2D/3D graphics module */
+#define OMAP3_MCBSP3_IRQ    22  /* McBSP module 3 */
+#define OMAP3_MCBSP4_IRQ    23  /* McBSP module 4 */
+#define OMAP3_CAM0_IRQ      24  /* Camera interface request 0 */
+#define OMAP3_DSS_IRQ       25  /* Display subsystem module */
+#define OMAP3_MAIL_U0_IRQ   26  /* Mailbox user 0 request */
+#define OMAP3_MCBSP5_IRQ    27  /* McBSP module 5 */
+#define OMAP3_IVA2_MMU_IRQ  28  /* IVA2 MMU */
+#define OMAP3_GPIO1_IRQ     29  /* GPIO module 1 */
+#define OMAP3_GPIO2_IRQ     30  /* GPIO module 2 */
+#define OMAP3_GPIO3_IRQ     31  /* GPIO module 3 */
+#define OMAP3_GPIO4_IRQ     32  /* GPIO module 4 */
+#define OMAP3_GPIO5_IRQ     33  /* GPIO module 5 */
+#define OMAP3_GPIO6_IRQ     34  /* GPIO module 6 */
+#define OMAP3_WDT3_IRQ      36  /* Watchdog timer module 3 overflow */
+#define OMAP3_GPT1_IRQ      37  /* General-purpose timer module 1 */
+#define OMAP3_GPT2_IRQ      38  /* General-purpose timer module 2 */
+#define OMAP3_GPT3_IRQ      39  /* General-purpose timer module 3 */
+#define OMAP3_GPT4_IRQ      40  /* General-purpose timer module 4 */
+#define OMAP3_GPT5_IRQ      41  /* General-purpose timer module 5 */
+#define OMAP3_GPT6_IRQ      42  /* General-purpose timer module 6 */
+#define OMAP3_GPT7_IRQ      43  /* General-purpose timer module 7 */
+#define OMAP3_GPT8_IRQ      44  /* General-purpose timer module 8 */
+#define OMAP3_GPT9_IRQ      45  /* General-purpose timer module 9 */
+#define OMAP3_GPT10_IRQ     46  /* General-purpose timer module 10 */
+#define OMAP3_GPT11_IRQ     47  /* General-purpose timer module 11 */
+#define OMAP3_SPI4_IRQ      48  /* McSPI module 4 */
+#define OMAP3_MCBSP4_TX_IRQ 54  /* McBSP module 4 transmit */
+#define OMAP3_MCBSP4_RX_IRQ 55  /* McBSP module 4 receive */
+#define OMAP3_I2C1_IRQ      56  /* I2C module 1 */
+#define OMAP3_I2C2_IRQ      57  /* I2C module 2 */
+#define OMAP3_HDQ_IRQ       58  /* HDQ/1-Wire */
+#define OMAP3_MCBSP1_TX_IRQ 59  /* McBSP module 1 transmit */
+#define OMAP3_MCBSP1_RX_IRQ 60  /* McBSP module 1 receive */
+#define OMAP3_I2C3_IRQ      61  /* I2C module 3 */
+#define OMAP3_MCBSP2_TX_IRQ 62  /* McBSP module 2 transmit */
+#define OMAP3_MCBSP2_RX_IRQ 63  /* McBSP module 2 receive */
+#define OMAP3_SPI1_IRQ      65  /* McSPI module 1 */
+#define OMAP3_SPI2_IRQ      66  /* McSPI module 2 */
+#define OMAP3_UART1_IRQ     72  /* UART module 1 */
+#define OMAP3_UART2_IRQ     73  /* UART module 2 */
+#define OMAP3_PBIAS_IRQ     75  /* Merged interrupt for PBIASlite 1/2 */
+#define OMAP3_OHCI_IRQ      76  /* OHCI HSUSB MP Host Interrupt */
+#define OMAP3_EHCI_IRQ      77  /* EHCI HSUSB MP Host Interrupt */
+#define OMAP3_TLL_IRQ       78  /* HSUSB MP TLL Interrupt */
+#define OMAP3_MCBSP5_TX_IRQ 81  /* McBSP module 5 transmit */
+#define OMAP3_MCBSP5_RX_IRQ 82  /* McBSP module 5 receive */
+#define OMAP3_MMC1_IRQ      83  /* MMC/SD module 1 */
+#define OMAP3_MMC2_IRQ      86  /* MMC/SD module 2 */
+#define OMAP3_ICR_IRQ       87  /* MPU ICR */
+#define OMAP3_D2DFRINT_IRQ  88  /* 3G coproc (in stacked modem config) */
+#define OMAP3_MCBSP3_TX_IRQ 89  /* McBSP module 3 transmit */
+#define OMAP3_MCBSP3_RX_IRQ 90  /* McBSP module 3 receive */
+#define OMAP3_SPI3_IRQ      91  /* McSPI module 3 */
+#define OMAP3_HSUSB_MC_IRQ  92  /* High-speed USB OTG */
+#define OMAP3_HSUSB_DMA_IRQ 93  /* High-speed USB OTG DMA */
+#define OMAP3_MMC3_IRQ      94  /* MMC/SD module 3 */
+
+
+#ifndef __ASSEMBLY__
+
+void omap3_irq_unmask(int irq);
+void omap3_irq_mask(int irq);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _OMAP_INTR_H */
diff --git a/kernel/arch/arm/omap_serial.c b/kernel/arch/arm/omap_serial.c
new file mode 100644 (file)
index 0000000..30e744e
--- /dev/null
@@ -0,0 +1,17 @@
+#include <sys/types.h>
+#include <machine/cpu.h>
+#include <io.h>
+#include "omap_serial.h"
+
+void omap3_ser_putc(char c)
+{
+    int i;
+
+    /* Wait until FIFO's empty */
+    for (i = 0; i < 100000; i++)
+       if (mmio_read(OMAP3_UART3_LSR) & OMAP3_LSR_TX_FIFO_E)
+           break;
+
+    /* Write character */
+    mmio_write(OMAP3_UART3_THR, c);
+}
diff --git a/kernel/arch/arm/omap_serial.h b/kernel/arch/arm/omap_serial.h
new file mode 100644 (file)
index 0000000..278d028
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef _OMAP_SERIAL_H
+#define _OMAP_SERIAL_H
+
+/* UART register map */
+#define OMAP3_UART1_BASE 0x4806A000 /* UART1 physical address */
+#define OMAP3_UART2_BASE 0x4806C000 /* UART2 physical address */
+#define OMAP3_UART3_BASE 0x49020000 /* UART3 physical address */
+
+/* UART registers */
+#define OMAP3_THR 0x000 /* Transmit holding register */
+#define OMAP3_LSR 0x014 /* Line status register */
+#define OMAP3_SSR 0x044 /* Supplementary status register */
+
+/* Line status register fields */
+#define OMAP3_LSR_TX_FIFO_E    (1 << 5) /* Transmit FIFO empty */
+
+/* Supplementary status register fields */
+#define OMAP3_SSR_TX_FIFO_FULL (1 << 0) /* Transmit FIFO full */
+
+#define OMAP3_UART3_THR (OMAP3_UART3_BASE + OMAP3_THR)
+#define OMAP3_UART3_LSR (OMAP3_UART3_BASE + OMAP3_LSR)
+#define OMAP3_UART3_SSR (OMAP3_UART3_BASE + OMAP3_SSR)
+
+#ifndef __ASSEMBLY__
+
+void omap3_ser_putc(char c);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _OMAP_SERIAL_H */
diff --git a/kernel/arch/arm/omap_timer.c b/kernel/arch/arm/omap_timer.c
new file mode 100644 (file)
index 0000000..20ac461
--- /dev/null
@@ -0,0 +1,66 @@
+#include "kernel/kernel.h"
+#include "kernel/clock.h"
+#include <sys/types.h>
+#include <machine/cpu.h>
+#include <io.h>
+#include "arch_proto.h"
+#include "omap_timer.h"
+#include "omap_intr.h"
+
+static irq_hook_t omap3_timer_hook;            /* interrupt handler hook */
+
+int omap3_register_timer_handler(const irq_handler_t handler)
+{
+       /* Initialize the CLOCK's interrupt hook. */
+       omap3_timer_hook.proc_nr_e = NONE;
+       omap3_timer_hook.irq = OMAP3_GPT1_IRQ;
+
+       put_irq_handler(&omap3_timer_hook, OMAP3_GPT1_IRQ, handler);
+
+       return 0;
+}
+
+void omap3_timer_init(unsigned freq)
+{
+    /* Stop timer */
+    mmio_clear(OMAP3_GPTIMER1_TCLR, OMAP3_TCLR_ST);
+
+    /* Use 32 KHz clock source for GPTIMER1 */
+    mmio_clear(OMAP3_CM_CLKSEL_WKUP, OMAP3_CLKSEL_GPT1);
+
+    /* Use 1-ms tick mode for GPTIMER1 */
+    mmio_write(OMAP3_GPTIMER1_TPIR, 232000);
+    mmio_write(OMAP3_GPTIMER1_TNIR, -768000);
+    mmio_write(OMAP3_GPTIMER1_TLDR, 0xffffffe0);
+    mmio_write(OMAP3_GPTIMER1_TCRR, 0xffffffe0);
+
+    /* Set frequency */
+    mmio_write(OMAP3_GPTIMER1_TOWR, TIMER_COUNT(freq));
+
+    /* Set up overflow interrupt */
+    mmio_write(OMAP3_GPTIMER1_TISR, ~0);
+    mmio_write(OMAP3_GPTIMER1_TIER, OMAP3_TIER_OVF_IT_ENA);
+    omap3_irq_unmask(OMAP3_GPT1_IRQ);
+
+    /* Start timer */
+    mmio_set(OMAP3_GPTIMER1_TCLR,
+            OMAP3_TCLR_OVF_TRG|OMAP3_TCLR_AR|OMAP3_TCLR_ST);
+}
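+
+/* The 1-ms tick programming above follows from the 32.768 kHz source:
+ * 1 ms is 32.768 counts, TLDR/TCRR = 0xffffffe0 is -32, and the TPIR/TNIR
+ * values match (33 - 32.768) * 1e6 = 232000 and (32 - 32.768) * 1e6 =
+ * -768000, so the hardware alternates 32- and 33-count overflow periods
+ * and the overflow rate averages exactly 1 kHz; TOWR then masks overflows
+ * so only every TIMER_COUNT(freq)-th one raises an interrupt.
+ */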
+
+void omap3_timer_stop(void)
+{
+    mmio_clear(OMAP3_GPTIMER1_TCLR, OMAP3_TCLR_ST);
+}
+
+static u64_t tsc;
+void omap3_timer_int_handler(void)
+{
+    /* Clear the interrupt */
+    mmio_write(OMAP3_GPTIMER1_TISR, ~0);
+    tsc++;
+}
+
+void read_tsc_64(u64_t *t)
+{
+    *t = tsc;
+}
diff --git a/kernel/arch/arm/omap_timer.h b/kernel/arch/arm/omap_timer.h
new file mode 100644 (file)
index 0000000..5317b0e
--- /dev/null
@@ -0,0 +1,95 @@
+#ifndef _OMAP_TIMER_H
+#define _OMAP_TIMER_H
+
+/* General-purpose timer register map */
+#define OMAP3_GPTIMER1_BASE  0x48318000 /* GPTIMER1 physical address */
+#define OMAP3_GPTIMER2_BASE  0x49032000 /* GPTIMER2 physical address */
+#define OMAP3_GPTIMER3_BASE  0x49034000 /* GPTIMER3 physical address */
+#define OMAP3_GPTIMER4_BASE  0x49036000 /* GPTIMER4 physical address */
+#define OMAP3_GPTIMER5_BASE  0x49038000 /* GPTIMER5 physical address */
+#define OMAP3_GPTIMER6_BASE  0x4903A000 /* GPTIMER6 physical address */
+#define OMAP3_GPTIMER7_BASE  0x4903C000 /* GPTIMER7 physical address */
+#define OMAP3_GPTIMER8_BASE  0x4903E000 /* GPTIMER8 physical address */
+#define OMAP3_GPTIMER9_BASE  0x49040000 /* GPTIMER9 physical address */
+#define OMAP3_GPTIMER10_BASE 0x48086000 /* GPTIMER10 physical address */
+#define OMAP3_GPTIMER11_BASE 0x48088000 /* GPTIMER11 physical address */
+
+/* General-purpose timer registers */
+#define OMAP3_TIDR      0x000 /* IP revision code */
+#define OMAP3_TIOCP_CFG 0x010 /* Controls params for GP timer L4 interface */
+#define OMAP3_TISTAT    0x014 /* Status (excl. interrupt status) */
+#define OMAP3_TISR      0x018 /* Pending interrupt status */
+#define OMAP3_TIER      0x01C /* Interrupt enable */
+#define OMAP3_TWER      0x020 /* Wakeup enable */
+#define OMAP3_TCLR      0x024 /* Controls optional features */
+#define OMAP3_TCRR      0x028 /* Internal counter value */
+#define OMAP3_TLDR      0x02C /* Timer load value */
+#define OMAP3_TTGR      0x030 /* Triggers counter reload */
+#define OMAP3_TWPS      0x034 /* Indicates if Write-Posted pending */
+#define OMAP3_TMAR      0x038 /* Value to be compared with counter */
+#define OMAP3_TCAR1     0x03C /* First captured value of counter register */
+#define OMAP3_TSICR     0x040 /* Control posted mode and functional SW reset */
+#define OMAP3_TCAR2     0x044 /* Second captured value of counter register */
+#define OMAP3_TPIR      0x048 /* Positive increment (1 ms tick) */
+#define OMAP3_TNIR      0x04C /* Negative increment (1 ms tick) */
+#define OMAP3_TCVR      0x050 /* Whether TCRR is in sub- or over-period (1 ms tick) */
+#define OMAP3_TOCR      0x054 /* Masks tick interrupt */
+#define OMAP3_TOWR      0x058 /* Number of masked overflow interrupts */
+
+/* Interrupt status register fields */
+#define OMAP3_TISR_MAT_IT_FLAG  (1 << 0) /* Pending match interrupt status */
+#define OMAP3_TISR_OVF_IT_FLAG  (1 << 1) /* Pending overflow interrupt status */
+#define OMAP3_TISR_TCAR_IT_FLAG (1 << 2) /* Pending capture interrupt status */
+
+/* Interrupt enable register fields */
+#define OMAP3_TIER_MAT_IT_ENA  (1 << 0) /* Enable match interrupt */
+#define OMAP3_TIER_OVF_IT_ENA  (1 << 1) /* Enable overflow interrupt */
+#define OMAP3_TIER_TCAR_IT_ENA (1 << 2) /* Enable capture interrupt */
+
+/* Timer control fields */
+#define OMAP3_TCLR_ST       (1 << 0)  /* Start/stop timer */
+#define OMAP3_TCLR_AR       (1 << 1)  /* Autoreload or one-shot mode */
+#define OMAP3_TCLR_OVF_TRG  (1 << 10) /* Overflow trigger */
+
+#define OMAP3_GPTIMER1_TIDR      (OMAP3_GPTIMER1_BASE + OMAP3_TIDR)
+#define OMAP3_GPTIMER1_TIOCP_CFG (OMAP3_GPTIMER1_BASE + OMAP3_TIOCP_CFG)
+#define OMAP3_GPTIMER1_TISTAT    (OMAP3_GPTIMER1_BASE + OMAP3_TISTAT)
+#define OMAP3_GPTIMER1_TISR      (OMAP3_GPTIMER1_BASE + OMAP3_TISR)
+#define OMAP3_GPTIMER1_TIER      (OMAP3_GPTIMER1_BASE + OMAP3_TIER)
+#define OMAP3_GPTIMER1_TWER      (OMAP3_GPTIMER1_BASE + OMAP3_TWER)
+#define OMAP3_GPTIMER1_TCLR      (OMAP3_GPTIMER1_BASE + OMAP3_TCLR)
+#define OMAP3_GPTIMER1_TCRR      (OMAP3_GPTIMER1_BASE + OMAP3_TCRR)
+#define OMAP3_GPTIMER1_TLDR      (OMAP3_GPTIMER1_BASE + OMAP3_TLDR)
+#define OMAP3_GPTIMER1_TTGR      (OMAP3_GPTIMER1_BASE + OMAP3_TTGR)
+#define OMAP3_GPTIMER1_TWPS      (OMAP3_GPTIMER1_BASE + OMAP3_TWPS)
+#define OMAP3_GPTIMER1_TMAR      (OMAP3_GPTIMER1_BASE + OMAP3_TMAR)
+#define OMAP3_GPTIMER1_TCAR1     (OMAP3_GPTIMER1_BASE + OMAP3_TCAR1)
+#define OMAP3_GPTIMER1_TSICR     (OMAP3_GPTIMER1_BASE + OMAP3_TSICR)
+#define OMAP3_GPTIMER1_TCAR2     (OMAP3_GPTIMER1_BASE + OMAP3_TCAR2)
+#define OMAP3_GPTIMER1_TPIR      (OMAP3_GPTIMER1_BASE + OMAP3_TPIR)
+#define OMAP3_GPTIMER1_TNIR      (OMAP3_GPTIMER1_BASE + OMAP3_TNIR)
+#define OMAP3_GPTIMER1_TCVR      (OMAP3_GPTIMER1_BASE + OMAP3_TCVR)
+#define OMAP3_GPTIMER1_TOCR      (OMAP3_GPTIMER1_BASE + OMAP3_TOCR)
+#define OMAP3_GPTIMER1_TOWR      (OMAP3_GPTIMER1_BASE + OMAP3_TOWR)
+
+#define OMAP3_CM_CLKSEL_WKUP 0x48004c40 /* source clock selection */
+#define OMAP3_CLKSEL_GPT1    (1 << 0)   /* Selects GPTIMER 1 source
+                                        * clock:
+                                        *
+                                        *  0: use 32KHz clock
+                                        *  1: use sys clock
+                                        */
+
+#define TIMER_FREQ  1000    /* clock frequency for OMAP timer (1ms) */
+#define TIMER_COUNT(freq) (TIMER_FREQ/(freq)) /* initial value for counter */
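+/* Example: with a system tick frequency of freq = 250 Hz,
+ * TIMER_COUNT(250) = 4, i.e. the 1-ms overflows are masked so that only
+ * every fourth one (every 4 ms) raises the timer interrupt.
+ */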
+
+#ifndef __ASSEMBLY__
+
+void omap3_timer_init(unsigned freq);
+void omap3_timer_stop(void);
+int omap3_register_timer_handler(const irq_handler_t handler);
+void omap3_timer_int_handler(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _OMAP_TIMER_H */
diff --git a/kernel/arch/arm/pg_utils.c b/kernel/arch/arm/pg_utils.c
new file mode 100644 (file)
index 0000000..15877d4
--- /dev/null
@@ -0,0 +1,292 @@
+
+#include <minix/cpufeature.h>
+
+#include <minix/type.h>
+#include <libexec.h>
+#include <assert.h>
+#include "kernel.h"
+#include "arch_proto.h"
+#include <machine/cpu.h>
+
+#include <string.h>
+
+/* These are set/computed in kernel.lds. */
+extern char _kern_vir_base, _kern_phys_base, _kern_size;
+
+/* Retrieve the absolute values to something we can use. */
+static phys_bytes kern_vir_start = (phys_bytes) &_kern_vir_base;
+static phys_bytes kern_phys_start = (phys_bytes) &_kern_phys_base;
+static phys_bytes kern_kernlen = (phys_bytes) &_kern_size;
+
+/* page directory we can use to map things */
+static u32_t pagedir[4096]  __aligned(16384);
+
+void print_memmap(kinfo_t *cbi)
+{
+        int m;
+        assert(cbi->mmap_size < MAXMEMMAP);
+        for(m = 0; m < cbi->mmap_size; m++) {
+               phys_bytes addr = cbi->memmap[m].addr, endit = cbi->memmap[m].addr + cbi->memmap[m].len;
+                printf("%08lx-%08lx ",addr, endit);
+        }
+        printf("\nsize %d\n", cbi->mmap_size);
+}
+
+void cut_memmap(kinfo_t *cbi, phys_bytes start, phys_bytes end)
+{
+        int m;
+        phys_bytes o;
+
+        if((o=start % ARM_PAGE_SIZE))
+                start -= o;
+        if((o=end % ARM_PAGE_SIZE))
+                end += ARM_PAGE_SIZE - o;
+
+       assert(kernel_may_alloc);
+
+        for(m = 0; m < cbi->mmap_size; m++) {
+                phys_bytes substart = start, subend = end;
+                phys_bytes memaddr = cbi->memmap[m].addr,
+                        memend = cbi->memmap[m].addr + cbi->memmap[m].len;
+
+                /* adjust cut range to be a subset of the free memory */
+                if(substart < memaddr) substart = memaddr;
+                if(subend > memend) subend = memend;
+                if(substart >= subend) continue;
+
+                /* if there is any overlap, forget this one and add
+                 * 1-2 subranges back
+                 */
+                cbi->memmap[m].addr = cbi->memmap[m].len = 0;
+                if(substart > memaddr)
+                        add_memmap(cbi, memaddr, substart-memaddr);
+                if(subend < memend)
+                        add_memmap(cbi, subend, memend-subend);
+        }
+}
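+
+/* Worked example: cutting [0x80200000, 0x80400000) out of a single free
+ * range [0x80000000, 0x90000000) zeroes that entry and re-adds the two
+ * survivors, [0x80000000, 0x80200000) and [0x80400000, 0x90000000), via
+ * add_memmap(); ranges the cut does not overlap are left untouched.
+ */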
+
+phys_bytes alloc_lowest(kinfo_t *cbi, phys_bytes len)
+{
+       /* Allocate the lowest physical page we have. */
+       int m;
+#define EMPTY 0xffffffff
+       phys_bytes lowest = EMPTY;
+       assert(len > 0);
+       len = roundup(len, ARM_PAGE_SIZE);
+
+       assert(kernel_may_alloc);
+
+       for(m = 0; m < cbi->mmap_size; m++) {
+               if(cbi->memmap[m].len < len) continue;
+               if(cbi->memmap[m].addr < lowest) lowest = cbi->memmap[m].addr;
+       }
+       assert(lowest != EMPTY);
+       cut_memmap(cbi, lowest, len);
+       return lowest;
+}
+
+void add_memmap(kinfo_t *cbi, u64_t addr, u64_t len)
+{
+        int m;
+#define LIMIT 0xFFFFF000
+        /* Truncate available memory at 4GB as the rest of minix
+         * currently can't deal with any bigger.
+         */
+        if(addr > LIMIT) return;
+        if(addr + len > LIMIT) {
+                len -= (addr + len - LIMIT);
+        }
+        assert(cbi->mmap_size < MAXMEMMAP);
+        if(len == 0) return;
+       addr = roundup(addr, ARM_PAGE_SIZE);
+       len = rounddown(len, ARM_PAGE_SIZE);
+
+       assert(kernel_may_alloc);
+
+        for(m = 0; m < MAXMEMMAP; m++) {
+               phys_bytes highmark;
+                if(cbi->memmap[m].len) continue;
+                cbi->memmap[m].addr = addr;
+                cbi->memmap[m].len = len;
+                cbi->memmap[m].type = MULTIBOOT_MEMORY_AVAILABLE;
+                if(m >= cbi->mmap_size)
+                        cbi->mmap_size = m+1;
+               highmark = addr + len;
+               if(highmark > cbi->mem_high_phys) {
+                       cbi->mem_high_phys = highmark;
+               }
+
+                return;
+        }
+
+        panic("no available memmap slot");
+}
+
+u32_t *alloc_pagetable(phys_bytes *ph)
+{
+       u32_t *ret;
+#define PG_PAGETABLES 24
+       static u32_t pagetables[PG_PAGETABLES][256]  __aligned(1024);
+       static int pt_inuse = 0;
+       if(pt_inuse >= PG_PAGETABLES) panic("no more pagetables");
+       assert(sizeof(pagetables[pt_inuse]) == 1024);
+       ret = pagetables[pt_inuse++];
+       *ph = vir2phys(ret);
+       return ret;
+}
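+
+/* Each static table is 256 4-byte entries (1 KiB, hence the 1024-byte
+ * alignment) and maps 256 * 4 KiB = 1 MiB of virtual address space, so
+ * the PG_PAGETABLES = 24 pool can map at most 24 MiB during boot.
+ */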
+
+#define PAGE_KB (ARM_PAGE_SIZE / 1024)
+
+phys_bytes pg_alloc_page(kinfo_t *cbi)
+{
+       int m;
+       multiboot_memory_map_t *mmap;
+
+       assert(kernel_may_alloc);
+
+       for(m = 0; m < cbi->mmap_size; m++) {
+               mmap = &cbi->memmap[m];
+               if(!mmap->len) continue;
+               assert(mmap->len > 0);
+               assert(!(mmap->len % ARM_PAGE_SIZE));
+               assert(!(mmap->addr % ARM_PAGE_SIZE));
+
+               u32_t addr = mmap->addr;
+               mmap->addr += ARM_PAGE_SIZE;
+               mmap->len  -= ARM_PAGE_SIZE;
+
+               return addr;
+       }
+
+       panic("can't find free memory");
+}
+
+void pg_identity(kinfo_t *cbi)
+{
+       int i;
+       phys_bytes phys;
+
+       /* We map memory that does not correspond to physical memory
+        * as non-cacheable. Make sure we know what it is.
+        */
+       assert(cbi->mem_high_phys);
+
+        /* Set up an identity mapping page directory */
+        for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
+               u32_t flags = ARM_VM_SECTION |
+                   ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
+                phys = i * ARM_BIG_PAGE_SIZE;
+                pagedir[i] =  phys | flags;
+        }
+}
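+
+/* The identity map covers the whole 4 GiB address space: ARM_VM_DIR_ENTRIES
+ * (4096) first-level sections of ARM_BIG_PAGE_SIZE (1 MiB) each, mapped
+ * one-to-one; no cacheability bits are set here, matching the comment above
+ * about non-physical memory being treated as non-cacheable.
+ */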
+
+int pg_mapkernel(void)
+{
+       int pde;
+       u32_t mapped = 0, kern_phys = kern_phys_start;
+
+        assert(!(kern_vir_start % ARM_BIG_PAGE_SIZE));
+        assert(!(kern_phys_start % ARM_BIG_PAGE_SIZE));
+        pde = kern_vir_start / ARM_BIG_PAGE_SIZE; /* start pde */
+       while(mapped < kern_kernlen) {
+               pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) |
+                       ARM_VM_SECTION |
+                       ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
+                       ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+               mapped += ARM_BIG_PAGE_SIZE;
+               kern_phys += ARM_BIG_PAGE_SIZE;
+               pde++;
+       }
+       return pde;     /* free pde */
+}
+
+void vm_enable_paging(void)
+{
+        u32_t sctlr;
+
+       write_ttbcr(0);
+
+       /* Set all Domains to Client */
+       write_dacr(0x55555555);
+
+       /* Enable MMU and access flag */
+       sctlr = read_sctlr();
+       sctlr |= (SCTLR_M);
+       write_sctlr(sctlr);
+}
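+
+/* 0x55555555 is the 2-bit field value 01 ("client") repeated for all 16
+ * domains: accesses are then checked against the page-table permission
+ * bits, instead of being allowed unconditionally as with 11 ("manager").
+ */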
+
+phys_bytes pg_load(void)
+{
+       phys_bytes phpagedir = vir2phys(pagedir);
+       refresh_tlb();
+        write_ttbr0(phpagedir);
+       return phpagedir;
+}
+
+void pg_clear(void)
+{
+       memset(pagedir, 0, sizeof(pagedir));
+}
+
+phys_bytes pg_rounddown(phys_bytes b)
+{
+       phys_bytes o;
+       if(!(o = b % ARM_PAGE_SIZE))
+               return b;
+       return b  - o;
+}
+
+void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
+       kinfo_t *cbi)
+{
+       static int mapped_pde = -1;
+       static u32_t *pt = NULL;
+       int pde, pte;
+
+       assert(kernel_may_alloc);
+
+       if(phys == PG_ALLOCATEME) {
+               assert(!(vaddr % ARM_PAGE_SIZE));
+       } else  {
+               assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
+               vaddr = pg_rounddown(vaddr);
+               phys = pg_rounddown(phys);
+       }
+       assert(vaddr < kern_vir_start);
+
+       while(vaddr < vaddr_end) {
+               phys_bytes source = phys;
+               assert(!(vaddr % ARM_PAGE_SIZE));
+               if(phys == PG_ALLOCATEME) {
+                       source = pg_alloc_page(cbi);
+               } else {
+                       assert(!(phys % ARM_PAGE_SIZE));
+               }
+               assert(!(source % ARM_PAGE_SIZE));
+               pde = ARM_VM_PDE(vaddr);
+               pte = ARM_VM_PTE(vaddr);
+               if(mapped_pde < pde) {
+                       phys_bytes ph;
+                       pt = alloc_pagetable(&ph);
+                       pagedir[pde] = (ph & ARM_VM_PDE_MASK)
+                           | ARM_VM_PAGEDIR | ARM_VM_PDE_DOMAIN;
+                       mapped_pde = pde;
+               }
+               assert(pt);
+               pt[pte] = (source & ARM_VM_PTE_MASK)
+                           | ARM_VM_PAGETABLE
+                           | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+                           | ARM_VM_PTE_USER;
+               vaddr += ARM_PAGE_SIZE;
+               if(phys != PG_ALLOCATEME)
+                       phys += ARM_PAGE_SIZE;
+       }
+}
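+
+/* For reference, the split done by ARM_VM_PDE/ARM_VM_PTE above is the
+ * ARMv7 short-descriptor scheme: virtual address bits [31:20] pick one of
+ * 4096 first-level entries, bits [19:12] pick one of 256 second-level
+ * 4 KiB page entries, and bits [11:0] are the offset within the page.
+ */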
+
+void pg_info(reg_t *pagedir_ph, u32_t **pagedir_v)
+{
+       *pagedir_ph = vir2phys(pagedir);
+       *pagedir_v = pagedir;
+}
diff --git a/kernel/arch/arm/phys_copy.S b/kernel/arch/arm/phys_copy.S
new file mode 100644 (file)
index 0000000..e548f7e
--- /dev/null
@@ -0,0 +1,389 @@
+/*     $NetBSD: memcpy_arm.S,v 1.2 2008/04/28 20:22:52 martin Exp $    */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Neil A. Carson and Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/*
+ * This is one fun bit of code ...
+ * Some easy listening music is suggested while trying to understand this
+ * code e.g. Iron Maiden
+ *
+ * For anyone attempting to understand it :
+ *
+ * The core code is implemented here with simple stubs for memcpy().
+ *
+ * All local labels are prefixed with Lmemcpy_
+ * Following the prefix a label starting f is used in the forward copy code
+ * while a label using b is used in the backwards copy code
+ * The source and destination addresses determine whether a forward or
+ * backward copy is performed.
+ * Separate bits of code are used to deal with the following situations
+ * for both the forward and backwards copy.
+ * unaligned source address
+ * unaligned destination address
+ * Separate copy routines are used to produce an optimised result for each
+ * of these cases.
+ * The copy code will use LDM/STM instructions to copy up to 32 bytes at
+ * a time where possible.
+ *
+ * Note: r12 (aka ip) can be trashed during the function along with
+ * r0-r3 although r0-r2 have defined uses i.e. src, dest, len through out.
+ * Additional registers are preserved prior to use i.e. r4, r5 & lr
+ *
+ * Apologies for the state of the comments ;-)
+ */
+
+/* For MINIX, we always spill r0, r4, r5, and lr, so we can easily
+ * clean up the stack after a phys_copy fault. NetBSD, in contrast,
+ * spills the minimum number of registers for each path.
+ */
+#if defined(__minix)
+/* LINTSTUB: Func: void *phys_copy(void *src, void *dst, size_t len) */
+ENTRY(phys_copy)
+       /* switch the source and destination registers */
+       eor     r0, r1, r0
+       eor     r1, r0, r1
+       eor     r0, r1, r0
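+       /* (the three eors swap r0 and r1 in place, turning phys_copy()'s
+        * (src, dst) argument order into memcpy()'s (dst, src)) */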
+#else
+/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
+ENTRY(memcpy)
+#endif
+       /* save leaf functions having to store this away */
+#if defined(__minix)
+       stmdb   sp!, {r0, r4, r5, lr}           /* memcpy() returns dest addr */
+#else
+       stmdb   sp!, {r0, lr}           /* memcpy() returns dest addr */
+#endif
+
+       subs    r2, r2, #4
+       blt     .Lmemcpy_l4             /* less than 4 bytes */
+       ands    r12, r0, #3
+       bne     .Lmemcpy_destul         /* oh unaligned destination addr */
+       ands    r12, r1, #3
+       bne     .Lmemcpy_srcul          /* oh unaligned source addr */
+
+.Lmemcpy_t8:
+       /* We have aligned source and destination */
+       subs    r2, r2, #8
+       blt     .Lmemcpy_l12            /* less than 12 bytes (4 from above) */
+       subs    r2, r2, #0x14
+       blt     .Lmemcpy_l32            /* less than 32 bytes (12 from above) */
+#if !defined(__minix)
+       stmdb   sp!, {r4}               /* borrow r4 */
+#endif
+
+       /* blat 32 bytes at a time */
+       /* XXX for really big copies perhaps we should use more registers */
+.Lmemcpy_loop32:
+       ldmia   r1!, {r3, r4, r12, lr}
+       stmia   r0!, {r3, r4, r12, lr}
+       ldmia   r1!, {r3, r4, r12, lr}
+       stmia   r0!, {r3, r4, r12, lr}
+       subs    r2, r2, #0x20
+       bge     .Lmemcpy_loop32
+
+       cmn     r2, #0x10
+       ldmgeia r1!, {r3, r4, r12, lr}  /* blat a remaining 16 bytes */
+       stmgeia r0!, {r3, r4, r12, lr}
+       subge   r2, r2, #0x10
+#if !defined(__minix)
+       ldmia   sp!, {r4}               /* return r4 */
+#endif
+
+.Lmemcpy_l32:
+       adds    r2, r2, #0x14
+
+       /* blat 12 bytes at a time */
+.Lmemcpy_loop12:
+       ldmgeia r1!, {r3, r12, lr}
+       stmgeia r0!, {r3, r12, lr}
+       subges  r2, r2, #0x0c
+       bge     .Lmemcpy_loop12
+
+.Lmemcpy_l12:
+       adds    r2, r2, #8
+       blt     .Lmemcpy_l4
+
+       subs    r2, r2, #4
+       ldrlt   r3, [r1], #4
+       strlt   r3, [r0], #4
+       ldmgeia r1!, {r3, r12}
+       stmgeia r0!, {r3, r12}
+       subge   r2, r2, #4
+
+.Lmemcpy_l4:
+       /* less than 4 bytes to go */
+       adds    r2, r2, #4
+#if defined(__minix)
+       ldmeqia sp!, {r0, r4, r5}
+       moveq   r0, #0
+       ldmeqia sp!, {pc}
+#else
+#ifdef __APCS_26__
+       ldmeqia sp!, {r0, pc}^          /* done */
+#else
+       ldmeqia sp!, {r0, pc}           /* done */
+#endif
+#endif
+       /* copy the crud byte at a time */
+       cmp     r2, #2
+       ldrb    r3, [r1], #1
+       strb    r3, [r0], #1
+       ldrgeb  r3, [r1], #1
+       strgeb  r3, [r0], #1
+       ldrgtb  r3, [r1], #1
+       strgtb  r3, [r0], #1
+#if defined(__minix)
+       ldmia   sp!, {r0, r4, r5}
+       mov     r0, #0
+       ldmia   sp!, {pc}
+#else
+       ldmia   sp!, {r0, pc}
+#endif
+
+       /* erg - unaligned destination */
+.Lmemcpy_destul:
+       rsb     r12, r12, #4
+       cmp     r12, #2
+
+       /* align destination with byte copies */
+       ldrb    r3, [r1], #1
+       strb    r3, [r0], #1
+       ldrgeb  r3, [r1], #1
+       strgeb  r3, [r0], #1
+       ldrgtb  r3, [r1], #1
+       strgtb  r3, [r0], #1
+       subs    r2, r2, r12
+       blt     .Lmemcpy_l4             /* less than 4 bytes */
+
+       ands    r12, r1, #3
+       beq     .Lmemcpy_t8             /* we have an aligned source */
+
+       /* erg - unaligned source */
+       /* This is where it gets nasty ... */
+.Lmemcpy_srcul:
+       bic     r1, r1, #3
+       ldr     lr, [r1], #4
+       cmp     r12, #2
+       bgt     .Lmemcpy_srcul3
+       beq     .Lmemcpy_srcul2
+       cmp     r2, #0x0c
+       blt     .Lmemcpy_srcul1loop4
+       sub     r2, r2, #0x0c
+#if !defined(__minix)
+       stmdb   sp!, {r4, r5}
+#endif
+
+.Lmemcpy_srcul1loop16:
+#ifdef __ARMEB__
+       mov     r3, lr, lsl #8
+#else
+       mov     r3, lr, lsr #8
+#endif
+       ldmia   r1!, {r4, r5, r12, lr}
+#ifdef __ARMEB__
+       orr     r3, r3, r4, lsr #24
+       mov     r4, r4, lsl #8
+       orr     r4, r4, r5, lsr #24
+       mov     r5, r5, lsl #8
+       orr     r5, r5, r12, lsr #24
+       mov     r12, r12, lsl #8
+       orr     r12, r12, lr, lsr #24
+#else
+       orr     r3, r3, r4, lsl #24
+       mov     r4, r4, lsr #8
+       orr     r4, r4, r5, lsl #24
+       mov     r5, r5, lsr #8
+       orr     r5, r5, r12, lsl #24
+       mov     r12, r12, lsr #8
+       orr     r12, r12, lr, lsl #24
+#endif
+       stmia   r0!, {r3-r5, r12}
+       subs    r2, r2, #0x10
+       bge     .Lmemcpy_srcul1loop16
+#if !defined(__minix)
+       ldmia   sp!, {r4, r5}
+#endif
+       adds    r2, r2, #0x0c
+       blt     .Lmemcpy_srcul1l4
+
+.Lmemcpy_srcul1loop4:
+#ifdef __ARMEB__
+       mov     r12, lr, lsl #8
+#else
+       mov     r12, lr, lsr #8
+#endif
+       ldr     lr, [r1], #4
+#ifdef __ARMEB__
+       orr     r12, r12, lr, lsr #24
+#else
+       orr     r12, r12, lr, lsl #24
+#endif
+       str     r12, [r0], #4
+       subs    r2, r2, #4
+       bge     .Lmemcpy_srcul1loop4
+
+.Lmemcpy_srcul1l4:
+       sub     r1, r1, #3
+       b       .Lmemcpy_l4
+
+.Lmemcpy_srcul2:
+       cmp     r2, #0x0c
+       blt     .Lmemcpy_srcul2loop4
+       sub     r2, r2, #0x0c
+#if !defined(__minix)
+       stmdb   sp!, {r4, r5}
+#endif
+
+.Lmemcpy_srcul2loop16:
+#ifdef __ARMEB__
+       mov     r3, lr, lsl #16
+#else
+       mov     r3, lr, lsr #16
+#endif
+       ldmia   r1!, {r4, r5, r12, lr}
+#ifdef __ARMEB__
+       orr     r3, r3, r4, lsr #16
+       mov     r4, r4, lsl #16
+       orr     r4, r4, r5, lsr #16
+       mov     r5, r5, lsl #16
+       orr     r5, r5, r12, lsr #16
+       mov     r12, r12, lsl #16
+       orr     r12, r12, lr, lsr #16
+#else
+       orr     r3, r3, r4, lsl #16
+       mov     r4, r4, lsr #16
+       orr     r4, r4, r5, lsl #16
+       mov     r5, r5, lsr #16
+       orr     r5, r5, r12, lsl #16
+       mov     r12, r12, lsr #16
+       orr     r12, r12, lr, lsl #16
+#endif
+       stmia   r0!, {r3-r5, r12}
+       subs    r2, r2, #0x10
+       bge     .Lmemcpy_srcul2loop16
+#if !defined(__minix)
+       ldmia   sp!, {r4, r5}
+#endif
+       adds    r2, r2, #0x0c
+       blt     .Lmemcpy_srcul2l4
+
+.Lmemcpy_srcul2loop4:
+#ifdef __ARMEB__
+       mov     r12, lr, lsl #16
+#else
+       mov     r12, lr, lsr #16
+#endif
+       ldr     lr, [r1], #4
+#ifdef __ARMEB__
+       orr     r12, r12, lr, lsr #16
+#else
+       orr     r12, r12, lr, lsl #16
+#endif
+       str     r12, [r0], #4
+       subs    r2, r2, #4
+       bge     .Lmemcpy_srcul2loop4
+
+.Lmemcpy_srcul2l4:
+       sub     r1, r1, #2
+       b       .Lmemcpy_l4
+
+.Lmemcpy_srcul3:
+       cmp     r2, #0x0c
+       blt     .Lmemcpy_srcul3loop4
+       sub     r2, r2, #0x0c
+#if !defined(__minix)
+       stmdb   sp!, {r4, r5}
+#endif
+
+.Lmemcpy_srcul3loop16:
+#ifdef __ARMEB__
+       mov     r3, lr, lsl #24
+#else
+       mov     r3, lr, lsr #24
+#endif
+       ldmia   r1!, {r4, r5, r12, lr}
+#ifdef __ARMEB__
+       orr     r3, r3, r4, lsr #8
+       mov     r4, r4, lsl #24
+       orr     r4, r4, r5, lsr #8
+       mov     r5, r5, lsl #24
+       orr     r5, r5, r12, lsr #8
+       mov     r12, r12, lsl #24
+       orr     r12, r12, lr, lsr #8
+#else
+       orr     r3, r3, r4, lsl #8
+       mov     r4, r4, lsr #24
+       orr     r4, r4, r5, lsl #8
+       mov     r5, r5, lsr #24
+       orr     r5, r5, r12, lsl #8
+       mov     r12, r12, lsr #24
+       orr     r12, r12, lr, lsl #8
+#endif
+       stmia   r0!, {r3-r5, r12}
+       subs    r2, r2, #0x10
+       bge     .Lmemcpy_srcul3loop16
+#if !defined(__minix)
+       ldmia   sp!, {r4, r5}
+#endif
+       adds    r2, r2, #0x0c
+       blt     .Lmemcpy_srcul3l4
+
+.Lmemcpy_srcul3loop4:
+#ifdef __ARMEB__
+       mov     r12, lr, lsl #24
+#else
+       mov     r12, lr, lsr #24
+#endif
+       ldr     lr, [r1], #4
+#ifdef __ARMEB__
+       orr     r12, r12, lr, lsr #8
+#else
+       orr     r12, r12, lr, lsl #8
+#endif
+       str     r12, [r0], #4
+       subs    r2, r2, #4
+       bge     .Lmemcpy_srcul3loop4
+
+.Lmemcpy_srcul3l4:
+       sub     r1, r1, #1
+       b       .Lmemcpy_l4
+
+#if defined(__minix)
+LABEL(phys_copy_fault)         /* kernel can send us here */
+       ldmia   sp!, {r0, r4, r5}
+       ldmia   sp!, {pc}
+
+LABEL(phys_copy_fault_in_kernel)       /* kernel can send us here */
+       ldmia   sp!, {r0, r4, r5}
+       mrc     p15, 0, r0, c6, c0, 0   /* Read DFAR */
+       ldmia   sp!, {pc}
+#endif
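
A note on the unaligned-source paths above: the .Lmemcpy_srcul* loops only
ever issue word-aligned loads and reassemble the misaligned byte stream with
shift/OR pairs. A rough little-endian C sketch of the one-byte-offset case
(the .Lmemcpy_srcul1loop4 path; function and variable names here are
illustrative, not part of the patch):

    #include <stdint.h>
    #include <stddef.h>

    /* Copy 'words' 32-bit words to an aligned dst from a source that
     * starts one byte past the aligned pointer 'asrc', using only
     * aligned word loads, as the assembly does.
     */
    static void copy_srcoff1(uint32_t *dst, const uint32_t *asrc, size_t words)
    {
            uint32_t prev = *asrc++;            /* ldr  lr, [r1], #4         */
            while (words--) {
                    uint32_t out = prev >> 8;   /* mov r12, lr, lsr #8       */
                    prev = *asrc++;             /* ldr  lr, [r1], #4         */
                    out |= prev << 24;          /* orr r12, r12, lr, lsl #24 */
                    *dst++ = out;               /* str r12, [r0], #4         */
            }
    }

The two- and three-byte offsets work the same way with 16/16 and 24/8 bit
shift pairs; the big-endian variants swap the shift directions.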
diff --git a/kernel/arch/arm/phys_memset.S b/kernel/arch/arm/phys_memset.S
new file mode 100644 (file)
index 0000000..c320023
--- /dev/null
@@ -0,0 +1,274 @@
+/*     $NetBSD: memset.S,v 1.1 2005/12/20 19:28:49 christos Exp $      */
+
+/*
+ * Copyright 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed for the NetBSD Project by
+ *      Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ *    or promote products derived from this software without specific prior
+ *    written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ *    endorse or promote products derived from this software without specific
+ *    prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/asm.h>
+
+/*
+ * memset: Sets a block of memory to the specified value
+ *
+ * On entry:
+ *   r0 - dest address
+ *   r1 - byte to write
+ *   r2 - number of bytes to write
+ *
+ * On exit:
+ *   r0 - dest address
+ */
+#ifdef _BZERO
+/* LINTSTUB: Func: void bzero(void *, size_t) */
+ENTRY(bzero)
+       mov     r3, #0x00
+#else
+#if defined(__minix)
+/* LINTSTUB: Func: void *phys_memset(void *, int, size_t) */
+ENTRY(phys_memset)
+#else
+/* LINTSTUB: Func: void *memset(void *, int, size_t) */
+ENTRY(memset)
+#endif
+       and     r3, r1, #0xff           /* We deal with bytes */
+       mov     r1, r2
+#endif
+       cmp     r1, #0x04               /* Do we have less than 4 bytes */
+       mov     ip, r0
+       blt     .Lmemset_lessthanfour
+
+       /* Ok first we will word align the address */
+       ands    r2, ip, #0x03           /* Get the bottom two bits */
+       bne     .Lmemset_wordunaligned  /* The address is not word aligned */
+
+       /* We are now word aligned */
+.Lmemset_wordaligned:
+#ifndef _BZERO
+       orr     r3, r3, r3, lsl #8      /* Extend value to 16-bits */
+#endif
+#ifdef __XSCALE__
+       tst     ip, #0x04               /* Quad-align for Xscale */
+#else
+       cmp     r1, #0x10
+#endif
+#ifndef _BZERO
+       orr     r3, r3, r3, lsl #16     /* Extend value to 32-bits */
+#endif
+#ifdef __XSCALE__
+       subne   r1, r1, #0x04           /* Quad-align if necessary */
+       strne   r3, [ip], #0x04
+       cmp     r1, #0x10
+#endif
+       blt     .Lmemset_loop4          /* If less than 16 then use words */
+       mov     r2, r3                  /* Duplicate data */
+       cmp     r1, #0x80               /* If < 128 then skip the big loop */
+       blt     .Lmemset_loop32
+
+       /* Do 128 bytes at a time */
+.Lmemset_loop128:
+       subs    r1, r1, #0x80
+#ifdef __XSCALE__
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+#else
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+#endif
+       bgt     .Lmemset_loop128
+#if defined(__minix)
+       moveq   r0, #0
+#endif
+       RETc(eq)                        /* Zero length so just exit */
+
+       add     r1, r1, #0x80           /* Adjust for extra sub */
+
+       /* Do 32 bytes at a time */
+.Lmemset_loop32:
+       subs    r1, r1, #0x20
+#ifdef __XSCALE__
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+#else
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+#endif
+       bgt     .Lmemset_loop32
+#if defined(__minix)
+       moveq   r0, #0
+#endif
+       RETc(eq)                        /* Zero length so just exit */
+
+       adds    r1, r1, #0x10           /* Partially adjust for extra sub */
+
+       /* Deal with 16 bytes or more */
+#ifdef __XSCALE__
+       strged  r2, [ip], #0x08
+       strged  r2, [ip], #0x08
+#else
+       stmgeia ip!, {r2-r3}
+       stmgeia ip!, {r2-r3}
+#endif
+#if defined(__minix)
+       moveq   r0, #0
+#endif
+       RETc(eq)                        /* Zero length so just exit */
+
+       addlt   r1, r1, #0x10           /* Possibly adjust for extra sub */
+
+       /* We have at least 4 bytes so copy as words */
+.Lmemset_loop4:
+       subs    r1, r1, #0x04
+       strge   r3, [ip], #0x04
+       bgt     .Lmemset_loop4
+#if defined(__minix)
+       moveq   r0, #0
+#endif
+       RETc(eq)                        /* Zero length so just exit */
+
+#ifdef __XSCALE__
+       /* Compensate for 64-bit alignment check */
+       adds    r1, r1, #0x04
+#if defined(__minix)
+       moveq   r0, #0
+#endif
+       RETc(eq)
+       cmp     r1, #2
+#else
+       cmp     r1, #-2
+#endif
+
+       strb    r3, [ip], #0x01         /* Set 1 byte */
+       strgeb  r3, [ip], #0x01         /* Set another byte */
+       strgtb  r3, [ip]                /* and a third */
+#if defined(__minix)
+       mov     r0, #0
+#endif
+       RET                             /* Exit */
+
+.Lmemset_wordunaligned:
+       rsb     r2, r2, #0x004
+       strb    r3, [ip], #0x01         /* Set 1 byte */
+       cmp     r2, #0x02
+       strgeb  r3, [ip], #0x01         /* Set another byte */
+       sub     r1, r1, r2
+       strgtb  r3, [ip], #0x01         /* and a third */
+       cmp     r1, #0x04               /* More than 4 bytes left? */
+       bge     .Lmemset_wordaligned    /* Yup */
+
+.Lmemset_lessthanfour:
+       cmp     r1, #0x00
+#if defined(__minix)
+       moveq   r0, #0
+#endif
+       RETc(eq)                                /* Zero length so exit */
+       strb    r3, [ip], #0x01         /* Set 1 byte */
+       cmp     r1, #0x02
+       strgeb  r3, [ip], #0x01         /* Set another byte */
+       strgtb  r3, [ip]                /* and a third */
+#if defined(__minix)
+       mov     r0, #0
+#endif
+       RET                             /* Exit */
+
+#if defined(__minix)
+LABEL(memset_fault)            /* kernel can send us here */
+       mov     r0, #0
+       RET
+
+LABEL(memset_fault_in_kernel)  /* kernel can send us here */
+       mrc     p15, 0, r0, c6, c0, 0   /* Read DFAR */
+       RET
+#endif
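
For both files above, the MINIX-only *_fault labels implement the fault
protocol: on success the routines return 0 (the mov/moveq r0, #0 paths). If
a page fault occurs mid-operation, the kernel redirects execution to the
*_fault labels: phys_copy_fault unwinds the stack and returns the saved
(nonzero) destination pointer, and the *_fault_in_kernel variants return
the faulting address read from DFAR (memset_fault, as written above, also
returns 0). A hedged caller-side sketch; the prototype follows the LINTSTUB
line above, and the wrapper name is illustrative:

    void *phys_copy(void *src, void *dst, size_t len);  /* per the LINTSTUB */

    /* Returns 0 if the whole copy completed, -1 if it faulted. */
    static int try_phys_copy(void *src, void *dst, size_t len)
    {
            /* a nonzero result means the kernel vectored execution to
             * phys_copy_fault (or, for an in-kernel fault, returned the
             * DFAR fault address) */
            return phys_copy(src, dst, len) == 0 ? 0 : -1;
    }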
diff --git a/kernel/arch/arm/pre_init.c b/kernel/arch/arm/pre_init.c
new file mode 100644 (file)
index 0000000..970e44d
--- /dev/null
@@ -0,0 +1,240 @@
+
+#define UNPAGED 1      /* for proper kmain() prototype */
+
+#include "kernel.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <minix/minlib.h>
+#include <minix/const.h>
+#include <minix/types.h>
+#include <minix/type.h>
+#include <minix/com.h>
+#include <sys/param.h>
+#include <sys/reboot.h>
+#include "string.h"
+#include "arch_proto.h"
+#include "libexec.h"
+#include "direct_utils.h"
+#include "serial.h"
+#include "glo.h"
+#include <machine/multiboot.h>
+
+#if USE_SYSDEBUG
+#define MULTIBOOT_VERBOSE 1
+#endif
+
+/* to-be-built kinfo struct, diagnostics buffer */
+kinfo_t kinfo;
+struct kmessages kmessages;
+
+/* pg_utils.c uses this; in this phase, there is a 1:1 mapping. */
+phys_bytes vir2phys(void *addr) { return (phys_bytes) addr; } 
+
+/* String length used for mb_itoa */
+#define ITOA_BUFFER_SIZE 20
+
+/* Kernel may use memory */
+int kernel_may_alloc = 1;
+
+static int mb_set_param(char *bigbuf, char *name, char *value, kinfo_t *cbi)
+{
+       char *p = bigbuf;
+       char *bufend = bigbuf + MULTIBOOT_PARAM_BUF_SIZE;
+       char *q;
+       int namelen = strlen(name);
+       int valuelen = strlen(value);
+
+       /* Some variables we recognize */
+       if(!strcmp(name, SERVARNAME)) { cbi->do_serial_debug = 1; }
+       if(!strcmp(name, SERBAUDVARNAME)) { cbi->serial_debug_baud = atoi(value); }
+       
+       /* Delete the item if it already exists */
+       while (*p) {
+               if (strncmp(p, name, namelen) == 0 && p[namelen] == '=') {
+                       q = p;
+                       while (*q) q++;
+                       for (q++; q < bufend; q++, p++)
+                               *p = *q;
+                       break;
+               }
+               while (*p++)
+                       ;
+               p++;
+       }
+       
+       for (p = bigbuf; p < bufend && (*p || *(p + 1)); p++)
+               ;
+       if (p > bigbuf) p++;
+       
+       /* Make sure there's enough space for the new parameter */
+       if (p + namelen + valuelen + 3 > bufend)
+               return -1;
+       
+       strcpy(p, name);
+       p[namelen] = '=';
+       strcpy(p + namelen + 1, value);
+       p[namelen + valuelen + 1] = 0;
+       p[namelen + valuelen + 2] = 0;
+       return 0;
+}
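+
+/* The parameter buffer filled in above is a sequence of NUL-terminated
+ * "name=value" strings ending in an empty string, e.g. (hypothetical
+ * contents): "console=tty00\0hz=1000\0\0". mb_set_param() first removes
+ * any existing entry for 'name', then appends "name=value" followed by
+ * the double-NUL terminator.
+ */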
+
+int overlaps(multiboot_module_t *mod, int n, int cmp_mod)
+{
+       multiboot_module_t *cmp = &mod[cmp_mod];
+       int m;
+
+#define INRANGE(mod, v) ((v) >= mod->mod_start && (v) <= mod->mod_end)
+#define OVERLAP(mod1, mod2) (INRANGE(mod1, mod2->mod_start) || \
+                       INRANGE(mod1, mod2->mod_end))
+       for(m = 0; m < n; m++) {
+               multiboot_module_t *thismod = &mod[m];
+               if(m == cmp_mod) continue;
+               if(OVERLAP(thismod, cmp))
+                       return 1;
+       }
+       return 0;
+}
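+
+/* Worked example (hypothetical addresses): with thismod = [0x1000,0x2000]
+ * and cmp = [0x1800,0x2800], INRANGE(thismod, cmp->mod_start) holds, so
+ * OVERLAP() reports a clash. A cmp that fully covers thismod is caught
+ * too, because get_parameters() calls overlaps() once per module, so
+ * every pair is tested in both orientations.
+ */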
+
+void get_parameters(u32_t ebx, kinfo_t *cbi) 
+{
+       multiboot_memory_map_t *mmap;
+       multiboot_info_t *mbi = &cbi->mbi;
+       int var_i,value_i, m, k;
+       char *p;
+       extern char _kern_phys_base, _kern_vir_base, _kern_size,
+               _kern_unpaged_start, _kern_unpaged_end;
+       phys_bytes kernbase = (phys_bytes) &_kern_phys_base,
+               kernsize = (phys_bytes) &_kern_size;
+#define BUF 1024
+       static char cmdline[BUF];
+
+       /* get our own copy of the multiboot info struct and module list */
+       memcpy((void *) mbi, (void *) ebx, sizeof(*mbi));
+
+       /* Set various bits of info for the higher-level kernel. */
+       cbi->mem_high_phys = 0;
+       cbi->user_sp = (vir_bytes) &_kern_vir_base;
+       cbi->vir_kern_start = (vir_bytes) &_kern_vir_base;
+       cbi->bootstrap_start = (vir_bytes) &_kern_unpaged_start;
+       cbi->bootstrap_len = (vir_bytes) &_kern_unpaged_end -
+               cbi->bootstrap_start;
+       cbi->kmess = &kmess;
+
+       /* set some configurable defaults */
+       cbi->do_serial_debug = 1;
+       cbi->serial_debug_baud = 115200;
+
+       /* parse boot command line */
+       if (mbi->flags&MULTIBOOT_INFO_CMDLINE) {
+               static char var[BUF];
+               static char value[BUF];
+
+               /* Override values with cmdline argument */
+               memcpy(cmdline, (void *) mbi->cmdline, BUF);
+               p = cmdline;
+               while (*p) {
+                       var_i = 0;
+                       value_i = 0;
+                       while (*p == ' ') p++;
+                       if (!*p) break;
+                       while (*p && *p != '=' && *p != ' ' && var_i < BUF - 1) 
+                               var[var_i++] = *p++ ;
+                       var[var_i] = 0;
+                       if (*p++ != '=') continue; /* skip if not name=value */
+                       while (*p && *p != ' ' && value_i < BUF - 1) 
+                               value[value_i++] = *p++ ;
+                       value[value_i] = 0;
+                       
+                       mb_set_param(cbi->param_buf, var, value, cbi);
+               }
+       }
+
+       /* round user stack down to leave a gap to catch kernel
+        * stack overflow; and to distinguish kernel and user addresses
+        * at a glance (0xf.. vs 0xe..) 
+        */
+       cbi->user_sp &= 0xF0000000;
+       cbi->user_end = cbi->user_sp;
+
+       /* kernel bytes, not counting the bootstrap code/data, which is
+        * currently still needed but will be freed after bootstrapping.
+        */
+       kinfo.kernel_allocated_bytes = (phys_bytes) &_kern_size;
+
+       assert(!(cbi->bootstrap_start % ARM_PAGE_SIZE));
+       cbi->bootstrap_len = rounddown(cbi->bootstrap_len, ARM_PAGE_SIZE);
+       assert(mbi->flags & MULTIBOOT_INFO_MODS);
+       assert(mbi->mods_count < MULTIBOOT_MAX_MODS);
+       assert(mbi->mods_count > 0);
+       memcpy(&cbi->module_list, (void *) mbi->mods_addr,
+               mbi->mods_count * sizeof(multiboot_module_t));
+       
+       memset(cbi->memmap, 0, sizeof(cbi->memmap));
+       /* mem_map has a variable layout */
+       if(mbi->flags & MULTIBOOT_INFO_MEM_MAP) {
+               cbi->mmap_size = 0;
+               for (mmap = (multiboot_memory_map_t *) mbi->mmap_addr;
+                    (unsigned long) mmap < mbi->mmap_addr + mbi->mmap_length;
+                      mmap = (multiboot_memory_map_t *) 
+                       ((unsigned long) mmap + mmap->size + sizeof(mmap->size))) {
+                       if(mmap->type != MULTIBOOT_MEMORY_AVAILABLE) continue;
+                       add_memmap(cbi, mmap->addr, mmap->len);
+               }
+       } else {
+               assert(mbi->flags & MULTIBOOT_INFO_MEMORY);
+               add_memmap(cbi, 0, mbi->mem_lower_unused*1024);
+               add_memmap(cbi, 0x100000, mbi->mem_upper_unused*1024);
+       }
+
+       /* Sanity check: neither the kernel nor any of the modules may
+        * overlap with each other. Pretend the kernel is an extra module
+        * for a moment.
+        */
+       k = mbi->mods_count;
+       assert(k < MULTIBOOT_MAX_MODS);
+       cbi->module_list[k].mod_start = kernbase;
+       cbi->module_list[k].mod_end = kernbase + kernsize;
+       cbi->mods_with_kernel = mbi->mods_count+1;
+       cbi->kern_mod = k;
+
+       for(m = 0; m < cbi->mods_with_kernel; m++) {
+#if 0
+               printf("checking overlap of module %08lx-%08lx\n",
+                 cbi->module_list[m].mod_start, cbi->module_list[m].mod_end);
+#endif
+               if(overlaps(cbi->module_list, cbi->mods_with_kernel, m))
+                       panic("overlapping boot modules/kernel");
+               /* We cut out the bits of memory that we know are
+                * occupied by the kernel and boot modules.
+                */
+               cut_memmap(cbi,
+                       cbi->module_list[m].mod_start, 
+                       cbi->module_list[m].mod_end);
+       }
+}
+
+kinfo_t *pre_init(u32_t magic, u32_t ebx)
+{
+       /* Get our own copy of the boot params pointed to by ebx.
+        * Here we find out whether we should do serial output.
+        */
+       get_parameters(ebx, &kinfo);
+
+       /* Make and load a pagetable that will map the kernel to where it
+        * should be; but include a 1:1 mapping first, so that this code
+        * keeps running at its current address once paging is enabled.
+        */
+       pg_clear();
+       pg_identity(&kinfo);
+       kinfo.freepde_start = pg_mapkernel();
+       pg_load();
+       vm_enable_paging();
+
+       /* Done, return boot info so it can be passed to kmain(). */
+       return &kinfo;
+}
+
+int send_sig(endpoint_t proc_nr, int sig_nr) { return 0; }
+void minix_shutdown(timer_t *t) { arch_shutdown(RBT_PANIC); }
+void busy_delay_ms(int x) { }
+
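As the final comment in pre_init() notes, the returned kinfo pointer is
handed on to kmain(); the assembly glue doing that (presumably in the
arch's startup code) is not shown in this hunk. A sketch of the flow,
under that assumption:

    kinfo_t *ki = pre_init(magic, ebx); /* runs unpaged: parses boot
                                         * params, builds the 1:1 plus
                                         * kernel mapping, enables paging */
    kmain(ki);                          /* continues with paging enabled */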
diff --git a/kernel/arch/arm/procoffsets.cf b/kernel/arch/arm/procoffsets.cf
new file mode 100644 (file)
index 0000000..3f59253
--- /dev/null
@@ -0,0 +1,24 @@
+
+include "kernel.h"
+include "proc.h"
+
+struct proc
+member REG0 p_reg.retreg
+member REG1 p_reg.r1
+member REG2 p_reg.r2
+member REG3 p_reg.r3
+member REG4 p_reg.r4
+member REG5 p_reg.r5
+member REG6 p_reg.r6
+member REG7 p_reg.r7
+member REG8 p_reg.r8
+member REG9 p_reg.r9
+member REG10 p_reg.r10
+member FPREG p_reg.fp
+member REG12 p_reg.r12
+member SPREG p_reg.sp
+member LRREG p_reg.lr
+member PCREG p_reg.pc
+member PSREG p_reg.psr
+member P_TTBR p_seg.p_ttbr
+
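procoffsets.cf is not C source: it is an offset-description file that the
build processes into kernel/procoffsets.h (pulled in via sconst.h below),
so that assembly code can address struct proc fields by name. A
hypothetical excerpt of the generated header; the actual offsets depend on
the struct layout:

    #define REG0   24   /* offsetof(struct proc, p_reg.retreg) */
    #define SPREG  76   /* offsetof(struct proc, p_reg.sp)     */
    #define P_TTBR 132  /* offsetof(struct proc, p_seg.p_ttbr) */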
diff --git a/kernel/arch/arm/protect.c b/kernel/arch/arm/protect.c
new file mode 100644 (file)
index 0000000..c2afc44
--- /dev/null
@@ -0,0 +1,159 @@
+/* This file contains code for initialization of memory protection: it
+ * installs the exception vector base, sets up the per-CPU kernel stack
+ * bookkeeping, and builds the post-relocation bootstrap page table.
+ */
+
+#include <string.h>
+#include <assert.h>
+#include <machine/multiboot.h>
+
+#include "kernel/kernel.h"
+#include "kernel/proc.h"
+#include "archconst.h"
+
+#include "arch_proto.h"
+
+#include <libexec.h>
+
+struct tss_s tss[CONFIG_MAX_CPUS];
+extern int exc_vector_table;
+
+int prot_init_done = 0;
+
+phys_bytes vir2phys(void *vir)
+{
+       extern char _kern_vir_base, _kern_phys_base;    /* in kernel.lds */
+       u32_t offset = (vir_bytes) &_kern_vir_base -
+               (vir_bytes) &_kern_phys_base;
+       return (phys_bytes)vir - offset;
+}
+
+int tss_init(unsigned cpu, void * kernel_stack)
+{
+
+       struct tss_s * t = &tss[cpu];
+
+       /*
+        * make space for process pointer and cpu id and point to the first
+        * usable word
+        */
+       t->sp0 = ((unsigned) kernel_stack) - ARM_STACK_TOP_RESERVED;
+       /*
+        * set the cpu id at the top of the stack, so we know on which cpu
+        * this stack is in use when we trap to the kernel
+        */
+       *((reg_t *)(t->sp0 + 1 * sizeof(reg_t))) = cpu;
+
+       return 0;
+}
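+
+/* Stack-top layout implied by tss_init() above (a sketch; the
+ * process-pointer slot is inferred from the "make space" comment,
+ * assuming ARM_STACK_TOP_RESERVED spans two words):
+ *
+ *   t->sp0 + 2*sizeof(reg_t)  == original kernel_stack top
+ *   t->sp0 + 1*sizeof(reg_t)  -> cpu id (written above)
+ *   t->sp0                    -> process pointer slot (inferred)
+ */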
+
+multiboot_module_t *bootmod(int pnr)
+{
+       int i;
+
+       assert(pnr >= 0);
+
+       /* Search for desired process in boot process
+        * list. The first NR_TASKS ones do not correspond
+        * to a module, however, so we don't search those.
+        */
+       for(i = NR_TASKS; i < NR_BOOT_PROCS; i++) {
+               int p;
+               p = i - NR_TASKS;
+               if(image[i].proc_nr == pnr) {
+                       assert(p < MULTIBOOT_MAX_MODS);
+                       assert(p < kinfo.mbi.mods_count);
+                       return &kinfo.module_list[p];
+               }
+       }
+
+       panic("boot module %d not found", pnr);
+}
+
+int booting_cpu = 0;
+
+void prot_init()
+{
+  write_vbar((reg_t)&exc_vector_table);
+
+  /* Set up a new post-relocate bootstrap pagetable so that
+   * we can map in VM, and we no longer rely on pre-relocated
+   * data.
+   */
+
+  pg_clear();
+  pg_identity(&kinfo); /* Still need 1:1 for device memory. */
+  pg_mapkernel();
+  pg_load();
+
+  prot_init_done = 1;
+}
+
+static int alloc_for_vm = 0;
+
+void arch_post_init(void)
+{
+  /* Let memory mapping code know what's going on at bootstrap time */
+  struct proc *vm;
+  vm = proc_addr(VM_PROC_NR);
+  get_cpulocal_var(ptproc) = vm;
+  pg_info(&vm->p_seg.p_ttbr, &vm->p_seg.p_ttbr_v);
+}
+
+int libexec_pg_alloc(struct exec_info *execi, off_t vaddr, size_t len)
+{
+       pg_map(PG_ALLOCATEME, vaddr, vaddr+len, &kinfo);
+       pg_load();
+       memset((char *) vaddr, 0, len);
+       alloc_for_vm += len;
+       return OK;
+}
+
+void arch_boot_proc(struct boot_image *ip, struct proc *rp)
+{
+       multiboot_module_t *mod;
+
+       if(rp->p_nr < 0) return;
+
+       mod = bootmod(rp->p_nr);
+
+       /* Important special case: we put VM in the bootstrap pagetable
+        * so it can run.
+        */
+
+       if(rp->p_nr == VM_PROC_NR) {
+               struct exec_info execi;
+
+               memset(&execi, 0, sizeof(execi));
+
+               /* exec parameters */
+               execi.stack_high = kinfo.user_sp;
+               execi.stack_size = 32 * 1024;   /* not too crazy as it must be preallocated */
+               execi.proc_e = ip->endpoint;
+               execi.hdr = (char *) mod->mod_start; /* phys mem direct */
+               execi.hdr_len = mod->mod_end - mod->mod_start;
+               strcpy(execi.progname, ip->proc_name);
+               execi.frame_len = 0;
+
+               /* callbacks for use in the kernel */
+               execi.copymem = libexec_copy_memcpy;
+               execi.clearmem = libexec_clear_memset;
+               execi.allocmem_prealloc = libexec_pg_alloc;
+               execi.allocmem_ondemand = libexec_pg_alloc;
+               execi.clearproc = NULL;
+
+               /* parse VM ELF binary and alloc/map it into bootstrap pagetable */
+               libexec_load_elf(&execi);
+
+               /* Initialize the server stack pointer. Take it down three words
+                * to give startup code something to use as "argc", "argv" and "envp".
+                */
+               arch_proc_init(rp, execi.pc, kinfo.user_sp - 3*4, ip->proc_name);
+
+               /* Free VM blob that was just copied into existence. */
+               cut_memmap(&kinfo, mod->mod_start, mod->mod_end);
+
+               /* Remember how much memory was allocated for VM. */
+               kinfo.vm_allocated_bytes = alloc_for_vm;
+       }
+}
diff --git a/kernel/arch/arm/sconst.h b/kernel/arch/arm/sconst.h
new file mode 100644 (file)
index 0000000..6282717
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef __SCONST_H__
+#define __SCONST_H__
+
+#include "kernel/const.h"
+#include "kernel/procoffsets.h"
+
+#endif /* __SCONST_H__ */
diff --git a/kernel/arch/arm/serial.h b/kernel/arch/arm/serial.h
new file mode 100644 (file)
index 0000000..d403ee4
--- /dev/null
@@ -0,0 +1,7 @@
+
+#ifndef _KERN_SERIAL_H
+#define _KERN_SERIAL_H
+
+#include "omap_serial.h"
+
+#endif
diff --git a/kernel/arch/arm/timer.h b/kernel/arch/arm/timer.h
new file mode 100644 (file)
index 0000000..df911b5
--- /dev/null
@@ -0,0 +1,7 @@
+
+#ifndef _KERN_TIMER_H
+#define _KERN_TIMER_H
+
+#include "omap_timer.h"
+
+#endif
index b7b7c1ec3b0fe434a3216ad12a2de46288690f1d..62111f360dbe22660960a699f46c8caf22771c32 100644 (file)
@@ -68,6 +68,10 @@ void stop_8253A_timer(void)
        outb(TIMER0, 0);
 }
 
+void arch_timer_int_handler(void)
+{
+}
+
 static int calib_cpu_handler(irq_hook_t * UNUSED(hook))
 {
        u64_t tsc;
index a768c71a39158c7c41b4f5ee58c2c3e6b1f512d0..dfdcbd038a992d3303406d81e1b8040ffe66aee3 100644 (file)
@@ -5,5 +5,6 @@
 
 int init_8253A_timer(unsigned freq);
 void stop_8253A_timer(void);
+void arch_timer_int_handler(void);
 
 #endif /* __CLOCK_X86_H__ */
index 7d478eec7203dc662c48d939ed82bbe3a604e60b..2ea4c17d0b7ca5b3d13427821a78280227b0c17a 100644 (file)
@@ -146,6 +146,8 @@ int timer_int_handler(void)
 
        }
 
+       arch_timer_int_handler();
+
        return(1);                                      /* reenable interrupts */
 }
 
index ad51a69c11fcdb393742f3f624484fa06359d2a3..9f89363288da832a79ae5875b83284f7a59535d9 100644 (file)
@@ -251,12 +251,16 @@ void print_proc(struct proc *pp)
        endpoint_t dep;
 
        printf("%d: %s %d prio %d time %d/%d cycles 0x%x%08x cpu %2d "
-                       "cr3 0x%lx rts %s misc %s sched %s ",
+                       "pdbr 0x%lx rts %s misc %s sched %s ",
                proc_nr(pp), pp->p_name, pp->p_endpoint, 
                pp->p_priority, pp->p_user_time,
                pp->p_sys_time, ex64hi(pp->p_cycles),
                ex64lo(pp->p_cycles), pp->p_cpu,
+#if defined(__i386__)
                pp->p_seg.p_cr3,
+#elif defined(__arm__)
+               pp->p_seg.p_ttbr,
+#endif
                rtsflagstr(pp->p_rts_flags), miscflagstr(pp->p_misc_flags),
                schedulerstr(pp->p_scheduler));
 
index 1fff91c300aff746bc3fbe9fb33a946f63b45a19..0518551e12a6305954d0263c62caf4eb85acfab6 100644 (file)
@@ -380,7 +380,11 @@ check_misc_flags:
         */
        p->p_misc_flags &= ~MF_CONTEXT_SET;
 
+#if defined(__i386__)
        assert(p->p_seg.p_cr3 != 0);
+#elif defined(__arm__)
+       assert(p->p_seg.p_ttbr != 0);
+#endif
 #ifdef CONFIG_SMP
        if (p->p_misc_flags & MF_FLUSH_TLB) {
                if (tlb_must_refresh)
index cb6ef14e8b33ae4e36de5091d286da3ca708c738..8fd7de33a700de597e529cb9a45d5123c58e464d 100644 (file)
@@ -209,8 +209,10 @@ void system_init(void)
 
   /* Device I/O. */
   map(SYS_IRQCTL, do_irqctl);                  /* interrupt control operations */ 
+#if defined(__i386__)
   map(SYS_DEVIO, do_devio);            /* inb, inw, inl, outb, outw, outl */ 
   map(SYS_VDEVIO, do_vdevio);                  /* vector with devio requests */ 
+#endif
 
   /* Memory management. */
   map(SYS_MEMSET, do_memset);          /* write char to memory area */
@@ -255,11 +257,11 @@ void system_init(void)
   map(SYS_READBIOS, do_readbios);      /* read from BIOS locations */
   map(SYS_IOPENABLE, do_iopenable);    /* Enable I/O */
   map(SYS_SDEVIO, do_sdevio);          /* phys_insb, _insw, _outsb, _outsw */
+#endif
 
   /* Machine state switching. */
   map(SYS_SETMCONTEXT, do_setmcontext); /* set machine context */
   map(SYS_GETMCONTEXT, do_getmcontext); /* get machine context */
-#endif
 
   /* Scheduling */
   map(SYS_SCHEDULE, do_schedule);      /* reschedule a process */
index 50f4e41b387ce05d8a09876253bd58bdd1dd307e..dc452d291d349d3c936576b08a6d53fc73e30ce7 100644 (file)
@@ -15,8 +15,6 @@ SRCS+=        \
        do_stime.c \
        do_vtimer.c \
        do_irqctl.c \
-       do_devio.c \
-       do_vdevio.c \
        do_copy.c \
        do_umap.c \
        do_umap_remote.c \
@@ -42,3 +40,9 @@ SRCS+=        \
        do_schedctl.c \
        do_statectl.c
 
+.if ${MACHINE_ARCH} == "i386"
+SRCS+=  \
+       do_devio.c \
+       do_vdevio.c
+.endif
+
index 1ba3369a03594c1e7b7d479258c753db381b9991..dfad1db37a25178dc6acfffbf2a3e48d15e1592f 100644 (file)
@@ -71,7 +71,9 @@ int do_fork(struct proc * caller, message * m_ptr)
   rpc->p_user_time = 0;                /* set all the accounting times to 0 */
   rpc->p_sys_time = 0;
 
+#if defined(__i386__)
   rpc->p_reg.psw &= ~TRACEBIT;         /* clear trace bit */
+#endif
   rpc->p_misc_flags &=
        ~(MF_VIRT_TIMER | MF_PROF_TIMER | MF_SC_TRACE | MF_SPROF_SEEN);
   rpc->p_virt_left = 0;                /* disable, clear the process-virtual timers */
@@ -116,8 +118,13 @@ int do_fork(struct proc * caller, message * m_ptr)
   RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
   (void) sigemptyset(&rpc->p_pending);
 
+#if defined(__i386__)
   rpc->p_seg.p_cr3 = 0;
   rpc->p_seg.p_cr3_v = NULL;
+#elif defined(__arm__)
+  rpc->p_seg.p_ttbr = 0;
+  rpc->p_seg.p_ttbr_v = NULL;
+#endif
 
   return OK;
 }
index f96a727bebd0cebe8f85c5e81d8ee33f1e73b64b..b3603179cbe48956d611595023910a3661c32a51 100644 (file)
@@ -34,9 +34,11 @@ int do_sigreturn(struct proc * caller, message * m_ptr)
        KERNEL, (vir_bytes) &sc, sizeof(struct sigcontext))) != OK)
        return r;
 
+#if defined(__i386__)
   /* Restore user bits of psw from sc, maintain system bits from proc. */
   sc.sc_psw  =  (sc.sc_psw & X86_FLAGS_USER) |
                 (rp->p_reg.psw & ~X86_FLAGS_USER);
+#endif
 
 #if defined(__i386__)
   /* Don't panic kernel if user gave bad selectors. */
index 10dd4bcb92709e5f381bedad1a63917f1a749514..83f30c5b6ff20c8127181cb193da58859e1417d5 100644 (file)
@@ -87,7 +87,9 @@ int do_trace(struct proc * caller, message * m_ptr)
   switch (tr_request) {
   case T_STOP:                 /* stop process */
        RTS_SET(rp, RTS_P_STOP);
+#if defined(__i386__)
        rp->p_reg.psw &= ~TRACEBIT;     /* clear trace bit */
+#endif
        rp->p_misc_flags &= ~MF_SC_TRACE;       /* clear syscall trace flag */
        return(OK);
 
@@ -148,11 +150,13 @@ int do_trace(struct proc * caller, message * m_ptr)
            i == (int) &((struct proc *) 0)->p_reg.ss)
                return(EFAULT);
 #endif
+#if defined(__i386__)
        if (i == (int) &((struct proc *) 0)->p_reg.psw)
                /* only selected bits are changeable */
                SETPSW(rp, tr_data);
        else
                *(reg_t *) ((char *) &rp->p_reg + i) = (reg_t) tr_data;
+#endif
        m_ptr->CTL_DATA = 0;
        break;
 
@@ -166,7 +170,9 @@ int do_trace(struct proc * caller, message * m_ptr)
        break;
 
   case T_STEP:                 /* set trace bit */
+#if defined(__i386__)
        rp->p_reg.psw |= TRACEBIT;
+#endif
        RTS_UNSET(rp, RTS_P_STOP);
        m_ptr->CTL_DATA = 0;
        break;
index e431270cd0e7dcb7f3a6f50ac677ef5ad618910b..352f33ce8099b5f8740e2fde6836c284a7996da4 100644 (file)
@@ -4,7 +4,7 @@
 /**========================================================================* */
 /*                           IPC assembly routines                       * */
 /**========================================================================* */
-ENTRY(_send)
+ENTRY(_send_orig)
        push    {fp}
        mov     fp, sp
        mov     r2, r1       /* r2 = msg ptr */
@@ -15,7 +15,7 @@ ENTRY(_send)
        pop     {fp}
        bx      lr
 
-ENTRY(_receive)
+ENTRY(_receive_orig)
        push    {fp}
        mov     fp, sp
        push    {r2}         /* save status ptr */
@@ -29,7 +29,7 @@ ENTRY(_receive)
        pop     {fp}
        bx      lr
 
-ENTRY(_sendrec)
+ENTRY(_sendrec_orig)
        push    {fp}
        mov     fp, sp
        mov     r2, r1       /* r2 = msg ptr */
@@ -55,7 +55,7 @@ ENTRY(_minix_kernel_info_struct)
        pop     {fp}
        bx      lr
 
-ENTRY(_notify)
+ENTRY(_notify_orig)
        push    {fp}
        mov     fp, sp
        mov     r1, r0       /* r1 = src_dest */
@@ -65,7 +65,7 @@ ENTRY(_notify)
        pop     {fp}
        bx      lr
 
-ENTRY(_sendnb)
+ENTRY(_sendnb_orig)
        push    {fp}
        mov     fp, sp
        mov     r2, r1       /* r2 = msg ptr */
@@ -77,7 +77,7 @@ ENTRY(_sendnb)
        bx      lr
 
 
-ENTRY(_do_kernel_call)
+ENTRY(_do_kernel_call_orig)
        /* r0 already holds msg ptr */
        mov     r3, #KERVEC /* r3 determines the SVC type */
        svc     #0          /* trap to kernel */
index f91eb4fd84a6f4ba06d6e9b721ee6573c48d256c..7301bacb43a262b02fbc58a2c77539f8b5a6f70c 100644 (file)
@@ -1,7 +1,7 @@
 #include <minix/ipcconst.h>
 #include <machine/asm.h>
 
-ENTRY(_senda)
+ENTRY(_senda_orig)
        push    {fp}
        mov     fp, sp
        mov     r2, r0       /* r2 = table */
diff --git a/servers/vm/arch/arm/Makefile.inc b/servers/vm/arch/arm/Makefile.inc
new file mode 100644 (file)
index 0000000..f3d71c8
--- /dev/null
@@ -0,0 +1,5 @@
+.include <bsd.own.mk>
+
+# Arch-specific sources
+.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}
+SRCS+= pagetable.c #util.S
diff --git a/servers/vm/arch/arm/memory.h b/servers/vm/arch/arm/memory.h
new file mode 100644 (file)
index 0000000..4ceb046
--- /dev/null
@@ -0,0 +1,12 @@
+#include <machine/vm.h>
+
+/* And what is the highest addressable piece of memory, when in paged
+ * mode?
+ */
+#define VM_DATATOP     kernel_boot_info.user_end
+#define VM_STACKTOP    kernel_boot_info.user_sp
+
+#define SLAB_PAGESIZE  ARM_PAGE_SIZE
+#define VM_PAGE_SIZE   ARM_PAGE_SIZE
+
+#define CLICKSPERPAGE (ARM_PAGE_SIZE/CLICK_SIZE)
diff --git a/servers/vm/arch/arm/pagefaults.h b/servers/vm/arch/arm/pagefaults.h
new file mode 100644 (file)
index 0000000..757cb7f
--- /dev/null
@@ -0,0 +1,14 @@
+
+#ifndef _PAGEFAULTS_H
+#define _PAGEFAULTS_H 1
+
+#include <machine/vm.h>
+
+#define PFERR_PROT(e)  ((ARM_VM_PFE_FS(e) == ARM_VM_PFE_L1PERM) \
+                        || (ARM_VM_PFE_FS(e) == ARM_VM_PFE_L2PERM))
+#define PFERR_NOPAGE(e) (!PFERR_PROT(e))
+#define PFERR_WRITE(e) ((e) & ARM_VM_PFE_W)
+#define PFERR_READ(e)  (!((e) & ARM_VM_PFE_W))
+
+#endif
+
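The PFERR_* macros classify the ARM fault-status bits delivered with a
page fault. A sketch of how a fault handler might branch on them (the
handler name and skeleton are illustrative, not VM's actual code):

    static void handle_fault(u32_t err, vir_bytes addr)
    {
            int wr = PFERR_WRITE(err);  /* faulting access was a write */

            if (PFERR_PROT(err)) {
                    /* L1/L2 permission fault on an existing mapping,
                     * e.g. a write hitting a read-only page */
            } else {
                    /* PFERR_NOPAGE(err): no mapping at 'addr';
                     * demand-allocate and map a page there */
            }
            (void) wr; (void) addr;
    }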
diff --git a/servers/vm/arch/arm/pagetable.c b/servers/vm/arch/arm/pagetable.c
new file mode 100644 (file)
index 0000000..841fd41
--- /dev/null
@@ -0,0 +1,1261 @@
+
+#define _SYSTEM 1
+#define _POSIX_SOURCE 1
+
+#include <minix/callnr.h>
+#include <minix/com.h>
+#include <minix/config.h>
+#include <minix/const.h>
+#include <minix/ds.h>
+#include <minix/endpoint.h>
+#include <minix/keymap.h>
+#include <minix/minlib.h>
+#include <minix/type.h>
+#include <minix/ipc.h>
+#include <minix/sysutil.h>
+#include <minix/syslib.h>
+#include <minix/safecopies.h>
+#include <minix/cpufeature.h>
+#include <minix/bitmap.h>
+#include <minix/debug.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <env.h>
+#include <stdio.h>
+#include <fcntl.h>
+
+#include "proto.h"
+#include "glo.h"
+#include "util.h"
+#include "vm.h"
+#include "sanitycheck.h"
+
+#include "memory.h"
+
+static int vm_self_pages;
+
+/* PDE used to map in kernel, kernel physical address. */
+static int pagedir_pde = -1;
+static u32_t pagedir_pde_val;
+
+static multiboot_module_t *kern_mb_mod = NULL;
+static size_t kern_size = 0;
+static int kern_start_pde = -1;
+
+/* 1MB page size available in hardware? */
+static int bigpage_ok = 1;
+
+/* Our process table entry. */
+struct vmproc *vmprocess = &vmproc[VM_PROC_NR];
+
+/* Spare memory, ready to go after initialization, to avoid a
+ * circular dependency on allocating memory and writing it into VM's
+ * page table.
+ */
+#define SPAREPAGEDIRS 11
+#define STATIC_SPAREPAGEDIRS 10
+#define SPAREPAGES 250
+#define STATIC_SPAREPAGES 100
+int missing_sparedirs = SPAREPAGEDIRS;
+static struct {
+       void *pagedir;
+       phys_bytes phys;
+} sparepagedirs[SPAREPAGEDIRS];
+
+int missing_spares = SPAREPAGES;
+static struct {
+       void *page;
+       phys_bytes phys;
+} sparepages[SPAREPAGES];
+
+extern char _end;      
+#define is_staticaddr(v) ((vir_bytes) (v) < (vir_bytes) &_end)
+
+#define MAX_KERNMAPPINGS 10
+static struct {
+       phys_bytes      phys_addr;      /* Physical addr. */
+       phys_bytes      len;            /* Length in bytes. */
+       vir_bytes       vir_addr;       /* Offset in page table. */
+       int             flags;
+} kern_mappings[MAX_KERNMAPPINGS];
+int kernmappings = 0;
+
+/* Clicks must be pages, as
+ *  - they must be page aligned to map them
+ *  - they must be a multiple of the page size
+ *  - it's inconvenient to have them bigger than pages, because we often want
+ *    just one page
+ * May as well require them to be equal then.
+ */
+#if CLICK_SIZE != ARM_PAGE_SIZE
+#error CLICK_SIZE must be page size.
+#endif
+
+/* Page table that contains pointers to all page directories. */
+phys_bytes page_directories_phys;
+u32_t *page_directories = NULL;
+
+static char static_sparepagedirs[ARM_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARM_PAGEDIR_SIZE] __aligned(ARM_PAGEDIR_SIZE);
+
+static char static_sparepages[ARM_PAGE_SIZE*STATIC_SPAREPAGES] __aligned(ARM_PAGE_SIZE);
+
+#if SANITYCHECKS
+/*===========================================================================*
+ *                             pt_sanitycheck                               *
+ *===========================================================================*/
+void pt_sanitycheck(pt_t *pt, char *file, int line)
+{
+/* Basic pt sanity check. */
+       int slot;
+
+       MYASSERT(pt);
+       MYASSERT(pt->pt_dir);
+       MYASSERT(pt->pt_dir_phys);
+
+       for(slot = 0; slot < ELEMENTS(vmproc); slot++) {
+               if(pt == &vmproc[slot].vm_pt)
+                       break;
+       }
+
+       if(slot >= ELEMENTS(vmproc)) {
+               panic("pt_sanitycheck: passed pt not in any proc");
+       }
+
+       MYASSERT(usedpages_add(pt->pt_dir_phys, ARM_PAGE_SIZE) == OK);
+}
+#endif
+
+/*===========================================================================*
+ *                             findhole                                     *
+ *===========================================================================*/
+static u32_t findhole(int pages)
+{
+/* Find a space in the virtual address space of VM. */
+       u32_t curv;
+       int pde = 0, try_restart;
+       static u32_t lastv = 0;
+       pt_t *pt = &vmprocess->vm_pt;
+       vir_bytes vmin, vmax;
+       u32_t holev;
+
+       vmin = (vir_bytes) (&_end) & ARM_VM_ADDR_MASK; /* marks end of VM BSS */
+       vmax = VM_STACKTOP;
+
+       /* Input sanity check. */
+       assert(vmin + ARM_PAGE_SIZE >= vmin);
+       assert(vmax >= vmin + ARM_PAGE_SIZE);
+       assert((vmin % ARM_PAGE_SIZE) == 0);
+       assert((vmax % ARM_PAGE_SIZE) == 0);
+       assert(pages > 0);
+
+#if SANITYCHECKS
+       curv = ((u32_t) random()) % ((vmax - vmin)/ARM_PAGE_SIZE);
+       curv *= ARM_PAGE_SIZE;
+       curv += vmin;
+#else
+       curv = lastv;
+       if(curv < vmin || curv >= vmax)
+               curv = vmin;
+#endif
+       try_restart = 1;
+
+       /* Start looking for a free page starting at vmin. */
+       while(curv < vmax) {
+               int pte;
+               int i, nohole;
+
+               assert(curv >= vmin);
+               assert(curv < vmax);
+
+               holev = curv; /* the candidate hole */
+               nohole = 0;
+               for (i = 0; i < pages && !nohole; ++i) {
+                   if(curv >= vmax) {
+                       break;
+                   }
+
+                   pde = ARM_VM_PDE(curv);
+                   pte = ARM_VM_PTE(curv);
+
+                   /* if page present, no hole */
+                   if((pt->pt_dir[pde] & ARM_VM_PDE_PRESENT) &&
+                      (pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT))
+                       nohole = 1;
+
+                   /* if not contiguous, no hole */
+                   if (curv != holev + i * ARM_PAGE_SIZE)
+                       nohole = 1;
+
+                   curv+=ARM_PAGE_SIZE;
+               }
+
+               /* there's a large enough hole */
+               if (!nohole && i == pages) {
+                       lastv = curv;
+                       return holev;
+               }
+
+               /* Reset curv */
+               if(curv >= vmax && try_restart) {
+                       curv = vmin;
+                       try_restart = 0;
+               }
+       }
+
+       printf("VM: out of virtual address space in vm\n");
+
+       return NO_MEM;
+}
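+
+/* Example: findhole(4) scans VM's own page table, between the end of its
+ * BSS and VM_STACKTOP, for four consecutive unmapped pages and returns
+ * the first such virtual address (or NO_MEM). vm_allocpage() below uses
+ * it to decide where a freshly allocated page gets mapped.
+ */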
+
+/*===========================================================================*
+ *                             vm_freepages                                 *
+ *===========================================================================*/
+void vm_freepages(vir_bytes vir, int pages)
+{
+       assert(!(vir % ARM_PAGE_SIZE)); 
+
+       if(is_staticaddr(vir)) {
+               printf("VM: not freeing static page\n");
+               return;
+       }
+
+       if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
+               MAP_NONE, pages*ARM_PAGE_SIZE, 0,
+               WMF_OVERWRITE | WMF_FREE) != OK)
+               panic("vm_freepages: pt_writemap failed");
+
+       vm_self_pages--;
+
+#if SANITYCHECKS
+       /* If SANITYCHECKS is on, flush the TLB so that accessing freed
+        * pages always traps, even if a stale TLB entry remains.
+        */
+       if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+               panic("VMCTL_FLUSHTLB failed");
+       }
+#endif
+}
+
+/*===========================================================================*
+ *                             vm_getsparepage                              *
+ *===========================================================================*/
+static void *vm_getsparepage(phys_bytes *phys)
+{
+       int s;
+       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+       for(s = 0; s < SPAREPAGES; s++) {
+               if(sparepages[s].page) {
+                       void *sp;
+                       sp = sparepages[s].page;
+                       *phys = sparepages[s].phys;
+                       sparepages[s].page = NULL;
+                       missing_spares++;
+                       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+                       return sp;
+               }
+       }
+       return NULL;
+}
+
+/*===========================================================================*
+ *                             vm_getsparepagedir                           *
+ *===========================================================================*/
+static void *vm_getsparepagedir(phys_bytes *phys)
+{
+       int s;
+       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
+       for(s = 0; s < SPAREPAGEDIRS; s++) {
+               if(sparepagedirs[s].pagedir) {
+                       void *sp;
+                       sp = sparepagedirs[s].pagedir;
+                       *phys = sparepagedirs[s].phys;
+                       sparepagedirs[s].pagedir = NULL;
+                       missing_sparedirs++;
+                       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
+                       return sp;
+               }
+       }
+       return NULL;
+}
+
+/*===========================================================================*
+ *                             vm_checkspares                               *
+ *===========================================================================*/
+static void *vm_checkspares(void)
+{
+       int s, n = 0;
+       static int total = 0, worst = 0;
+       assert(missing_spares >= 0 && missing_spares <= SPAREPAGES);
+       for(s = 0; s < SPAREPAGES && missing_spares > 0; s++)
+           if(!sparepages[s].page) {
+               n++;
+               if((sparepages[s].page = vm_allocpage(&sparepages[s].phys, 
+                       VMP_SPARE))) {
+                       missing_spares--;
+                       assert(missing_spares >= 0);
+                       assert(missing_spares <= SPAREPAGES);
+               } else {
+                       printf("VM: warning: couldn't get new spare page\n");
+               }
+       }
+       if(worst < n) worst = n;
+       total += n;
+
+       return NULL;
+}
+
+/*===========================================================================*
+ *                             vm_checksparedirs                            *
+ *===========================================================================*/
+static void *vm_checksparedirs(void)
+{
+       int s, n = 0;
+       static int total = 0, worst = 0;
+       assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS);
+       for(s = 0; s < SPAREPAGEDIRS && missing_sparedirs > 0; s++)
+           if(!sparepagedirs[s].pagedir) {
+               n++;
+               if((sparepagedirs[s].pagedir = vm_allocpage(&sparepagedirs[s].phys,
+                       VMP_SPARE))) {
+                       missing_sparedirs--;
+                       assert(missing_sparedirs >= 0);
+                       assert(missing_sparedirs <= SPAREPAGEDIRS);
+               } else {
+                       printf("VM: warning: couldn't get new spare pagedir\n");
+               }
+       }
+       if(worst < n) worst = n;
+       total += n;
+
+       return NULL;
+}
+
+static int pt_init_done;
+
+/*===========================================================================*
+ *                             vm_allocpage                                 *
+ *===========================================================================*/
+void *vm_allocpage(phys_bytes *phys, int reason)
+{
+/* Allocate a page for use by VM itself. */
+       phys_bytes newpage;
+       vir_bytes loc;
+       pt_t *pt;
+       int r;
+       static int level = 0;
+       void *ret;
+       u32_t mem_bytes, mem_clicks, mem_flags;
+
+       pt = &vmprocess->vm_pt;
+       assert(reason >= 0 && reason < VMP_CATEGORIES);
+
+       level++;
+
+       assert(level >= 1);
+       assert(level <= 2);
+
+       if(level > 1 || !pt_init_done) {
+               void *s;
+
+               if (reason == VMP_PAGEDIR)
+                       s=vm_getsparepagedir(phys);
+               else
+                       s=vm_getsparepage(phys);
+
+               level--;
+               if(!s) {
+                       util_stacktrace();
+                       printf("VM: warning: out of spare pages\n");
+               }
+               if(!is_staticaddr(s)) vm_self_pages++;
+               return s;
+       }
+
+       if (reason == VMP_PAGEDIR) {
+               mem_bytes = ARM_PAGEDIR_SIZE;
+               mem_flags = PAF_ALIGN16K;
+       } else {
+               mem_bytes = ARM_PAGE_SIZE;
+               mem_flags = 0;
+       }
+       mem_clicks = mem_bytes / ARM_PAGE_SIZE * CLICKSPERPAGE;
+
+       /* VM does have a pagetable, so get a page and map it in there.
+        * Where in our virtual address space can we put it?
+        */
+       loc = findhole(mem_bytes / ARM_PAGE_SIZE);
+       if(loc == NO_MEM) {
+               level--;
+               printf("VM: vm_allocpage: findhole failed\n");
+               return NULL;
+       }
+
+       /* Allocate a page of memory for use by VM. As VM
+        * is trusted, we don't have to pre-clear it.
+        */
+       if((newpage = alloc_mem(mem_clicks, mem_flags)) == NO_MEM) {
+               level--;
+               printf("VM: vm_allocpage: alloc_mem failed\n");
+               return NULL;
+       }
+
+       *phys = CLICK2ABS(newpage);
+
+       /* Map this page into our address space. */
+       if((r=pt_writemap(vmprocess, pt, loc, *phys, mem_bytes,
+               ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER | ARM_VM_PTE_RW |
+               ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, 0)) != OK) {
+               free_mem(newpage, mem_clicks);
+               printf("vm_allocpage writemap failed\n");
+               level--;
+               return NULL;
+       }
+
+       if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+               panic("VMCTL_FLUSHTLB failed: %d", r);
+       }
+
+       level--;
+
+       /* Return user-space-ready pointer to it. */
+       ret = (void *) loc;
+
+       vm_self_pages++;
+       return ret;
+}
+
+/*===========================================================================*
+ *                             vm_pagelock                                  *
+ *===========================================================================*/
+void vm_pagelock(void *vir, int lockflag)
+{
+/* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */
+       vir_bytes m = (vir_bytes) vir;
+       int r;
+       u32_t flags = ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER;
+       pt_t *pt;
+
+       pt = &vmprocess->vm_pt;
+
+       assert(!(m % ARM_PAGE_SIZE));
+
+       if(!lockflag)
+               flags |= ARM_VM_PTE_RW;
+       else
+               flags |= ARM_VM_PTE_RO;
+       flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
+
+       /* Update flags. */
+       if((r=pt_writemap(vmprocess, pt, m, 0, ARM_PAGE_SIZE,
+               flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) {
+               panic("vm_lockpage: pt_writemap failed");
+       }
+
+       if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
+               panic("VMCTL_FLUSHTLB failed: %d", r);
+       }
+
+       return;
+}
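+
+/* Usage sketch (illustrative, not part of this change): briefly
+ * unprotect a VM-private page, update it, then re-lock it:
+ *
+ *	vm_pagelock(p, 0);	(make writable)
+ *	p[i] = entry;
+ *	vm_pagelock(p, 1);	(read-only again)
+ */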
+
+/*===========================================================================*
+ *                             vm_addrok                                    *
+ *===========================================================================*/
+int vm_addrok(void *vir, int writeflag)
+{
+       pt_t *pt = &vmprocess->vm_pt;
+       int pde, pte;
+       vir_bytes v = (vir_bytes) vir;
+
+       pde = ARM_VM_PDE(v);
+       pte = ARM_VM_PTE(v);
+
+       if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
+               printf("addr not ok: missing pde %d\n", pde);
+               return 0;
+       }
+
+       if(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT)) {
+               printf("addr not ok: missing pde %d / pte %d\n",
+                       pde, pte);
+               return 0;
+       }
+
+       if(writeflag &&
+               (pt->pt_pt[pde][pte] & ARM_VM_PTE_RO)) {
+               printf("addr not ok: pde %d / pte %d present but read-only\n",
+                       pde, pte);
+               return 0;
+       }
+
+       return 1;
+}
+
+/*===========================================================================*
+ *                             pt_ptalloc                                   *
+ *===========================================================================*/
+static int pt_ptalloc(pt_t *pt, int pde, u32_t flags)
+{
+/* Allocate a page table and write its address into the page directory. */
+       int i;
+       phys_bytes pt_phys;
+
+       /* Argument must make sense. */
+       assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
+       assert(!(flags & ~(PTF_ALLFLAGS)));
+
+       /* We don't expect to overwrite page directory entry, nor
+        * storage for the page table.
+        */
+       assert(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT));
+       assert(!pt->pt_pt[pde]);
+
+       /* Get storage for the page table. */
+       if(!(pt->pt_pt[pde] = vm_allocpage(&pt_phys, VMP_PAGETABLE)))
+               return ENOMEM;
+
+       for(i = 0; i < ARM_VM_PT_ENTRIES; i++)
+               pt->pt_pt[pde][i] = 0;  /* Empty entry. */
+
+       /* Make the page directory entry. On ARM the first-level entry
+        * carries no permission bits of its own: it is marked present
+        * and assigned a domain, and protection is left to the PTEs.
+        */
+       pt->pt_dir[pde] = (pt_phys & ARM_VM_PDE_MASK)
+               | ARM_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+
+       return OK;
+}
+
+/*===========================================================================*
+ *                         pt_ptalloc_in_range                              *
+ *===========================================================================*/
+int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end,
+       u32_t flags, int verify)
+{
+/* Allocate all the page tables in the range specified. */
+       int pde, first_pde, last_pde;
+
+       first_pde = ARM_VM_PDE(start);
+       last_pde = ARM_VM_PDE(end-1);
+       assert(first_pde >= 0);
+       assert(last_pde < ARM_VM_DIR_ENTRIES);
+
+       /* Scan all page-directory entries in the range. */
+       for(pde = first_pde; pde <= last_pde; pde++) {
+               assert(!(pt->pt_dir[pde] & ARM_VM_BIGPAGE));
+               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
+                       int r;
+                       if(verify) {
+                               printf("pt_ptalloc_in_range: no pde %d\n", pde);
+                               return EFAULT;
+                       }
+                       assert(!pt->pt_dir[pde]);
+                       if((r=pt_ptalloc(pt, pde, flags)) != OK) {
+                               /* Couldn't do (complete) mapping.
+                                * Don't bother freeing any previously
+                                * allocated page tables, they're
+                                * still writable, don't point to nonsense,
+                                * and pt_ptalloc leaves the directory
+                                * and other data in a consistent state.
+                                */
+                               printf("pt_ptalloc_in_range: pt_ptalloc failed\n");
+                               return r;
+                       }
+               }
+               assert(pt->pt_dir[pde]);
+               assert(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT);
+       }
+
+       return OK;
+}
+
+static char *ptestr(u32_t pte)
+{
+#define FLAG(constant, name) {                                         \
+       if(pte & (constant)) { strcat(str, name); strcat(str, " "); }   \
+}
+
+       static char str[30];
+       if(!(pte & ARM_VM_PTE_PRESENT)) {
+               return "not present";
+       }
+       str[0] = '\0';
+       if(pte & ARM_VM_PTE_RO) {
+               strcat(str, "R ");
+       } else {
+               strcat(str, "W ");
+       }
+       FLAG(ARM_VM_PTE_USER, "U");
+       FLAG(ARM_VM_PTE_SUPER, "S");
+       FLAG(ARM_VM_PTE_SHAREABLE, "SH");
+       FLAG(ARM_VM_PTE_WB, "WB");
+       FLAG(ARM_VM_PTE_WT, "WT");
+
+       return str;
+}
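+
+/* Example (given the flag handling above): for a writable, user,
+ * shareable, write-back page, ptestr() returns "W U SH WB ".
+ */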
+
+/*===========================================================================*
+ *                          pt_map_in_range                                 *
+ *===========================================================================*/
+int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp,
+       vir_bytes start, vir_bytes end)
+{
+/* Transfer all the mappings from the pt of the source process to the pt of
+ * the destination process in the range specified.
+ */
+       int pde, pte;
+       vir_bytes viraddr;
+       pt_t *pt, *dst_pt;
+
+       pt = &src_vmp->vm_pt;
+       dst_pt = &dst_vmp->vm_pt;
+
+       end = end ? end : VM_DATATOP;
+       assert(start % ARM_PAGE_SIZE == 0);
+       assert(end % ARM_PAGE_SIZE == 0);
+       assert(ARM_VM_PDE(start) >= 0 && start <= end);
+       assert(ARM_VM_PDE(end) < ARM_VM_DIR_ENTRIES);
+
+#if LU_DEBUG
+       printf("VM: pt_map_in_range: src = %d, dst = %d\n",
+               src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
+       printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n",
+               start, ARM_VM_PDE(start), ARM_VM_PTE(start),
+               end, ARM_VM_PDE(end), ARM_VM_PTE(end));
+#endif
+
+       /* Scan all page-table entries in the range. */
+       for(viraddr = start; viraddr <= end; viraddr += ARM_PAGE_SIZE) {
+               pde = ARM_VM_PDE(viraddr);
+               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
+                       if(viraddr == VM_DATATOP) break;
+                       continue;
+               }
+               pte = ARM_VM_PTE(viraddr);
+               if(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT)) {
+                       if(viraddr == VM_DATATOP) break;
+                       continue;
+               }
+
+               /* Transfer the mapping. */
+               dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte];
+
+               if(viraddr == VM_DATATOP) break;
+       }
+
+       return OK;
+}
+
+/*===========================================================================*
+ *                             pt_ptmap                                     *
+ *===========================================================================*/
+int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp)
+{
+/* Transfer mappings of the page dir and page tables from the source
+ * process to the destination process. Make sure all the mappings are
+ * above the stack, so as not to corrupt valid mappings in the data
+ * segment of the destination process.
+ */
+       int pde, r;
+       phys_bytes physaddr;
+       vir_bytes viraddr;
+       pt_t *pt;
+
+       pt = &src_vmp->vm_pt;
+
+#if LU_DEBUG
+       printf("VM: pt_ptmap: src = %d, dst = %d\n",
+               src_vmp->vm_endpoint, dst_vmp->vm_endpoint);
+#endif
+
+       /* Transfer mapping to the page directory. */
+       viraddr = (vir_bytes) pt->pt_dir;
+       physaddr = pt->pt_dir_phys & ARM_VM_ADDR_MASK;
+       if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARM_PAGEDIR_SIZE,
+               ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER | ARM_VM_PTE_RW |
+               ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
+               WMF_OVERWRITE)) != OK) {
+               return r;
+       }
+#if LU_DEBUG
+       printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n",
+               viraddr, physaddr);
+#endif
+
+       /* Scan all non-reserved page-directory entries. */
+       for(pde=0; pde < ARM_VM_DIR_ENTRIES; pde++) {
+               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT)) {
+                       continue;
+               }
+
+               /* Transfer mapping to the page table. */
+               viraddr = (vir_bytes) pt->pt_pt[pde];
+               physaddr = pt->pt_dir[pde] & ARM_VM_PDE_MASK;
+               if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARM_PAGE_SIZE,
+                       ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER | ARM_VM_PTE_RW |
+                       ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
+                       WMF_OVERWRITE)) != OK) {
+                       return r;
+               }
+       }
+
+       return OK;
+}
+
+void pt_clearmapcache(void)
+{
+       /* Make sure the kernel invalidates the TLB when it uses the
+        * current page table (i.e. VM's) to make new mappings, before
+        * the new page table root (TTBR on ARM) is loaded.
+        */
+       if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK)
+               panic("VMCTL_CLEARMAPCACHE failed");
+}
+
+/*===========================================================================*
+ *                             pt_writemap                                  *
+ *===========================================================================*/
+int pt_writemap(struct vmproc * vmp,
+                       pt_t *pt,
+                       vir_bytes v,
+                       phys_bytes physaddr,
+                       size_t bytes,
+                       u32_t flags,
+                       u32_t writemapflags)
+{
+/* Write mapping into page table. Allocate a new page table if necessary. */
+       int p, pages;
+       int verify = 0;
+       int ret = OK;
+
+#ifdef CONFIG_SMP
+       int vminhibit_clear = 0;
+       /* FIXME
+        * Don't do this every time; stop the process only on the first
+        * change and resume execution on the last change. Do this in a
+        * wrapper of this function.
+        */
+       if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
+                       !(vmp->vm_flags & VMF_EXITING)) {
+               sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0);
+               vminhibit_clear = 1;
+       }
+#endif
+
+       if(writemapflags & WMF_VERIFY)
+               verify = 1;
+
+       assert(!(bytes % ARM_PAGE_SIZE));
+       assert(!(flags & ~(PTF_ALLFLAGS)));
+
+       pages = bytes / ARM_PAGE_SIZE;
+
+       /* MAP_NONE means to clear the mapping. It doesn't matter
+        * what's actually written into the PTE if ARM_VM_PTE_PRESENT
+        * isn't on, so we can just write MAP_NONE into it.
+        */
+       assert(physaddr == MAP_NONE || (flags & ARM_VM_PTE_PRESENT));
+       assert(physaddr != MAP_NONE || !flags);
+
+       /* First make sure all the necessary page tables are allocated,
+        * before we start writing in any of them, because it's a pain
+        * to undo our work properly.
+        */
+       ret = pt_ptalloc_in_range(pt, v, v + ARM_PAGE_SIZE*pages, flags, verify);
+       if(ret != OK) {
+               printf("VM: writemap: pt_ptalloc_in_range failed\n");
+               goto resume_exit;
+       }
+
+       /* Now write in them. */
+       for(p = 0; p < pages; p++) {
+               u32_t entry;
+               int pde = ARM_VM_PDE(v);
+               int pte = ARM_VM_PTE(v);
+
+               if(!v) { printf("VM: warning: making zero page for %d\n",
+                       vmp->vm_endpoint); }
+
+               assert(!(v % ARM_PAGE_SIZE));
+               assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
+               assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
+
+               /* Page table has to be there. */
+               assert(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT);
+
+               /* We do not expect it to be a bigpage. */
+               assert(!(pt->pt_dir[pde] & ARM_VM_BIGPAGE));
+
+               /* Make sure page directory entry for this page table
+                * is marked present and page table entry is available.
+                */
+               assert(pt->pt_pt[pde]);
+
+#if SANITYCHECKS
+               /* We don't expect to overwrite a page. */
+               if(!(writemapflags & (WMF_OVERWRITE|WMF_VERIFY)))
+                       assert(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT));
+#endif
+               if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) {
+                       physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK;
+               }
+
+               if(writemapflags & WMF_FREE) {
+                       free_mem(ABS2CLICK(physaddr), 1);
+               }
+
+               /* Entry we will write. */
+               entry = (physaddr & ARM_VM_PTE_MASK) | flags;
+
+               if(verify) {
+                       u32_t maskedentry;
+                       maskedentry = pt->pt_pt[pde][pte];
+                       /* Verify pagetable entry. */
+                       if(entry & ARM_VM_PTE_RW) {
+                               /* If we expect a writable page, allow a readonly page. */
+                               maskedentry |= ARM_VM_PTE_RW;
+                       }
+                       if(maskedentry != entry) {
+                               printf("pt_writemap: mismatch: ");
+                               if((entry & ARM_VM_PTE_MASK) !=
+                                       (maskedentry & ARM_VM_PTE_MASK)) {
+                                       printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ",
+                                               (long)entry, (long)maskedentry);
+                               } else printf("phys ok; ");
+                               printf(" flags: found %s; ",
+                                       ptestr(pt->pt_pt[pde][pte]));
+                               printf(" masked %s; ",
+                                       ptestr(maskedentry));
+                               printf(" expected %s\n", ptestr(entry));
+                               ret = EFAULT;
+                               goto resume_exit;
+                       }
+               } else {
+                       /* Write pagetable entry. */
+                       pt->pt_pt[pde][pte] = entry;
+               }
+
+               physaddr += ARM_PAGE_SIZE;
+               v += ARM_PAGE_SIZE;
+       }
+
+resume_exit:
+
+#ifdef CONFIG_SMP
+       if (vminhibit_clear) {
+               assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR &&
+                       !(vmp->vm_flags & VMF_EXITING));
+               sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0);
+       }
+#endif
+
+       return ret;
+}
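+
+/* Usage sketch (illustrative, not part of this change): map one
+ * physical page read-write at virtual address v, as several callers in
+ * this file do:
+ *
+ *	r = pt_writemap(vmp, pt, v, ph, ARM_PAGE_SIZE,
+ *		ARM_VM_PTE_PRESENT | ARM_VM_PTE_USER | ARM_VM_PTE_RW |
+ *		ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, 0);
+ */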
+
+/*===========================================================================*
+ *                             pt_checkrange                                *
+ *===========================================================================*/
+int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, int write)
+{
+       int p, pages;
+
+       assert(!(bytes % ARM_PAGE_SIZE));
+
+       pages = bytes / ARM_PAGE_SIZE;
+
+       for(p = 0; p < pages; p++) {
+               int pde = ARM_VM_PDE(v);
+               int pte = ARM_VM_PTE(v);
+
+               assert(!(v % ARM_PAGE_SIZE));
+               assert(pte >= 0 && pte < ARM_VM_PT_ENTRIES);
+               assert(pde >= 0 && pde < ARM_VM_DIR_ENTRIES);
+
+               /* Page table has to be there. */
+               if(!(pt->pt_dir[pde] & ARM_VM_PDE_PRESENT))
+                       return EFAULT;
+
+               /* Make sure page directory entry for this page table
+                * is marked present and page table entry is available.
+                */
+               assert((pt->pt_dir[pde] & ARM_VM_PDE_PRESENT) && pt->pt_pt[pde]);
+
+               if(!(pt->pt_pt[pde][pte] & ARM_VM_PTE_PRESENT)) {
+                       return EFAULT;
+               }
+
+               if(write && (pt->pt_pt[pde][pte] & ARM_VM_PTE_RO)) {
+                       return EFAULT;
+               }
+
+               v += ARM_PAGE_SIZE;
+       }
+
+       return OK;
+}
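+
+/* Usage sketch (illustrative, not part of this change): validate a
+ * caller-supplied buffer for writing before touching it:
+ *
+ *	if(pt_checkrange(&vmp->vm_pt, buf, len, 1) != OK)
+ *		return EFAULT;
+ */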
+
+/*===========================================================================*
+ *                             pt_new                                       *
+ *===========================================================================*/
+int pt_new(pt_t *pt)
+{
+/* Allocate a pagetable root. On ARM, allocate a 16KB-aligned page
+ * directory and clear its entries (indicating that no page tables are
+ * allocated yet). Look up its physical address, as we'll need it later.
+ * Verify that it's properly aligned.
+ */
+       int i;
+
+       /* Don't ever re-allocate or move a given process slot's
+        * page directory once it's been created. This is a fraction
+        * faster, but also avoids having to invalidate the page
+        * mappings from in-kernel page tables pointing to
+        * the page directories (the page_directories data).
+        */
+        if(!pt->pt_dir &&
+          !(pt->pt_dir = vm_allocpage((phys_bytes *)&pt->pt_dir_phys, VMP_PAGEDIR))) {
+               return ENOMEM;
+       }
+       assert(!((u32_t)pt->pt_dir_phys % ARM_PAGEDIR_SIZE));
+
+       for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
+               pt->pt_dir[i] = 0; /* invalid entry (ARM_VM_PDE_PRESENT bit = 0) */
+               pt->pt_pt[i] = NULL;
+       }
+
+       /* Where to start looking for free virtual address space? */
+       pt->pt_virtop = 0;
+
+       /* Map in kernel. */
+       if(pt_mapkernel(pt) != OK)
+               panic("pt_new: pt_mapkernel failed");
+
+       return OK;
+}
+
+static int freepde(void)
+{
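+       /* Hand out the next unused page-directory slot that the kernel
+        * reserved at boot (kernel_boot_info.freepde_start); on ARM each
+        * slot covers one 1MB section.
+        */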
+       int p = kernel_boot_info.freepde_start++;
+       assert(kernel_boot_info.freepde_start < ARM_VM_DIR_ENTRIES);
+       return p;
+}
+
+/*===========================================================================*
+ *                              pt_init                                      *
+ *===========================================================================*/
+void pt_init(void)
+{
+       pt_t *newpt;
+       int s, r, p;
+       vir_bytes sparepages_mem;
+       vir_bytes sparepagedirs_mem;
+       static u32_t currentpagedir[ARM_VM_DIR_ENTRIES];
+       int m = kernel_boot_info.kern_mod;
+       u32_t myttbr;
+
+       /* Find what the physical location of the kernel is. */
+       assert(m >= 0);
+       assert(m < kernel_boot_info.mods_with_kernel);
+       assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
+       kern_mb_mod = &kernel_boot_info.module_list[m];
+       kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
+       assert(!(kern_mb_mod->mod_start % ARM_BIG_PAGE_SIZE));
+       assert(!(kernel_boot_info.vir_kern_start % ARM_BIG_PAGE_SIZE));
+       kern_start_pde = kernel_boot_info.vir_kern_start / ARM_BIG_PAGE_SIZE;
+
+       /* Get ourselves spare pages. */
+       sparepages_mem = (vir_bytes) static_sparepages;
+       assert(!(sparepages_mem % ARM_PAGE_SIZE));
+
+        /* Get ourselves spare pagedirs. */
+       sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
+       assert(!(sparepagedirs_mem % ARM_PAGEDIR_SIZE));
+
+       /* Spare pages are used to allocate memory before VM has its own
+        * page table into which things (i.e. arbitrary physical memory)
+        * can be mapped. We get them by pre-allocating them in our bss
+        * (allocated and mapped in by the kernel) in static_sparepages.
+        * We also need their physical addresses, though; we look them up
+        * now so they are ready for use.
+        */
+       missing_sparedirs = 0;
+       assert(STATIC_SPAREPAGEDIRS < SPAREPAGEDIRS);
+       for(s = 0; s < SPAREPAGEDIRS; s++) {
+               vir_bytes v = (sparepagedirs_mem + s*ARM_PAGEDIR_SIZE);
+               phys_bytes ph;
+               if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
+                       ARM_PAGEDIR_SIZE, &ph)) != OK)
+                               panic("pt_init: sys_umap failed: %d", r);
+               if(s >= STATIC_SPAREPAGEDIRS) {
+                       sparepagedirs[s].pagedir = NULL;
+                       missing_sparedirs++;
+                       continue;
+               }
+               sparepagedirs[s].pagedir = (void *) v;
+               sparepagedirs[s].phys = ph;
+       }
+
+       missing_spares = 0;
+       assert(STATIC_SPAREPAGES < SPAREPAGES);
+       for(s = 0; s < SPAREPAGES; s++) {
+               vir_bytes v = (sparepages_mem + s*ARM_PAGE_SIZE);
+               phys_bytes ph;
+               if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
+                       ARM_PAGE_SIZE, &ph)) != OK)
+                               panic("pt_init: sys_umap failed: %d", r);
+               if(s >= STATIC_SPAREPAGES) {
+                       sparepages[s].page = NULL;
+                       missing_spares++;
+                       continue;
+               }
+               sparepages[s].page = (void *) v;
+               sparepages[s].phys = ph;
+       }
+
+       /* 1MB section mappings ("big pages") are available on ARM. */
+       bigpage_ok = 1;
+
+       /* Allocate us a page table in which to remember page directory
+        * pointers.
+        */
+       if(!(page_directories = vm_allocpage(&page_directories_phys,
+               VMP_PAGETABLE)))
+                panic("no virt addr for vm mappings");
+
+       memset(page_directories, 0, ARM_PAGE_SIZE);
+
+       /* Now reserve another pde for kernel's own mappings. */
+       {
+               int kernmap_pde;
+               phys_bytes addr, len;
+               int flags, index = 0;
+               u32_t offset = 0;
+
+               kernmap_pde = freepde();
+               offset = kernmap_pde * ARM_BIG_PAGE_SIZE;
+
+               while(sys_vmctl_get_mapping(index, &addr, &len,
+                       &flags) == OK)  {
+                       vir_bytes vir;
+                       if(index >= MAX_KERNMAPPINGS)
+                               panic("VM: too many kernel mappings: %d", index);
+                       kern_mappings[index].phys_addr = addr;
+                       kern_mappings[index].len = len;
+                       kern_mappings[index].vir_addr = offset;
+                       kern_mappings[index].flags =
+                               ARM_VM_PTE_PRESENT;
+                       if(flags & VMMF_UNCACHED)
+                               kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
+                       else
+                               kern_mappings[index].flags |=
+                                   ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
+                       if(flags & VMMF_USER)
+                               kern_mappings[index].flags |= ARM_VM_PTE_USER;
+                       else
+                               kern_mappings[index].flags |= ARM_VM_PTE_SUPER;
+                       if(flags & VMMF_WRITE)
+                               kern_mappings[index].flags |= ARM_VM_PTE_RW;
+                       else
+                               kern_mappings[index].flags |= ARM_VM_PTE_RO;
+                       if(addr % ARM_PAGE_SIZE)
+                               panic("VM: addr unaligned: 0x%lx", (long)addr);
+                       if(len % ARM_PAGE_SIZE)
+                               panic("VM: len unaligned: 0x%lx", (long)len);
+                       vir = offset;
+                       if(sys_vmctl_reply_mapping(index, vir) != OK)
+                               panic("VM: reply failed");
+                       offset += len;
+                       index++;
+                       kernmappings++;
+               }
+       }
+
+       /* Find a PDE, below the process virtual range, that is
+        * available for mapping in the page directories.
+        */
+       pagedir_pde = freepde();
+       pagedir_pde_val = (page_directories_phys & ARM_VM_PDE_MASK) |
+                       ARM_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+
+       /* All right. Now we have to make our own page directory and page
+        * tables, which the kernel has already set up, accessible to us.
+        * It's easier to understand if we just copy all the required pages
+        * (i.e. the page directory and page tables) and set up the pointers
+        * as if VM had done it itself.
+        *
+        * This allocation happens without using any page table, just
+        * spare pages.
+        */
+       newpt = &vmprocess->vm_pt;
+       if(pt_new(newpt) != OK)
+               panic("vm pt_new failed");
+
+       /* Get our current pagedir so we can see it. */
+       if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
+               panic("VM: sys_vmctl_get_pdbr failed");
+       if(sys_vircopy(NONE, myttbr, SELF,
+               (vir_bytes) currentpagedir, ARM_PAGEDIR_SIZE) != OK)
+               panic("VM: sys_vircopy failed");
+
+       /* We have mapped in kernel ourselves; now copy mappings for VM
+        * that kernel made, including allocations for BSS. Skip identity
+        * mapping bits; just map in VM.
+        */
+       for(p = 0; p < ARM_VM_DIR_ENTRIES; p++) {
+               u32_t entry = currentpagedir[p];
+               phys_bytes ptaddr_kern, ptaddr_us;
+
+               /* BIGPAGEs are either kernel mappings (which we handle
+                * ourselves) or boot-time identity mappings (which we
+                * don't want).
+                */
+               if(!(entry & ARM_VM_PDE_PRESENT)) continue;
+               if((entry & ARM_VM_BIGPAGE)) continue;
+
+               if(pt_ptalloc(newpt, p, 0) != OK)
+                       panic("pt_ptalloc failed");
+               assert(newpt->pt_dir[p] & ARM_VM_PDE_PRESENT);
+
+               ptaddr_kern = entry & ARM_VM_PDE_MASK;
+               ptaddr_us = newpt->pt_dir[p] & ARM_VM_PDE_MASK;
+
+               /* Copy kernel-initialized pagetable contents into our
+                * normally accessible pagetable.
+                */
+               if(sys_abscopy(ptaddr_kern, ptaddr_us, ARM_PAGETABLE_SIZE) != OK)
+                       panic("pt_init: abscopy failed");
+       }
+
+       /* Inform the kernel that VM has a newly built page table. */
+       assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
+       pt_bind(newpt, &vmproc[VM_PROC_NR]);
+
+       pt_init_done = 1;
+
+       /* All OK. */
+       return;
+}
+
+/*===========================================================================*
+ *                             pt_bind                                      *
+ *===========================================================================*/
+int pt_bind(pt_t *pt, struct vmproc *who)
+{
+       int slot;
+       u32_t phys;
+       void *pdes;
+       int i;
+       int pages_per_pagedir = ARM_PAGEDIR_SIZE/ARM_PAGE_SIZE;
+
+       /* Basic sanity checks. */
+       assert(who);
+       assert(who->vm_flags & VMF_INUSE);
+       assert(pt);
+
+       assert(pagedir_pde >= 0);
+
+       slot = who->vm_slot;
+       assert(slot >= 0);
+       assert(slot < ELEMENTS(vmproc));
+       assert(slot < ARM_VM_PT_ENTRIES / pages_per_pagedir);
+
+       phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
+       assert(pt->pt_dir_phys == phys);
+       assert(!(pt->pt_dir_phys % ARM_PAGEDIR_SIZE));
+
+       /* Update "page directory pagetable." */
+       for (i = 0; i < pages_per_pagedir; i++)
+               page_directories[slot*pages_per_pagedir+i] =
+                       (phys+i*ARM_PAGE_SIZE) |
+                       ARM_VM_PTE_PRESENT | ARM_VM_PTE_RW |
+                       ARM_VM_PTE_USER;
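+
+       /* Example (assuming 4KB pages): ARM_PAGEDIR_SIZE/ARM_PAGE_SIZE
+        * is 16384/4096 = 4 pages per page directory, so process slot n
+        * occupies entries 4n..4n+3 of the "page directory pagetable."
+        */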
+
+       /* This is where the PDEs will be visible to the kernel
+        * in its address space.
+        */
+       pdes = (void *) (pagedir_pde*ARM_BIG_PAGE_SIZE +
+                       slot * ARM_PAGEDIR_SIZE);
+
+#if 0
+       printf("VM: slot %d endpoint %d has pde val 0x%lx at kernel address 0x%lx\n",
+               slot, who->vm_endpoint, page_directories[slot], pdes);
+#endif
+       /* Tell kernel about new page table root. */
+       return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys, pdes);
+}
+
+/*===========================================================================*
+ *                             pt_free                                      *
+ *===========================================================================*/
+void pt_free(pt_t *pt)
+{
+/* Free memory associated with this pagetable. */
+       int i;
+
+       for(i = 0; i < ARM_VM_DIR_ENTRIES; i++)
+               if(pt->pt_pt[i])
+                       vm_freepages((vir_bytes) pt->pt_pt[i], 1);
+
+       return;
+}
+
+/*===========================================================================*
+ *                             pt_mapkernel                                 *
+ *===========================================================================*/
+int pt_mapkernel(pt_t *pt)
+{
+       int i;
+       int kern_pde = kern_start_pde;
+       phys_bytes addr, mapped = 0;
+
+       /* Any ARM page table needs to map in the kernel address space. */
+       assert(bigpage_ok);
+       assert(pagedir_pde >= 0);
+       assert(kern_pde >= 0);
+
+       /* pt_init() has made sure this is ok. */
+       addr = kern_mb_mod->mod_start;
+
+       /* Actually map in the kernel. */
+       while(mapped < kern_size) {
+               pt->pt_dir[kern_pde] = (addr & ARM_VM_PDE_MASK) |
+                       ARM_VM_SECTION |
+                       ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
+                       ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+               kern_pde++;
+               mapped += ARM_BIG_PAGE_SIZE;
+               addr += ARM_BIG_PAGE_SIZE;
+       }
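+
+       /* Example (illustrative figure only): a 4MB kernel image would
+        * fill four consecutive 1MB section entries starting at
+        * kern_start_pde.
+        */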
+
+       /* Kernel also wants to know about all page directories. */
+       assert(pagedir_pde > kern_pde);
+       pt->pt_dir[pagedir_pde] = pagedir_pde_val;
+
+       /* Kernel also wants various mappings of its own. */
+       for(i = 0; i < kernmappings; i++) {
+               if(pt_writemap(NULL, pt,
+                       kern_mappings[i].vir_addr,
+                       kern_mappings[i].phys_addr,
+                       kern_mappings[i].len,
+                       kern_mappings[i].flags, 0) != OK) {
+                       panic("pt_mapkernel: pt_writemap failed");
+               }
+       }
+
+       return OK;
+}
+
+/*===========================================================================*
+ *                             pt_cycle                                     *
+ *===========================================================================*/
+void pt_cycle(void)
+{
+       vm_checkspares();
+       vm_checksparedirs();
+}
+
+int get_vm_self_pages(void) { return vm_self_pages; }
diff --git a/servers/vm/arch/arm/pagetable.h b/servers/vm/arch/arm/pagetable.h
new file mode 100644 (file)
index 0000000..f78be15
--- /dev/null
@@ -0,0 +1,51 @@
+
+#ifndef _PAGETABLE_H
+#define _PAGETABLE_H 1
+
+#include <stdint.h>
+#include <machine/vm.h>
+
+#include "vm.h"
+
+/* An ARM pagetable. */
+typedef struct {
+       /* Directory entries in VM addr space - root of page table.  */
+       u32_t *pt_dir;          /* 16KB aligned; ARM_VM_DIR_ENTRIES entries */
+       u32_t pt_dir_phys;      /* physical address of pt_dir */
+
+       /* Pointers to page tables in VM address space. */
+       u32_t *pt_pt[ARM_VM_DIR_ENTRIES];
+
+       /* When looking for a hole in virtual address space, start
+        * looking here. This is a linear address, i.e., not the address
+        * as the process sees it but its position in the page table.
+        * This is just a hint.
+        */
+       u32_t pt_virtop;
+} pt_t;
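+
+/* Address-split sketch (ARM short-descriptor translation, 4KB pages):
+ * a 32-bit virtual address decomposes as
+ *
+ *	bits 31:20  directory index (ARM_VM_DIR_ENTRIES == 4096 PDEs)
+ *	bits 19:12  page-table index (256 PTEs per second-level table)
+ *	bits 11:0   offset within the 4KB page
+ */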
+
+/* Mapping flags. */
+#define PTF_WRITE      ARM_VM_PTE_RW
+#define PTF_READ       ARM_VM_PTE_RO
+#define PTF_PRESENT    ARM_VM_PTE_PRESENT
+#define PTF_SUPER      ARM_VM_PTE_SUPER
+#define PTF_USER       ARM_VM_PTE_USER
+#define PTF_NOCACHE    ARM_VM_PTE_DEVICE
+#define PTF_CACHEWB    ARM_VM_PTE_WB
+#define PTF_CACHEWT    ARM_VM_PTE_WT
+#define PTF_SHARE      ARM_VM_PTE_SHAREABLE
+
+/* For arch-specific PT routines to check if no bits outside
+ * the regular flags are set.
+ */
+#define PTF_ALLFLAGS   (PTF_READ|PTF_WRITE|PTF_PRESENT|PTF_SUPER|PTF_USER|PTF_NOCACHE|PTF_CACHEWB|PTF_CACHEWT|PTF_SHARE)
+
+#if SANITYCHECKS
+#define PT_SANE(p) { pt_sanitycheck((p), __FILE__, __LINE__); }
+#else
+#define PT_SANE(p)
+#endif
+
+#endif
+
+
index 2824d84e2757ad4ba19c31136f1923b4563fcab9..19917f7dd074be04077c8fbcf2d54683075864e6 100644 (file)
@@ -96,6 +96,7 @@ int pt_writemap(struct vmproc * vmp, pt_t *pt, vir_bytes v, phys_bytes
 int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, int write);
 int pt_bind(pt_t *pt, struct vmproc *who);
 void *vm_allocpage(phys_bytes *p, int cat);
+void *vm_allocpagedir(phys_bytes *p);
 void pt_cycle(void);
 int pt_mapkernel(pt_t *pt);
 void vm_pagelock(void *vir, int lockflag);