INCSDIR= /usr/include/arm
-INCS= aeabi.h ansi.h armreg.h asm.h \
+INCS= aeabi.h ansi.h aout_machdep.h armreg.h asm.h atomic.h \
bswap.h byte_swap.h \
- cdefs.h cpu.h \
+ cdefs.h cpu.h cpuconf.h \
disklabel.h \
elf_machdep.h endian.h endian_machdep.h \
- float.h \
+ float.h fp.h frame.h \
ieee.h ieeefp.h \
- proc.h int_const.h int_fmtio.h int_limits.h int_mwgwtypes.h int_types.h \
- \
+ int_const.h int_fmtio.h int_limits.h int_mwgwtypes.h int_types.h \
+ kcore.h \
limits.h lock.h \
math.h mcontext.h mutex.h \
- param.h ptrace.h profile.h rwlock.h \
- \
- rwlock.h \
- setjmp.h signal.h \
- types.h \
- \
+	param.h pcb.h pmc.h proc.h profile.h \
+ ptrace.h \
+ reg.h rwlock.h \
+ setjmp.h signal.h swi.h sysarch.h \
+ trap.h types.h \
+ vfpreg.h \
wchar_limits.h
.include <bsd.kinc.mk>
#if defined(__minix)
/* To change this, you must also change the definition of size_t in GCC
 * and adapt the following headers: int_fmt.h, int_types.h */
-#define _BSD_PTRDIFF_T_ int /* ptr1 - ptr2 */
-#define _BSD_SIZE_T_ unsigned int /* sizeof() */
-#define _BSD_SSIZE_T_ int /* byte count or error */
+#define _BSD_PTRDIFF_T_ int /* ptr1 - ptr2 */
+#define _BSD_SIZE_T_ unsigned int /* sizeof() */
+#define _BSD_SSIZE_T_ int /* byte count or error */
+#else
+#define _BSD_PTRDIFF_T_ long int /* ptr1 - ptr2 */
+#define _BSD_SIZE_T_ unsigned long int /* sizeof() */
+#define _BSD_SSIZE_T_ long int /* byte count or error */
#endif /* defined(__minix) */
#define _BSD_TIME_T_ __int64_t /* time() */
#define _BSD_CLOCKID_T_ int /* clockid_t */
--- /dev/null
+/* $NetBSD: aout_machdep.h,v 1.5 2005/12/11 12:16:46 christos Exp $ */
+
+/*
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * 4. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_AOUT_MACHDEP_H_
+#define _ARM32_AOUT_MACHDEP_H_
+
+#define AOUT_LDPGSZ 4096
+
+/* Relocation format. */
+
+struct relocation_info_arm6 {
+ int r_address; /* offset in text or data segment */
+ unsigned r_symbolnum:24;/* ordinal number of add symbol */
+ unsigned r_pcrel:1; /* 1 if value should be pc-relative */
+ unsigned r_length:2; /* 0=byte, 1=word, 2=long, 3=24bits shifted by 2 */
+ unsigned r_extern:1; /* 1 if need to add symbol to value */
+ unsigned r_neg:1; /* 1 if addend is negative */
+ unsigned r_baserel:1; /* 1 if linkage table relative */
+ unsigned r_jmptable:1; /* 1 if relocation to jump table */
+ unsigned r_relative:1; /* 1 if load address relative */
+};
+
+#define relocation_info relocation_info_arm6
+
+/* No special executable format */
+#define cpu_exec_aout_makecmds(a, b) ENOEXEC
+
+#endif /* _ARM32_AOUT_MACHDEP_H_ */
INCSDIR= /usr/include/arm/arm32
-INCS= param.h types.h vmparam.h pte.h
+INCS= frame.h katelib.h param.h pmap.h psl.h pte.h rtc.h types.h vmparam.h
.include <bsd.kinc.mk>
--- /dev/null
+/* $NetBSD: db_machdep.h,v 1.6 2012/09/21 22:12:36 matt Exp $ */
+
+#ifndef _ARM32_DB_MACHDEP_H_
+#define _ARM32_DB_MACHDEP_H_
+
+#include <arm/db_machdep.h>
+
+void db_show_panic_cmd(db_expr_t, bool, db_expr_t, const char *);
+void db_show_frame_cmd(db_expr_t, bool, db_expr_t, const char *);
+void db_show_fault_cmd(db_expr_t, bool, db_expr_t, const char *);
+
+#endif /* _ARM32_DB_MACHDEP_H_ */
--- /dev/null
+/* $NetBSD: frame.h,v 1.33 2012/08/29 07:09:12 matt Exp $ */
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * frame.h
+ *
+ * Stack frames structures
+ *
+ * Created : 30/09/94
+ */
+
+#ifndef _ARM32_FRAME_H_
+#define _ARM32_FRAME_H_
+
+#include <arm/frame.h> /* Common ARM stack frames */
+
+#ifndef _LOCORE
+
+/*
+ * System stack frames.
+ */
+
+struct clockframe {
+ struct trapframe cf_tf;
+};
+
+/*
+ * Switch frame.
+ *
+ * Should be a multiple of 8 bytes for dumpsys.
+ */
+
+struct switchframe {
+ u_int sf_r4;
+ u_int sf_r5;
+ u_int sf_r6;
+ u_int sf_r7;
+ u_int sf_sp;
+ u_int sf_pc;
+};
+
+/*
+ * Stack frame. Used during stack traces (db_trace.c)
+ */
+struct frame {
+ u_int fr_fp;
+ u_int fr_sp;
+ u_int fr_lr;
+ u_int fr_pc;
+};
+
+#ifdef _KERNEL
+void validate_trapframe(trapframe_t *, int);
+#endif /* _KERNEL */
+
+#else /* _LOCORE */
+
+#include "opt_compat_netbsd.h"
+#include "opt_execfmt.h"
+#include "opt_multiprocessor.h"
+#include "opt_cpuoptions.h"
+#include "opt_arm_debug.h"
+#include "opt_cputypes.h"
+
+#include <machine/cpu.h>
+
+/*
+ * This macro is used by DO_AST_AND_RESTORE_ALIGNMENT_FAULTS to process
+ * any pending softints.
+ */
+#if defined(__HAVE_FAST_SOFTINTS) && !defined(__HAVE_PIC_FAST_SOFTINTS)
+#define DO_PENDING_SOFTINTS \
+ ldr r0, [r4, #CI_INTR_DEPTH]/* Get current intr depth */ ;\
+ teq r0, #0 /* Test for 0. */ ;\
+ bne 10f /* skip softints if != 0 */ ;\
+ ldr r0, [r4, #CI_CPL] /* Get current priority level */;\
+ ldr r1, [r4, #CI_SOFTINTS] /* Get pending softint mask */ ;\
+ lsrs r0, r1, r0 /* shift mask by cpl */ ;\
+ blne _C_LABEL(dosoftints) /* dosoftints(void) */ ;\
+10:
+#else
+#define DO_PENDING_SOFTINTS /* nothing */
+#endif
+
+#ifdef MULTIPROCESSOR
+#define KERNEL_LOCK \
+ mov r0, #1 ;\
+ mov r1, #0 ;\
+ bl _C_LABEL(_kernel_lock)
+
+#define KERNEL_UNLOCK \
+ mov r0, #1 ;\
+ mov r1, #0 ;\
+ mov r2, #0 ;\
+ bl _C_LABEL(_kernel_unlock)
+#else
+#define KERNEL_LOCK /* nothing */
+#define KERNEL_UNLOCK /* nothing */
+#endif
+
+#ifdef _ARM_ARCH_6
+#define GET_CPSR(rb) /* nothing */
+#define CPSID_I(ra,rb) cpsid i
+#define CPSIE_I(ra,rb) cpsie i
+#else
+#define GET_CPSR(rb) \
+ mrs rb, cpsr /* fetch CPSR */
+
+#define CPSID_I(ra,rb) \
+ orr ra, rb, #(IF32_bits) ;\
+ msr cpsr_c, ra /* Disable interrupts */
+
+#define CPSIE_I(ra,rb) \
+ bic ra, rb, #(IF32_bits) ;\
+ msr cpsr_c, ra /* Restore interrupts */
+#endif
+
+/*
+ * AST_ALIGNMENT_FAULT_LOCALS and ENABLE_ALIGNMENT_FAULTS
+ * These are used in order to support dynamic enabling/disabling of
+ * alignment faults when executing old a.out ARM binaries.
+ *
+ * Note that when ENABLE_ALIGNMENT_FAULTS finishes, r4 will contain a
+ * pointer to the cpu's cpu_info. DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
+ * relies on r4 being preserved.
+ */
+#ifdef EXEC_AOUT
+#define AST_ALIGNMENT_FAULT_LOCALS \
+.Laflt_cpufuncs: ;\
+ .word _C_LABEL(cpufuncs)
+
+/*
+ * This macro must be invoked following PUSHFRAMEINSVC or PUSHFRAME at
+ * the top of interrupt/exception handlers.
+ *
+ * When invoked, r0 *must* contain the value of SPSR on the current
+ * trap/interrupt frame. This is always the case if ENABLE_ALIGNMENT_FAULTS
+ * is invoked immediately after PUSHFRAMEINSVC or PUSHFRAME.
+ */
+#define ENABLE_ALIGNMENT_FAULTS \
+ and r7, r0, #(PSR_MODE) /* Test for USR32 mode */ ;\
+ teq r7, #(PSR_USR32_MODE) ;\
+ GET_CURCPU(r4) /* r4 = cpuinfo */ ;\
+ bne 1f /* Not USR mode skip AFLT */ ;\
+ ldr r1, [r4, #CI_CURLWP] /* get curlwp from cpu_info */ ;\
+ ldr r1, [r1, #L_MD_FLAGS] /* Fetch l_md.md_flags */ ;\
+ tst r1, #MDLWP_NOALIGNFLT ;\
+ beq 1f /* AFLTs already enabled */ ;\
+ ldr r2, .Laflt_cpufuncs ;\
+ ldr r1, [r4, #CI_CTRL] /* Fetch control register */ ;\
+ mov r0, #-1 ;\
+ mov lr, pc ;\
+ ldr pc, [r2, #CF_CONTROL] /* Enable alignment faults */ ;\
+1: KERNEL_LOCK
+
+/*
+ * This macro must be invoked just before PULLFRAMEFROMSVCANDEXIT or
+ * PULLFRAME at the end of interrupt/exception handlers. We know that
+ * r4 points to cpu_info since that is what ENABLE_ALIGNMENT_FAULTS did
+ * for us.
+ */
+#define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \
+ DO_PENDING_SOFTINTS ;\
+ GET_CPSR(r5) /* save CPSR */ ;\
+ CPSID_I(r1, r5) /* Disable interrupts */ ;\
+ teq r7, #(PSR_USR32_MODE) /* Returning to USR mode? */ ;\
+ bne 3f /* Nope, get out now */ ;\
+1: ldr r1, [r4, #CI_ASTPENDING] /* Pending AST? */ ;\
+ teq r1, #0x00000000 ;\
+ bne 2f /* Yup. Go deal with it */ ;\
+ ldr r1, [r4, #CI_CURLWP] /* get curlwp from cpu_info */ ;\
+ ldr r0, [r1, #L_MD_FLAGS] /* get md_flags from lwp */ ;\
+ tst r0, #MDLWP_NOALIGNFLT ;\
+ beq 3f /* Keep AFLTs enabled */ ;\
+ ldr r1, [r4, #CI_CTRL] /* Fetch control register */ ;\
+ ldr r2, .Laflt_cpufuncs ;\
+ mov r0, #-1 ;\
+ bic r1, r1, #CPU_CONTROL_AFLT_ENABLE /* Disable AFLTs */ ;\
+ adr lr, 3f ;\
+ ldr pc, [r2, #CF_CONTROL] /* Set new CTRL reg value */ ;\
+ /* NOTREACHED */ \
+2: mov r1, #0x00000000 ;\
+ str r1, [r4, #CI_ASTPENDING] /* Clear astpending */ ;\
+ CPSIE_I(r5, r5) /* Restore interrupts */ ;\
+ mov r0, sp ;\
+ bl _C_LABEL(ast) /* ast(frame) */ ;\
+ CPSID_I(r0, r5) /* Disable interrupts */ ;\
+ b 1b /* Back around again */ ;\
+3: KERNEL_UNLOCK
+
+#else /* !EXEC_AOUT */
+
+#define AST_ALIGNMENT_FAULT_LOCALS
+
+#define ENABLE_ALIGNMENT_FAULTS \
+ and r7, r0, #(PSR_MODE) /* Test for USR32 mode */ ;\
+ GET_CURCPU(r4) /* r4 = cpuinfo */ ;\
+ KERNEL_LOCK
+
+#define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \
+ DO_PENDING_SOFTINTS ;\
+ GET_CPSR(r5) /* save CPSR */ ;\
+ CPSID_I(r1, r5) /* Disable interrupts */ ;\
+ teq r7, #(PSR_USR32_MODE) ;\
+ bne 2f /* Nope, get out now */ ;\
+1: ldr r1, [r4, #CI_ASTPENDING] /* Pending AST? */ ;\
+ teq r1, #0x00000000 ;\
+ beq 2f /* Nope. Just bail */ ;\
+ mov r1, #0x00000000 ;\
+ str r1, [r4, #CI_ASTPENDING] /* Clear astpending */ ;\
+ CPSIE_I(r5, r5) /* Restore interrupts */ ;\
+ mov r0, sp ;\
+ bl _C_LABEL(ast) /* ast(frame) */ ;\
+ CPSID_I(r0, r5) /* Disable interrupts */ ;\
+ b 1b ;\
+2: KERNEL_UNLOCK /* unlock the kernel */
+#endif /* EXEC_AOUT */
+
+#ifndef _ARM_ARCH_6
+#ifdef ARM_LOCK_CAS_DEBUG
+#define LOCK_CAS_DEBUG_LOCALS \
+.L_lock_cas_restart: ;\
+ .word _C_LABEL(_lock_cas_restart)
+
+#if defined(__ARMEB__)
+#define LOCK_CAS_DEBUG_COUNT_RESTART \
+ ble 99f ;\
+ ldr r0, .L_lock_cas_restart ;\
+ ldmia r0, {r1-r2} /* load ev_count */ ;\
+ adds r2, r2, #1 /* 64-bit incr (lo) */ ;\
+ adc r1, r1, #0 /* 64-bit incr (hi) */ ;\
+ stmia r0, {r1-r2} /* store ev_count */
+#else /* __ARMEB__ */
+#define LOCK_CAS_DEBUG_COUNT_RESTART \
+ ble 99f ;\
+ ldr r0, .L_lock_cas_restart ;\
+ ldmia r0, {r1-r2} /* load ev_count */ ;\
+ adds r1, r1, #1 /* 64-bit incr (lo) */ ;\
+ adc r2, r2, #0 /* 64-bit incr (hi) */ ;\
+ stmia r0, {r1-r2} /* store ev_count */
+#endif /* __ARMEB__ */
+#else /* ARM_LOCK_CAS_DEBUG */
+#define LOCK_CAS_DEBUG_LOCALS /* nothing */
+#define LOCK_CAS_DEBUG_COUNT_RESTART /* nothing */
+#endif /* ARM_LOCK_CAS_DEBUG */
+
+#define LOCK_CAS_CHECK_LOCALS \
+.L_lock_cas: ;\
+ .word _C_LABEL(_lock_cas) ;\
+.L_lock_cas_end: ;\
+ .word _C_LABEL(_lock_cas_end) ;\
+LOCK_CAS_DEBUG_LOCALS
+
+#define LOCK_CAS_CHECK \
+ ldr r0, [sp] /* get saved PSR */ ;\
+ and r0, r0, #(PSR_MODE) /* check for SVC32 mode */ ;\
+ teq r0, #(PSR_SVC32_MODE) ;\
+ bne 99f /* nope, get out now */ ;\
+ ldr r0, [sp, #(TF_PC)] ;\
+ ldr r1, .L_lock_cas_end ;\
+ cmp r0, r1 ;\
+ bge 99f ;\
+ ldr r1, .L_lock_cas ;\
+ cmp r0, r1 ;\
+ strgt r1, [sp, #(TF_PC)] ;\
+ LOCK_CAS_DEBUG_COUNT_RESTART ;\
+99:
+
+#else
+#define LOCK_CAS_CHECK /* nothing */
+#define LOCK_CAS_CHECK_LOCALS /* nothing */
+#endif
+
+/*
+ * ASM macros for pushing and pulling trapframes from the stack
+ *
+ * These macros are used to handle the trapframe structure defined above.
+ */
+
+/*
+ * PUSHFRAME - macro to push a trap frame on the stack in the current mode
+ * Since the current mode is used, the SVC lr field is not defined.
+ */
+
+#ifdef CPU_SA110
+/*
+ * NOTE: r13 and r14 are stored separately as a work around for the
+ * SA110 rev 2 STM^ bug
+ */
+#define PUSHUSERREGS \
+ stmia sp, {r0-r12}; /* Push the user mode registers */ \
+ add r0, sp, #(4*13); /* Adjust the stack pointer */ \
+ stmia r0, {r13-r14}^ /* Push the user mode registers */
+#else
+#define PUSHUSERREGS \
+ stmia sp, {r0-r14}^ /* Push the user mode registers */
+#endif
+
+#define PUSHFRAME \
+ str lr, [sp, #-4]!; /* Push the return address */ \
+ sub sp, sp, #(4*17); /* Adjust the stack pointer */ \
+ PUSHUSERREGS; /* Push the user mode registers */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ mrs r0, spsr_all; /* Get the SPSR */ \
+ str r0, [sp, #-8]! /* Push the SPSR on the stack */
+
+/*
+ * PULLFRAME - macro to pull a trap frame from the stack in the current mode
+ * Since the current mode is used, the SVC lr field is ignored.
+ */
+
+#define PULLFRAME \
+ ldr r0, [sp], #0x0008; /* Pop the SPSR from stack */ \
+ msr spsr_all, r0; \
+ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ add sp, sp, #(4*17); /* Adjust the stack pointer */ \
+ ldr lr, [sp], #0x0004 /* Pop the return address */
+
+/*
+ * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode
+ * This should only be used if the processor is not currently in SVC32
+ * mode. The processor mode is switched to SVC mode and the trap frame is
+ * stored. The SVC lr field is used to store the previous value of
+ * lr in SVC mode.
+ *
+ * NOTE: r13 and r14 are stored separately as a work around for the
+ * SA110 rev 2 STM^ bug
+ */
+
+#ifdef _ARM_ARCH_6
+#define SET_CPSR_MODE(tmp, mode) \
+ cps #(mode)
+#else
+#define SET_CPSR_MODE(tmp, mode) \
+ mrs tmp, cpsr; /* Get the CPSR */ \
+ bic tmp, tmp, #(PSR_MODE); /* Fix for SVC mode */ \
+ orr tmp, tmp, #(mode); \
+ msr cpsr_c, tmp /* Punch into SVC mode */
+#endif
+
+#define PUSHFRAMEINSVC \
+ stmdb sp, {r0-r3}; /* Save 4 registers */ \
+ mov r0, lr; /* Save xxx32 r14 */ \
+ mov r1, sp; /* Save xxx32 sp */ \
+ mrs r3, spsr; /* Save xxx32 spsr */ \
+ SET_CPSR_MODE(r2, PSR_SVC32_MODE); \
+ bic r2, sp, #7; /* Align new SVC sp */ \
+ str r0, [r2, #-4]!; /* Push return address */ \
+ stmdb r2!, {sp, lr}; /* Push SVC sp, lr */ \
+ mov sp, r2; /* Keep stack aligned */ \
+ msr spsr_all, r3; /* Restore correct spsr */ \
+ ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \
+ sub sp, sp, #(4*15); /* Adjust the stack pointer */ \
+ PUSHUSERREGS; /* Push the user mode registers */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ mrs r0, spsr_all; /* Get the SPSR */ \
+ str r0, [sp, #-8]! /* Push the SPSR onto the stack */
+
+/*
+ * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack
+ * in SVC32 mode and restore the saved processor mode and PC.
+ * This should be used when the SVC lr register needs to be restored on
+ * exit.
+ */
+
+#define PULLFRAMEFROMSVCANDEXIT \
+ ldr r0, [sp], #0x0008; /* Pop the SPSR from stack */ \
+ msr spsr_all, r0; /* restore SPSR */ \
+ ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \
+ mov r0, r0; /* NOP for previous instruction */ \
+ add sp, sp, #(4*15); /* Adjust the stack pointer */ \
+ ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */
+
+#endif /* _LOCORE */
+
+#endif /* _ARM32_FRAME_H_ */
--- /dev/null
+/* $NetBSD: katelib.h,v 1.3 2001/11/23 19:21:48 thorpej Exp $ */
+
+/*
+ * Copyright (c) 1994-1996 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * katelib.h
+ *
+ * Prototypes for machine specific functions. Most of these
+ * could be inlined.
+ *
+ * This should not really be a separate header file. Eventually I will merge
+ * this into other header files once I have decided where the declarations
+ * should go.
+ *
+ * Created : 18/09/94
+ *
+ * Based on kate/katelib/prototypes.h
+ */
+
+/*
+ * USE OF THIS FILE IS DEPRECATED
+ */
+
+#include <sys/types.h>
+#include <arm/cpufunc.h>
+
+#ifdef _KERNEL
+
+/* Assembly modules */
+
+/* In blockio.S */
+#include <arm/blockio.h>
+
+/* Macros for reading and writing words, shorts, bytes */
+
+#define WriteWord(a, b) \
+*((volatile unsigned int *)(a)) = (b)
+
+#define ReadWord(a) \
+(*((volatile unsigned int *)(a)))
+
+#define WriteShort(a, b) \
+*((volatile unsigned int *)(a)) = ((b) | ((b) << 16))
+
+#define ReadShort(a) \
+((*((volatile unsigned int *)(a))) & 0xffff)
+
+#define WriteByte(a, b) \
+*((volatile unsigned char *)(a)) = (b)
+
+#define ReadByte(a) \
+(*((volatile unsigned char *)(a)))
+
+/* Define in/out macros */
+
+#define inb(port) ReadByte((port))
+#define outb(port, byte) WriteByte((port), (byte))
+#define inw(port) ReadShort((port))
+#define outw(port, word) WriteShort((port), (word))
+#define inl(port) ReadWord((port))
+#define outl(port, lword) WriteWord((port), (lword))
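+
+/*
+ * Example usage of the accessors above (illustrative only; the addresses
+ * are arbitrary placeholders rather than real device registers):
+ *
+ *	unsigned int status = inl(0xf1000000);
+ *	outb(0xf1000004, 0x01);
+ */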
+
+#endif
+
+/* End of katelib.h */
--- /dev/null
+/* $NetBSD: machdep.h,v 1.16 2012/09/01 12:19:32 martin Exp $ */
+
+#ifndef _ARM32_BOOT_MACHDEP_H_
+#define _ARM32_BOOT_MACHDEP_H_
+
+/* Define various stack sizes in pages */
+#ifndef IRQ_STACK_SIZE
+#define IRQ_STACK_SIZE 1
+#endif
+#ifndef ABT_STACK_SIZE
+#define ABT_STACK_SIZE 1
+#endif
+#ifndef UND_STACK_SIZE
+#ifdef IPKDB
+#define UND_STACK_SIZE 2
+#else
+#define UND_STACK_SIZE 1
+#endif
+#endif
+#ifndef FIQ_STACK_SIZE
+#define FIQ_STACK_SIZE 1
+#endif
+
+
+extern void (*cpu_reset_address)(void);
+extern paddr_t cpu_reset_address_paddr;
+
+extern u_int data_abort_handler_address;
+extern u_int prefetch_abort_handler_address;
+// extern u_int undefined_handler_address;
+#define undefined_handler_address (curcpu()->ci_undefsave[2])
+
+struct bootmem_info {
+ paddr_t bmi_start;
+ paddr_t bmi_kernelstart;
+ paddr_t bmi_kernelend;
+ paddr_t bmi_end;
+ pv_addrqh_t bmi_freechunks;
+ pv_addrqh_t bmi_chunks; /* sorted list of memory to be mapped */
+ pv_addr_t bmi_freeblocks[4];
+ /*
+ * These need to be static for pmap's kernel_pt list.
+ */
+ pv_addr_t bmi_vector_l2pt;
+ pv_addr_t bmi_io_l2pt;
+ pv_addr_t bmi_l2pts[16];
+ u_int bmi_freepages;
+ u_int bmi_nfreeblocks;
+};
+
+extern struct bootmem_info bootmem_info;
+
+extern char *booted_kernel;
+
+extern volatile uint32_t arm_cpu_hatched;
+extern uint32_t arm_cpu_mbox;
+extern u_int arm_cpu_max;
+
+/* misc prototypes used by the many arm machdeps */
+void cortex_pmc_ccnt_init(void);
+void cpu_hatch(struct cpu_info *, cpuid_t, void (*)(struct cpu_info *));
+void halt(void);
+void parse_mi_bootargs(char *);
+void data_abort_handler(trapframe_t *);
+void prefetch_abort_handler(trapframe_t *);
+void undefinedinstruction_bounce(trapframe_t *);
+void dumpsys(void);
+
+/*
+ * note that we use void * as all the platforms have different ideas on what
+ * the structure is
+ */
+u_int initarm(void *);
+struct pmap_devmap;
+struct boot_physmem;
+void arm32_bootmem_init(paddr_t memstart, psize_t memsize, paddr_t kernelstart);
+void arm32_kernel_vm_init(vaddr_t kvm_base, vaddr_t vectors,
+ vaddr_t iovbase /* (can be zero) */,
+ const struct pmap_devmap *devmap, bool mapallmem_p);
+vaddr_t initarm_common(vaddr_t kvm_base, vsize_t kvm_size,
+ const struct boot_physmem *bp, size_t nbp);
+
+
+/* from arm/arm32/intr.c */
+void dosoftints(void);
+void set_spl_masks(void);
+#ifdef DIAGNOSTIC
+void dump_spl_masks(void);
+#endif
+#endif /* _ARM32_BOOT_MACHDEP_H_ */
--- /dev/null
+/* $NetBSD: pmap.h,v 1.112 2012/09/22 00:33:38 matt Exp $ */
+
+/*
+ * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1994,1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_PMAP_H_
+#define _ARM32_PMAP_H_
+
+#ifdef _KERNEL
+
+#include <arm/cpuconf.h>
+#include <arm/arm32/pte.h>
+#ifndef _LOCORE
+#if defined(_KERNEL_OPT)
+#include "opt_arm32_pmap.h"
+#endif
+#include <arm/cpufunc.h>
+#include <uvm/uvm_object.h>
+#endif
+
+/*
+ * a pmap describes a process's 4GB virtual address space. this
+ * virtual address space can be broken up into 4096 1MB regions which
+ * are described by L1 PTEs in the L1 table.
+ *
+ * There is a line drawn at KERNEL_BASE. Everything below that line
+ * changes when the VM context is switched. Everything above that line
+ * is the same no matter which VM context is running. This is achieved
+ * by making the L1 PTEs for those slots above KERNEL_BASE reference
+ * kernel L2 tables.
+ *
+ * The basic layout of the virtual address space thus looks like this:
+ *
+ * 0xffffffff
+ * .
+ * .
+ * .
+ * KERNEL_BASE
+ * --------------------
+ * .
+ * .
+ * .
+ * 0x00000000
+ */
+
+/*
+ * The number of L2 descriptor tables which can be tracked by an l2_dtable.
+ * A bucket size of 16 provides for 16MB of contiguous virtual address
+ * space per l2_dtable. Most processes will, therefore, require only two or
+ * three of these to map their whole working set.
+ */
+#define L2_BUCKET_LOG2 4
+#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
+
+/*
+ * Given the above "L2-descriptors-per-l2_dtable" constant, the number
+ * of l2_dtable structures required to track all possible page descriptors
+ * mappable by an L1 translation table is given by the following constants:
+ */
+#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
+#define L2_SIZE (1 << L2_LOG2)
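+
+/*
+ * For example, with the usual 1MB L1 sections (L1_S_SHIFT == 20) this gives
+ * L2_LOG2 = (32 - 20) - 4 = 8, i.e. L2_SIZE = 256 l2_dtable slots of 16MB
+ * each, which together span the full 4GB address space.
+ */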
+
+/*
+ * tell MI code that the cache is virtually-indexed.
+ * ARMv6 is physically-tagged but all others are virtually-tagged.
+ */
+#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
+#define PMAP_CACHE_VIPT
+#else
+#define PMAP_CACHE_VIVT
+#endif
+
+#ifndef _LOCORE
+
+struct l1_ttable;
+struct l2_dtable;
+
+/*
+ * Track cache/tlb occupancy using the following structure
+ */
+union pmap_cache_state {
+ struct {
+ union {
+ u_int8_t csu_cache_b[2];
+ u_int16_t csu_cache;
+ } cs_cache_u;
+
+ union {
+ u_int8_t csu_tlb_b[2];
+ u_int16_t csu_tlb;
+ } cs_tlb_u;
+ } cs_s;
+ u_int32_t cs_all;
+};
+#define cs_cache_id cs_s.cs_cache_u.csu_cache_b[0]
+#define cs_cache_d cs_s.cs_cache_u.csu_cache_b[1]
+#define cs_cache cs_s.cs_cache_u.csu_cache
+#define cs_tlb_id cs_s.cs_tlb_u.csu_tlb_b[0]
+#define cs_tlb_d cs_s.cs_tlb_u.csu_tlb_b[1]
+#define cs_tlb cs_s.cs_tlb_u.csu_tlb
+
+/*
+ * Assigned to cs_all to force cacheops to work for a particular pmap
+ */
+#define PMAP_CACHE_STATE_ALL 0xffffffffu
+
+/*
+ * This structure is used by machine-dependent code to describe
+ * static mappings of devices, created at bootstrap time.
+ */
+struct pmap_devmap {
+ vaddr_t pd_va; /* virtual address */
+ paddr_t pd_pa; /* physical address */
+ psize_t pd_size; /* size of region */
+ vm_prot_t pd_prot; /* protection code */
+ int pd_cache; /* cache attributes */
+};
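+
+/*
+ * A devmap table is typically terminated by an all-zero entry; the entries
+ * below are illustrative placeholders only:
+ *
+ *	static const struct pmap_devmap board_devmap[] = {
+ *		{ BOARD_UART_VBASE, BOARD_UART_PBASE, BOARD_UART_SIZE,
+ *		  VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
+ *		{ 0, 0, 0, 0, 0 }
+ *	};
+ */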
+
+/*
+ * The pmap structure itself
+ */
+struct pmap {
+ u_int8_t pm_domain;
+ bool pm_remove_all;
+ bool pm_activated;
+ struct l1_ttable *pm_l1;
+ pd_entry_t *pm_pl1vec;
+ pd_entry_t pm_l1vec;
+ union pmap_cache_state pm_cstate;
+ struct uvm_object pm_obj;
+ kmutex_t pm_obj_lock;
+#define pm_lock pm_obj.vmobjlock
+ struct l2_dtable *pm_l2[L2_SIZE];
+ struct pmap_statistics pm_stats;
+ LIST_ENTRY(pmap) pm_list;
+};
+
+/*
+ * Physical / virtual address structure. In a number of places (particularly
+ * during bootstrapping) we need to keep track of the physical and virtual
+ * addresses of various pages
+ */
+typedef struct pv_addr {
+ SLIST_ENTRY(pv_addr) pv_list;
+ paddr_t pv_pa;
+ vaddr_t pv_va;
+ vsize_t pv_size;
+ uint8_t pv_cache;
+ uint8_t pv_prot;
+} pv_addr_t;
+typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
+
+extern pv_addrqh_t pmap_freeq;
+extern pv_addr_t kernelstack;
+extern pv_addr_t abtstack;
+extern pv_addr_t fiqstack;
+extern pv_addr_t irqstack;
+extern pv_addr_t undstack;
+extern pv_addr_t idlestack;
+extern pv_addr_t systempage;
+extern pv_addr_t kernel_l1pt;
+
+/*
+ * Determine various modes for PTEs (user vs. kernel, cacheable
+ * vs. non-cacheable).
+ */
+#define PTE_KERNEL 0
+#define PTE_USER 1
+#define PTE_NOCACHE 0
+#define PTE_CACHE 1
+#define PTE_PAGETABLE 2
+
+/*
+ * Flags that indicate attributes of pages or mappings of pages.
+ *
+ * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
+ * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
+ * pv_entry's for each page. They live in the same "namespace" so
+ * that we can clear multiple attributes at a time.
+ *
+ * Note the "non-cacheable" flag generally means the page has
+ * multiple mappings in a given address space.
+ */
+#define PVF_MOD 0x01 /* page is modified */
+#define PVF_REF 0x02 /* page is referenced */
+#define PVF_WIRED 0x04 /* mapping is wired */
+#define PVF_WRITE 0x08 /* mapping is writable */
+#define PVF_EXEC 0x10 /* mapping is executable */
+#ifdef PMAP_CACHE_VIVT
+#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */
+#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */
+#define PVF_NC (PVF_UNC|PVF_KNC)
+#endif
+#ifdef PMAP_CACHE_VIPT
+#define PVF_NC 0x20 /* mapping is 'kernel' non-cacheable */
+#define PVF_MULTCLR 0x40 /* mapping is multi-colored */
+#endif
+#define PVF_COLORED 0x80 /* page has or had a color */
+#define PVF_KENTRY 0x0100 /* page entered via pmap_kenter_pa */
+#define PVF_KMPAGE 0x0200 /* page is used for kmem */
+#define PVF_DIRTY 0x0400 /* page may have dirty cache lines */
+#define PVF_KMOD 0x0800 /* unmanaged page is modified */
+#define PVF_KWRITE (PVF_KENTRY|PVF_WRITE)
+#define PVF_DMOD (PVF_MOD|PVF_KMOD|PVF_KMPAGE)
+
+/*
+ * Commonly referenced structures
+ */
+extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */
+
+/*
+ * Macros that we need to export
+ */
+#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
+#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
+
+#define pmap_is_modified(pg) \
+ (((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
+#define pmap_is_referenced(pg) \
+ (((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
+#define pmap_is_page_colored_p(md) \
+ (((md)->pvh_attrs & PVF_COLORED) != 0)
+
+#define pmap_copy(dp, sp, da, l, sa) /* nothing */
+
+#define pmap_phys_address(ppn) (arm_ptob((ppn)))
+u_int arm32_mmap_flags(paddr_t);
+#define ARM32_MMAP_WRITECOMBINE 0x40000000
+#define ARM32_MMAP_CACHEABLE 0x20000000
+#define pmap_mmap_flags(ppn) arm32_mmap_flags(ppn)
+
+/*
+ * Functions that we need to export
+ */
+void pmap_procwr(struct proc *, vaddr_t, int);
+void pmap_remove_all(pmap_t);
+bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
+
+#define PMAP_NEED_PROCWR
+#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
+#define PMAP_ENABLE_PMAP_KMPAGE /* enable the PMAP_KMPAGE flag */
+
+#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
+#define PMAP_PREFER(hint, vap, sz, td) pmap_prefer((hint), (vap), (td))
+void pmap_prefer(vaddr_t, vaddr_t *, int);
+#endif
+
+void pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
+
+/* Functions we use internally. */
+#ifdef PMAP_STEAL_MEMORY
+void pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
+void pmap_boot_pageadd(pv_addr_t *);
+vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
+#endif
+void pmap_bootstrap(vaddr_t, vaddr_t);
+
+void pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
+int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
+bool pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
+bool pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
+void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
+
+void pmap_debug(int);
+void pmap_postinit(void);
+
+void vector_page_setprot(int);
+
+const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
+const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
+
+/* Bootstrapping routines. */
+void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
+void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
+vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
+void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
+void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
+void pmap_devmap_register(const struct pmap_devmap *);
+
+/*
+ * Special page zero routine for use by the idle loop (no cache cleans).
+ */
+bool pmap_pageidlezero(paddr_t);
+#define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa))
+
+/*
+ * used by dumpsys to record the PA of the L1 table
+ */
+uint32_t pmap_kernel_L1_addr(void);
+/*
+ * The current top of kernel VM
+ */
+extern vaddr_t pmap_curmaxkvaddr;
+
+/*
+ * Useful macros and constants
+ */
+
+/* Virtual address to page table entry */
+static inline pt_entry_t *
+vtopte(vaddr_t va)
+{
+ pd_entry_t *pdep;
+ pt_entry_t *ptep;
+
+ if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
+ return (NULL);
+ return (ptep);
+}
+
+/*
+ * Virtual address to physical address
+ */
+static inline paddr_t
+vtophys(vaddr_t va)
+{
+ paddr_t pa;
+
+ if (pmap_extract(pmap_kernel(), va, &pa) == false)
+ return (0); /* XXXSCW: Panic? */
+
+ return (pa);
+}
+
+/*
+ * The new pmap ensures that page-tables are always mapping Write-Thru.
+ * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
+ * on every change.
+ *
+ * Unfortunately, not all CPUs have a write-through cache mode. So we
+ * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
+ * and if there is the chance for PTE syncs to be needed, we define
+ * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
+ * the code.
+ */
+extern int pmap_needs_pte_sync;
+#if defined(_KERNEL_OPT)
+/*
+ * StrongARM SA-1 caches do not have a write-through mode. So, on these,
+ * we need to do PTE syncs. If only SA-1 is configured, then evaluate
+ * this at compile time.
+ */
+#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
+#define PMAP_INCLUDE_PTE_SYNC
+#if (ARM_MMU_V6 > 0)
+#define PMAP_NEEDS_PTE_SYNC 1
+#elif (ARM_MMU_SA1 == 0)
+#define PMAP_NEEDS_PTE_SYNC 0
+#endif
+#endif
+#endif /* _KERNEL_OPT */
+
+/*
+ * Provide a fallback in case we were not able to determine it at
+ * compile-time.
+ */
+#ifndef PMAP_NEEDS_PTE_SYNC
+#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
+#define PMAP_INCLUDE_PTE_SYNC
+#endif
+
+static inline void
+pmap_ptesync(pt_entry_t *ptep, size_t cnt)
+{
+ if (PMAP_NEEDS_PTE_SYNC)
+ cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
+#if ARM_MMU_V7 > 0
+ __asm("dsb");
+#endif
+}
+
+#define PTE_SYNC(ptep) pmap_ptesync((ptep), 1)
+#define PTE_SYNC_RANGE(ptep, cnt) pmap_ptesync((ptep), (cnt))
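+
+/*
+ * Typical usage when a PTE is modified (illustrative):
+ *
+ *	*ptep = l2pte_set_readonly(*ptep);
+ *	PTE_SYNC(ptep);
+ */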
+
+#define l1pte_valid(pde) ((pde) != 0)
+#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
+#define l1pte_supersection_p(pde) (l1pte_section_p(pde) \
+ && ((pde) & L1_S_V6_SUPER) != 0)
+#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
+#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
+
+#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
+#define l2pte_valid(pte) (((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
+#define l2pte_pa(pte) ((pte) & L2_S_FRAME)
+#define l2pte_minidata(pte) (((pte) & \
+ (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
+ == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))
+
+/* L1 and L2 page table macros */
+#define pmap_pde_v(pde) l1pte_valid(*(pde))
+#define pmap_pde_section(pde) l1pte_section_p(*(pde))
+#define pmap_pde_supersection(pde) l1pte_supersection_p(*(pde))
+#define pmap_pde_page(pde) l1pte_page_p(*(pde))
+#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde))
+
+#define pmap_pte_v(pte) l2pte_valid(*(pte))
+#define pmap_pte_pa(pte) l2pte_pa(*(pte))
+
+/* Size of the kernel part of the L1 page table */
+#define KERNEL_PD_SIZE \
+ (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
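+
+/*
+ * For example, if KERNEL_BASE is 0xc0000000 and the L1 table is 16KB, this
+ * evaluates to 16384 - 3072 * sizeof(pd_entry_t) = 4096 bytes.
+ */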
+
+/************************* ARM MMU configuration *****************************/
+
+#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
+void pmap_copy_page_generic(paddr_t, paddr_t);
+void pmap_zero_page_generic(paddr_t);
+
+void pmap_pte_init_generic(void);
+#if defined(CPU_ARM8)
+void pmap_pte_init_arm8(void);
+#endif
+#if defined(CPU_ARM9)
+void pmap_pte_init_arm9(void);
+#endif /* CPU_ARM9 */
+#if defined(CPU_ARM10)
+void pmap_pte_init_arm10(void);
+#endif /* CPU_ARM10 */
+#if defined(CPU_ARM11) /* ARM_MMU_V6 */
+void pmap_pte_init_arm11(void);
+#endif /* CPU_ARM11 */
+#if defined(CPU_ARM11MPCORE) /* ARM_MMU_V6 */
+void pmap_pte_init_arm11mpcore(void);
+#endif
+#if ARM_MMU_V7 == 1
+void pmap_pte_init_armv7(void);
+#endif /* ARM_MMU_V7 */
+#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
+
+#if ARM_MMU_SA1 == 1
+void pmap_pte_init_sa1(void);
+#endif /* ARM_MMU_SA1 == 1 */
+
+#if ARM_MMU_XSCALE == 1
+void pmap_copy_page_xscale(paddr_t, paddr_t);
+void pmap_zero_page_xscale(paddr_t);
+
+void pmap_pte_init_xscale(void);
+
+void xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
+
+#define PMAP_UAREA(va) pmap_uarea(va)
+void pmap_uarea(vaddr_t);
+#endif /* ARM_MMU_XSCALE == 1 */
+
+extern pt_entry_t pte_l1_s_cache_mode;
+extern pt_entry_t pte_l1_s_cache_mask;
+
+extern pt_entry_t pte_l2_l_cache_mode;
+extern pt_entry_t pte_l2_l_cache_mask;
+
+extern pt_entry_t pte_l2_s_cache_mode;
+extern pt_entry_t pte_l2_s_cache_mask;
+
+extern pt_entry_t pte_l1_s_cache_mode_pt;
+extern pt_entry_t pte_l2_l_cache_mode_pt;
+extern pt_entry_t pte_l2_s_cache_mode_pt;
+
+extern pt_entry_t pte_l1_s_wc_mode;
+extern pt_entry_t pte_l2_l_wc_mode;
+extern pt_entry_t pte_l2_s_wc_mode;
+
+extern pt_entry_t pte_l1_s_prot_u;
+extern pt_entry_t pte_l1_s_prot_w;
+extern pt_entry_t pte_l1_s_prot_ro;
+extern pt_entry_t pte_l1_s_prot_mask;
+
+extern pt_entry_t pte_l2_s_prot_u;
+extern pt_entry_t pte_l2_s_prot_w;
+extern pt_entry_t pte_l2_s_prot_ro;
+extern pt_entry_t pte_l2_s_prot_mask;
+
+extern pt_entry_t pte_l2_l_prot_u;
+extern pt_entry_t pte_l2_l_prot_w;
+extern pt_entry_t pte_l2_l_prot_ro;
+extern pt_entry_t pte_l2_l_prot_mask;
+
+extern pt_entry_t pte_l1_ss_proto;
+extern pt_entry_t pte_l1_s_proto;
+extern pt_entry_t pte_l1_c_proto;
+extern pt_entry_t pte_l2_s_proto;
+
+extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
+extern void (*pmap_zero_page_func)(paddr_t);
+
+#endif /* !_LOCORE */
+
+/*****************************************************************************/
+
+/*
+ * Definitions for MMU domains
+ */
+#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */
+#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
+
+/*
+ * These macros define the various bit masks in the PTE.
+ *
+ * We use these macros since we use different bits on different processor
+ * models.
+ */
+#define L1_S_PROT_U_generic (L1_S_AP(AP_U))
+#define L1_S_PROT_W_generic (L1_S_AP(AP_W))
+#define L1_S_PROT_RO_generic (0)
+#define L1_S_PROT_MASK_generic (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
+
+#define L1_S_PROT_U_xscale (L1_S_AP(AP_U))
+#define L1_S_PROT_W_xscale (L1_S_AP(AP_W))
+#define L1_S_PROT_RO_xscale (0)
+#define L1_S_PROT_MASK_xscale (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
+
+#define L1_S_PROT_U_armv6 (L1_S_AP(AP_R) | L1_S_AP(AP_U))
+#define L1_S_PROT_W_armv6 (L1_S_AP(AP_W))
+#define L1_S_PROT_RO_armv6 (L1_S_AP(AP_R) | L1_S_AP(AP_RO))
+#define L1_S_PROT_MASK_armv6 (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
+
+#define L1_S_PROT_U_armv7 (L1_S_AP(AP_R) | L1_S_AP(AP_U))
+#define L1_S_PROT_W_armv7 (L1_S_AP(AP_W))
+#define L1_S_PROT_RO_armv7 (L1_S_AP(AP_R) | L1_S_AP(AP_RO))
+#define L1_S_PROT_MASK_armv7 (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
+
+#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
+#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
+#define L1_S_CACHE_MASK_armv6 (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
+#define L1_S_CACHE_MASK_armv7 (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
+
+#define L2_L_PROT_U_generic (L2_AP(AP_U))
+#define L2_L_PROT_W_generic (L2_AP(AP_W))
+#define L2_L_PROT_RO_generic (0)
+#define L2_L_PROT_MASK_generic (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
+
+#define L2_L_PROT_U_xscale (L2_AP(AP_U))
+#define L2_L_PROT_W_xscale (L2_AP(AP_W))
+#define L2_L_PROT_RO_xscale (0)
+#define L2_L_PROT_MASK_xscale (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
+
+#define L2_L_PROT_U_armv6n (L2_AP0(AP_R) | L2_AP0(AP_U))
+#define L2_L_PROT_W_armv6n (L2_AP0(AP_W))
+#define L2_L_PROT_RO_armv6n (L2_AP0(AP_R) | L2_AP0(AP_RO))
+#define L2_L_PROT_MASK_armv6n (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
+
+#define L2_L_PROT_U_armv7 (L2_AP0(AP_R) | L2_AP0(AP_U))
+#define L2_L_PROT_W_armv7 (L2_AP0(AP_W))
+#define L2_L_PROT_RO_armv7 (L2_AP0(AP_R) | L2_AP0(AP_RO))
+#define L2_L_PROT_MASK_armv7 (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
+
+#define L2_L_CACHE_MASK_generic (L2_B|L2_C)
+#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
+#define L2_L_CACHE_MASK_armv6 (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
+#define L2_L_CACHE_MASK_armv7 (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
+
+#define L2_S_PROT_U_generic (L2_AP(AP_U))
+#define L2_S_PROT_W_generic (L2_AP(AP_W))
+#define L2_S_PROT_RO_generic (0)
+#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
+
+#define L2_S_PROT_U_xscale (L2_AP0(AP_U))
+#define L2_S_PROT_W_xscale (L2_AP0(AP_W))
+#define L2_S_PROT_RO_xscale (0)
+#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
+
+#define L2_S_PROT_U_armv6n (L2_AP0(AP_R) | L2_AP0(AP_U))
+#define L2_S_PROT_W_armv6n (L2_AP0(AP_W))
+#define L2_S_PROT_RO_armv6n (L2_AP0(AP_R) | L2_AP0(AP_RO))
+#define L2_S_PROT_MASK_armv6n (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
+
+#define L2_S_PROT_U_armv7 (L2_AP0(AP_R) | L2_AP0(AP_U))
+#define L2_S_PROT_W_armv7 (L2_AP0(AP_W))
+#define L2_S_PROT_RO_armv7 (L2_AP0(AP_R) | L2_AP0(AP_RO))
+#define L2_S_PROT_MASK_armv7 (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
+
+#define L2_S_CACHE_MASK_generic (L2_B|L2_C)
+#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
+#define L2_XS_CACHE_MASK_armv6 (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
+#define L2_S_CACHE_MASK_armv6n L2_XS_CACHE_MASK_armv6
+#ifdef ARMV6_EXTENDED_SMALL_PAGE
+#define L2_S_CACHE_MASK_armv6c L2_XS_CACHE_MASK_armv6
+#else
+#define L2_S_CACHE_MASK_armv6c L2_S_CACHE_MASK_generic
+#endif
+#define L2_S_CACHE_MASK_armv7 (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
+
+
+#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP)
+#define L1_S_PROTO_xscale (L1_TYPE_S)
+#define L1_S_PROTO_armv6 (L1_TYPE_S)
+#define L1_S_PROTO_armv7 (L1_TYPE_S)
+
+#define L1_SS_PROTO_generic 0
+#define L1_SS_PROTO_xscale 0
+#define L1_SS_PROTO_armv6 (L1_TYPE_S | L1_S_V6_SS)
+#define L1_SS_PROTO_armv7 (L1_TYPE_S | L1_S_V6_SS)
+
+#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
+#define L1_C_PROTO_xscale (L1_TYPE_C)
+#define L1_C_PROTO_armv6 (L1_TYPE_C)
+#define L1_C_PROTO_armv7 (L1_TYPE_C)
+
+#define L2_L_PROTO (L2_TYPE_L)
+
+#define L2_S_PROTO_generic (L2_TYPE_S)
+#define L2_S_PROTO_xscale (L2_TYPE_XS)
+#ifdef ARMV6_EXTENDED_SMALL_PAGE
+#define L2_S_PROTO_armv6c (L2_TYPE_XS) /* XP=0, extended small page */
+#else
+#define L2_S_PROTO_armv6c (L2_TYPE_S) /* XP=0, subpage APs */
+#endif
+#define L2_S_PROTO_armv6n (L2_TYPE_S) /* with XP=1 */
+#define L2_S_PROTO_armv7 (L2_TYPE_S)
+
+/*
+ * User-visible names for the ones that vary with MMU class.
+ */
+
+#if ARM_NMMUS > 1
+/* More than one MMU class configured; use variables. */
+#define L1_S_PROT_U pte_l1_s_prot_u
+#define L1_S_PROT_W pte_l1_s_prot_w
+#define L1_S_PROT_RO pte_l1_s_prot_ro
+#define L1_S_PROT_MASK pte_l1_s_prot_mask
+
+#define L2_S_PROT_U pte_l2_s_prot_u
+#define L2_S_PROT_W pte_l2_s_prot_w
+#define L2_S_PROT_RO pte_l2_s_prot_ro
+#define L2_S_PROT_MASK pte_l2_s_prot_mask
+
+#define L2_L_PROT_U pte_l2_l_prot_u
+#define L2_L_PROT_W pte_l2_l_prot_w
+#define L2_L_PROT_RO pte_l2_l_prot_ro
+#define L2_L_PROT_MASK pte_l2_l_prot_mask
+
+#define L1_S_CACHE_MASK pte_l1_s_cache_mask
+#define L2_L_CACHE_MASK pte_l2_l_cache_mask
+#define L2_S_CACHE_MASK pte_l2_s_cache_mask
+
+#define L1_SS_PROTO pte_l1_ss_proto
+#define L1_S_PROTO pte_l1_s_proto
+#define L1_C_PROTO pte_l1_c_proto
+#define L2_S_PROTO pte_l2_s_proto
+
+#define pmap_copy_page(s, d) (*pmap_copy_page_func)((s), (d))
+#define pmap_zero_page(d) (*pmap_zero_page_func)((d))
+#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
+#define L1_S_PROT_U L1_S_PROT_U_generic
+#define L1_S_PROT_W L1_S_PROT_W_generic
+#define L1_S_PROT_RO L1_S_PROT_RO_generic
+#define L1_S_PROT_MASK L1_S_PROT_MASK_generic
+
+#define L2_S_PROT_U L2_S_PROT_U_generic
+#define L2_S_PROT_W L2_S_PROT_W_generic
+#define L2_S_PROT_RO L2_S_PROT_RO_generic
+#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
+
+#define L2_L_PROT_U L2_L_PROT_U_generic
+#define L2_L_PROT_W L2_L_PROT_W_generic
+#define L2_L_PROT_RO L2_L_PROT_RO_generic
+#define L2_L_PROT_MASK L2_L_PROT_MASK_generic
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
+
+#define L1_SS_PROTO L1_SS_PROTO_generic
+#define L1_S_PROTO L1_S_PROTO_generic
+#define L1_C_PROTO L1_C_PROTO_generic
+#define L2_S_PROTO L2_S_PROTO_generic
+
+#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
+#define pmap_zero_page(d) pmap_zero_page_generic((d))
+#elif ARM_MMU_V6N != 0
+#define L1_S_PROT_U L1_S_PROT_U_armv6
+#define L1_S_PROT_W L1_S_PROT_W_armv6
+#define L1_S_PROT_RO L1_S_PROT_RO_armv6
+#define L1_S_PROT_MASK L1_S_PROT_MASK_armv6
+
+#define L2_S_PROT_U L2_S_PROT_U_armv6n
+#define L2_S_PROT_W L2_S_PROT_W_armv6n
+#define L2_S_PROT_RO L2_S_PROT_RO_armv6n
+#define L2_S_PROT_MASK L2_S_PROT_MASK_armv6n
+
+#define L2_L_PROT_U L2_L_PROT_U_armv6n
+#define L2_L_PROT_W L2_L_PROT_W_armv6n
+#define L2_L_PROT_RO L2_L_PROT_RO_armv6n
+#define L2_L_PROT_MASK L2_L_PROT_MASK_armv6n
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_armv6
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_armv6
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_armv6n
+
+/* The *_PROTO values provide only the descriptor type bits; access
+ * permissions are added separately via the L1_S_PROT()/L2_S_PROT() macros. */
+#define L1_SS_PROTO L1_SS_PROTO_armv6
+#define L1_S_PROTO L1_S_PROTO_armv6
+#define L1_C_PROTO L1_C_PROTO_armv6
+#define L2_S_PROTO L2_S_PROTO_armv6n
+
+#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
+#define pmap_zero_page(d) pmap_zero_page_generic((d))
+#elif ARM_MMU_V6C != 0
+#define L1_S_PROT_U L1_S_PROT_U_generic
+#define L1_S_PROT_W L1_S_PROT_W_generic
+#define L1_S_PROT_RO L1_S_PROT_RO_generic
+#define L1_S_PROT_MASK L1_S_PROT_MASK_generic
+
+#define L2_S_PROT_U L2_S_PROT_U_generic
+#define L2_S_PROT_W L2_S_PROT_W_generic
+#define L2_S_PROT_RO L2_S_PROT_RO_generic
+#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
+
+#define L2_L_PROT_U L2_L_PROT_U_generic
+#define L2_L_PROT_W L2_L_PROT_W_generic
+#define L2_L_PROT_RO L2_L_PROT_RO_generic
+#define L2_L_PROT_MASK L2_L_PROT_MASK_generic
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
+
+#define L1_SS_PROTO L1_SS_PROTO_generic
+#define L1_S_PROTO L1_S_PROTO_generic
+#define L1_C_PROTO L1_C_PROTO_generic
+#define L2_S_PROTO L2_S_PROTO_generic
+
+#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
+#define pmap_zero_page(d) pmap_zero_page_generic((d))
+#elif ARM_MMU_XSCALE == 1
+#define L1_S_PROT_U L1_S_PROT_U_generic
+#define L1_S_PROT_W L1_S_PROT_W_generic
+#define L1_S_PROT_RO L1_S_PROT_RO_generic
+#define L1_S_PROT_MASK L1_S_PROT_MASK_generic
+
+#define L2_S_PROT_U L2_S_PROT_U_xscale
+#define L2_S_PROT_W L2_S_PROT_W_xscale
+#define L2_S_PROT_RO L2_S_PROT_RO_xscale
+#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale
+
+#define L2_L_PROT_U L2_L_PROT_U_generic
+#define L2_L_PROT_W L2_L_PROT_W_generic
+#define L2_L_PROT_RO L2_L_PROT_RO_generic
+#define L2_L_PROT_MASK L2_L_PROT_MASK_generic
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale
+
+#define L1_SS_PROTO L1_SS_PROTO_xscale
+#define L1_S_PROTO L1_S_PROTO_xscale
+#define L1_C_PROTO L1_C_PROTO_xscale
+#define L2_S_PROTO L2_S_PROTO_xscale
+
+#define pmap_copy_page(s, d) pmap_copy_page_xscale((s), (d))
+#define pmap_zero_page(d) pmap_zero_page_xscale((d))
+#elif ARM_MMU_V7 == 1
+#define L1_S_PROT_U L1_S_PROT_U_armv7
+#define L1_S_PROT_W L1_S_PROT_W_armv7
+#define L1_S_PROT_RO L1_S_PROT_RO_armv7
+#define L1_S_PROT_MASK L1_S_PROT_MASK_armv7
+
+#define L2_S_PROT_U L2_S_PROT_U_armv7
+#define L2_S_PROT_W L2_S_PROT_W_armv7
+#define L2_S_PROT_RO L2_S_PROT_RO_armv7
+#define L2_S_PROT_MASK L2_S_PROT_MASK_armv7
+
+#define L2_L_PROT_U L2_L_PROT_U_armv7
+#define L2_L_PROT_W L2_L_PROT_W_armv7
+#define L2_L_PROT_RO L2_L_PROT_RO_armv7
+#define L2_L_PROT_MASK L2_L_PROT_MASK_armv7
+
+#define L1_S_CACHE_MASK L1_S_CACHE_MASK_armv7
+#define L2_L_CACHE_MASK L2_L_CACHE_MASK_armv7
+#define L2_S_CACHE_MASK L2_S_CACHE_MASK_armv7
+
+/* The *_PROTO values provide only the descriptor type bits; access
+ * permissions are added separately via the L1_S_PROT()/L2_S_PROT() macros. */
+#define L1_SS_PROTO L1_SS_PROTO_armv7
+#define L1_S_PROTO L1_S_PROTO_armv7
+#define L1_C_PROTO L1_C_PROTO_armv7
+#define L2_S_PROTO L2_S_PROTO_armv7
+
+#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
+#define pmap_zero_page(d) pmap_zero_page_generic((d))
+#endif /* ARM_NMMUS > 1 */
+
+/*
+ * Macros to set and query the write permission on page descriptors.
+ */
+#define l1pte_set_writable(pte) (((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
+#define l1pte_set_readonly(pte) (((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
+#define l2pte_set_writable(pte) (((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
+#define l2pte_set_readonly(pte) (((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)
+
+#define l2pte_writable_p(pte) (((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
+ (L2_S_PROT_RO == 0 || \
+ ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))
+
+/*
+ * These macros return various bits based on kernel/user and protection.
+ * Note that the compiler will usually fold these at compile time.
+ */
+#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))
+
+#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))
+
+#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
+ (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
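+
+/*
+ * For instance, L2_S_PROT(PTE_KERNEL, VM_PROT_READ) folds to L2_S_PROT_RO,
+ * while L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE) yields
+ * L2_S_PROT_U | L2_S_PROT_W.
+ */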
+
+/*
+ * Macros to test if a mapping is mappable with an L1 SuperSection,
+ * L1 Section, or an L2 Large Page mapping.
+ */
+#define L1_SS_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)
+
+#define L1_S_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
+
+#define L2_L_MAPPABLE_P(va, pa, size) \
+ ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
+
+/*
+ * Hooks for the pool allocator.
+ */
+#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va))
+
+#ifndef _LOCORE
+
+/*
+ * pmap-specific data store in the vm_page structure.
+ */
+#define __HAVE_VM_PAGE_MD
+struct vm_page_md {
+ SLIST_HEAD(,pv_entry) pvh_list; /* pv_entry list */
+ int pvh_attrs; /* page attributes */
+ u_int uro_mappings;
+ u_int urw_mappings;
+ union {
+ u_short s_mappings[2]; /* Assume kernel count <= 65535 */
+ u_int i_mappings;
+ } k_u;
+#define kro_mappings k_u.s_mappings[0]
+#define krw_mappings k_u.s_mappings[1]
+#define k_mappings k_u.i_mappings
+};
+
+/*
+ * Set the default color of each page.
+ */
+#if ARM_MMU_V6 > 0
+#define VM_MDPAGE_PVH_ATTRS_INIT(pg) \
+ (pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
+#else
+#define VM_MDPAGE_PVH_ATTRS_INIT(pg) \
+ (pg)->mdpage.pvh_attrs = 0
+#endif
+
+#define VM_MDPAGE_INIT(pg) \
+do { \
+ SLIST_INIT(&(pg)->mdpage.pvh_list); \
+ VM_MDPAGE_PVH_ATTRS_INIT(pg); \
+ (pg)->mdpage.uro_mappings = 0; \
+ (pg)->mdpage.urw_mappings = 0; \
+ (pg)->mdpage.k_mappings = 0; \
+} while (/*CONSTCOND*/0)
+
+#endif /* !_LOCORE */
+
+#endif /* _KERNEL */
+
+#endif /* _ARM32_PMAP_H_ */
--- /dev/null
+/* $NetBSD: psl.h,v 1.19 2012/07/27 05:36:09 matt Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * psl.h
+ *
+ * spl prototypes.
+ * Eventually this will become a set of defines.
+ *
+ * Created : 21/07/95
+ */
+
+#ifndef _ARM_PSL_H_
+#define _ARM_PSL_H_
+#include <machine/intr.h>
+
+/*
+ * These are the different SPL states
+ *
+ * Each state has an interrupt mask associated with it which
+ * indicates which interrupts are allowed.
+ */
+
+#define spl0() splx(IPL_NONE)
+#define splsoftclock() raisespl(IPL_SOFTCLOCK)
+#define splsoftbio() raisespl(IPL_SOFTBIO)
+#define splsoftnet() raisespl(IPL_SOFTNET)
+#define splsoftserial() raisespl(IPL_SOFTSERIAL)
+#define splvm() raisespl(IPL_VM)
+#define splsched() raisespl(IPL_SCHED)
+#define splhigh() raisespl(IPL_HIGH)
+
+#define IPL_SAFEPRI IPL_NONE /* for kern_sleepq.c */
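+
+/*
+ * Typical usage sketch (illustrative, not part of the original header):
+ * raise the priority level around a critical section and restore the level
+ * returned by the raise.
+ */
+#if 0	/* illustrative sketch only */
+	int s;
+
+	s = splvm();		/* block interrupts at or below IPL_VM */
+	/* ... touch data shared with interrupt handlers ... */
+	splx(s);		/* restore the previous level */
+#endif	/* illustrative sketch only */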
+
+#ifdef _KERNEL
+#ifndef _LOCORE
+int raisespl (int);
+int lowerspl (int);
+void splx (int);
+
+#ifdef __HAVE_FAST_SOFTINTS
+void _setsoftintr (int si);
+#endif
+
+typedef uint8_t ipl_t;
+typedef struct {
+ uint8_t _ipl;
+} ipl_cookie_t;
+
+static inline ipl_cookie_t
+makeiplcookie(ipl_t ipl)
+{
+
+ return (ipl_cookie_t){._ipl = (uint8_t)ipl};
+}
+
+static inline int
+splraiseipl(ipl_cookie_t icookie)
+{
+
+ return raisespl(icookie._ipl);
+}
+#endif /* _LOCORE */
+#endif /* _KERNEL */
+
+#endif /* _ARM_PSL_H_ */
+/* End of psl.h */
--- /dev/null
+/* $NetBSD: rtc.h,v 1.2 2009/03/14 14:45:55 dsl Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * rtc.h
+ *
+ * Header file for RTC / CMOS stuff
+ *
+ * Created : 13/10/94
+ * Updated : 15/07/2000
+ *
+ * Based on kate/display/iiccontrol.c
+ */
+
+/*
+ * IIC addresses for RTC chip
+ * Two PCF8583 chips are supported on the IIC bus
+ */
+
+#define IIC_PCF8583_MASK 0xfc
+#define IIC_PCF8583_ADDR 0xa0
+
+#define RTC_Write (IIC_PCF8583_ADDR | IIC_WRITE)
+#define RTC_Read (IIC_PCF8583_ADDR | IIC_READ)
+
+typedef struct {
+ u_char rtc_micro;
+ u_char rtc_centi;
+ u_char rtc_sec;
+ u_char rtc_min;
+ u_char rtc_hour;
+ u_char rtc_day;
+ u_char rtc_mon;
+ u_char rtc_year;
+ u_char rtc_cen;
+} rtc_t;
+
+#define RTC_ADDR_CHECKSUM 0x3f
+#define RTC_ADDR_BOOTOPTS 0x90
+#define RTC_ADDR_REBOOTCNT 0x91
+#define RTC_ADDR_YEAR 0xc0
+#define RTC_ADDR_CENT 0xc1
+
+#ifdef _KERNEL
+int cmos_read(int);
+int cmos_write(int, int);
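+
+/*
+ * Usage sketch (illustrative, not part of the original header): bump the
+ * reboot counter kept in the PCF8583 CMOS RAM using the accessors above.
+ * The assumption that a negative return from cmos_read() signals failure is
+ * ours, not the header's.
+ */
+#if 0	/* illustrative sketch only */
+	int cnt;
+
+	cnt = cmos_read(RTC_ADDR_REBOOTCNT);
+	if (cnt >= 0)
+		(void)cmos_write(RTC_ADDR_REBOOTCNT, cnt + 1);
+#endif	/* illustrative sketch only */
+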
+#endif /* _KERNEL */
+
+/* End of rtc.h */
--- /dev/null
+/* $NetBSD: atomic.h,v 1.11 2008/11/19 06:39:17 matt Exp $ */
+
+/*-
+ * Copyright (c) 2008 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_ATOMIC_H_
+#define _ARM_ATOMIC_H_
+
+#endif /* _ARM_ATOMIC_H_ */
--- /dev/null
+/* $NetBSD: blockio.h,v 1.2 2001/06/02 10:44:56 bjh21 Exp $ */
+
+/*-
+ * Copyright (c) 2001 Ben Harris
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * blockio.h - low level functions for bulk PIO data transfer
+ */
+
+#ifndef _ARM_BLOCKIO_H_
+#define _ARM_BLOCKIO_H_
+
+/*
+ * All these take three arguments:
+ * I/O address
+ * Memory address
+ * Number of bytes to copy
+ */
+
+void read_multi_1(u_int, void *, u_int);
+void write_multi_1(u_int, const void *, u_int);
+#define read_multi_2 insw16
+#define write_multi_2 outsw16
+
+void insw(u_int, void *, u_int);
+void outsw(u_int, void *, u_int);
+void insw16(u_int, void *, u_int);
+void outsw16(u_int, void *, u_int);
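+
+/*
+ * Usage sketch (illustrative, not part of the original header): drain a
+ * 512-byte sector from a 16-bit PIO data register into a buffer.  The
+ * DATA_REG address is hypothetical; per the comment above, the count is in
+ * bytes.
+ */
+#if 0	/* illustrative sketch only */
+	uint16_t sector[256];
+
+	insw16(DATA_REG, sector, sizeof(sector));
+#endif	/* illustrative sketch only */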
+
+#endif
--- /dev/null
+/* $NetBSD: bootconfig.h,v 1.6 2012/08/31 23:59:52 matt Exp $ */
+
+/*
+ * Copyright (c) 1994 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe
+ * for the NetBSD Project.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef _KERNEL
+#define BOOTOPT_TYPE_BOOLEAN 0
+#define BOOTOPT_TYPE_STRING 1
+#define BOOTOPT_TYPE_INT 2
+#define BOOTOPT_TYPE_BININT 3
+#define BOOTOPT_TYPE_HEXINT 4
+#define BOOTOPT_TYPE_MASK 7
+
+struct boot_physmem {
+ paddr_t bp_start; /* starting PFN (not address) */
+ psize_t bp_pages; /* # of pages */
+ u_int bp_freelist; /* VM_FREELIST_ * */
+ u_int bp_flags;
+#define BOOT_PHYSMEM_CAN_DMA 1 /* Can DMA direct to this memory. */
+};
+
+int get_bootconf_option(char *, const char *, int, void *);
+
+extern char *boot_args;
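+
+/*
+ * Usage sketch (illustrative, not part of the original header): query a
+ * boolean option from the kernel argument string.  The argument order
+ * (option string, option name, type, result pointer) is assumed from the
+ * prototype above.
+ */
+#if 0	/* illustrative sketch only */
+	int single = 0;
+
+	if (get_bootconf_option(boot_args, "single",
+	    BOOTOPT_TYPE_BOOLEAN, &single) && single) {
+		/* ... enter single-user mode ... */
+	}
+#endif	/* illustrative sketch only */
+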
+#endif /* _KERNEL */
--- /dev/null
+/* $NetBSD: bus_defs.h,v 1.2 2012/09/18 05:47:27 matt Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_BUS_DEFS_H_
+#define _ARM32_BUS_DEFS_H_
+
+#if defined(_KERNEL_OPT)
+#include "opt_arm_bus_space.h"
+#endif
+
+/*
+ * Addresses (in bus space).
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+/*
+ * Access methods for bus space.
+ */
+typedef struct bus_space *bus_space_tag_t;
+typedef u_long bus_space_handle_t;
+
+/*
+ * int bus_space_map(bus_space_tag_t t, bus_addr_t addr,
+ * bus_size_t size, int flags, bus_space_handle_t *bshp);
+ *
+ * Map a region of bus space.
+ */
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+#define BUS_SPACE_MAP_PREFETCHABLE 0x04
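+
+/*
+ * Illustrative sketch (not part of the original header): map a device
+ * register window and obtain a linear kernel virtual address for it.  The
+ * iot/REG_BASE/REG_SIZE names are hypothetical; the bus_space_map() and
+ * bus_space_vaddr() access macros are provided by <arm/bus_funcs.h>.
+ */
+#if 0	/* illustrative sketch only */
+	bus_space_handle_t ioh;
+	void *regs;
+
+	if (bus_space_map(iot, REG_BASE, REG_SIZE,
+	    BUS_SPACE_MAP_LINEAR, &ioh) == 0)
+		regs = bus_space_vaddr(iot, ioh);
+#endif	/* illustrative sketch only */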
+
+struct bus_space {
+ /* cookie */
+ void *bs_cookie;
+
+ /* mapping/unmapping */
+ int (*bs_map)(void *, bus_addr_t, bus_size_t,
+ int, bus_space_handle_t *);
+ void (*bs_unmap)(void *, bus_space_handle_t,
+ bus_size_t);
+ int (*bs_subregion)(void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *);
+
+ /* allocation/deallocation */
+ int (*bs_alloc)(void *, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_size_t, bus_size_t, int,
+ bus_addr_t *, bus_space_handle_t *);
+ void (*bs_free)(void *, bus_space_handle_t,
+ bus_size_t);
+
+ /* get kernel virtual address */
+ void * (*bs_vaddr)(void *, bus_space_handle_t);
+
+ /* mmap bus space for user */
+ paddr_t (*bs_mmap)(void *, bus_addr_t, off_t, int, int);
+
+ /* barrier */
+ void (*bs_barrier)(void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int);
+
+ /* read (single) */
+ u_int8_t (*bs_r_1)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int16_t (*bs_r_2)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int32_t (*bs_r_4)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int64_t (*bs_r_8)(void *, bus_space_handle_t,
+ bus_size_t);
+
+ /* read multiple */
+ void (*bs_rm_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rm_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rm_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rm_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* read region */
+ void (*bs_rr_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rr_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rr_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rr_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* write (single) */
+ void (*bs_w_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*bs_w_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*bs_w_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*bs_w_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
+
+ /* write multiple */
+ void (*bs_wm_1)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wm_2)(void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wm_4)(void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wm_8)(void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* write region */
+ void (*bs_wr_1)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wr_2)(void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wr_4)(void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wr_8)(void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* set multiple */
+ void (*bs_sm_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sm_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sm_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sm_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* set region */
+ void (*bs_sr_1)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sr_2)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sr_4)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sr_8)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* copy */
+ void (*bs_c_1)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_2)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_4)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_8)(void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+ /* read stream (single) */
+ u_int8_t (*bs_r_1_s)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int16_t (*bs_r_2_s)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int32_t (*bs_r_4_s)(void *, bus_space_handle_t,
+ bus_size_t);
+ u_int64_t (*bs_r_8_s)(void *, bus_space_handle_t,
+ bus_size_t);
+
+ /* read multiple stream */
+ void (*bs_rm_1_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rm_2_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rm_4_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rm_8_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* read region stream */
+ void (*bs_rr_1_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rr_2_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rr_4_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rr_8_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* write stream (single) */
+ void (*bs_w_1_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*bs_w_2_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*bs_w_4_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*bs_w_8_s)(void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
+
+ /* write multiple stream */
+ void (*bs_wm_1_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wm_2_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wm_4_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wm_8_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* write region stream */
+ void (*bs_wr_1_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wr_2_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wr_4_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wr_8_s)(void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+#endif /* __BUS_SPACE_HAS_STREAM_METHODS */
+};
+
+#define BUS_SPACE_BARRIER_READ 0x01
+#define BUS_SPACE_BARRIER_WRITE 0x02
+
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+/* Bus Space DMA macros */
+
+/*
+ * Flags used in various bus DMA methods.
+ */
+#define BUS_DMA_WAITOK 0x000 /* safe to sleep (pseudo-flag) */
+#define BUS_DMA_NOWAIT 0x001 /* not safe to sleep */
+#define BUS_DMA_ALLOCNOW 0x002 /* perform resource allocation now */
+#define BUS_DMA_COHERENT 0x004 /* hint: map memory DMA coherent */
+#define BUS_DMA_STREAMING 0x008 /* hint: sequential, unidirectional */
+#define BUS_DMA_BUS1 0x010 /* placeholders for bus functions... */
+#define BUS_DMA_BUS2 0x020
+#define BUS_DMA_BUS3 0x040
+#define BUS_DMA_BUS4 0x080
+#define BUS_DMA_READ 0x100 /* mapping is device -> memory only */
+#define BUS_DMA_WRITE 0x200 /* mapping is memory -> device only */
+#define BUS_DMA_NOCACHE 0x400 /* hint: map non-cached memory */
+
+/*
+ * Private flags stored in the DMA map.
+ */
+#define _BUS_DMAMAP_COHERENT 0x10000 /* no cache flush necessary on sync */
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+/*
+ * Operations performed by bus_dmamap_sync().
+ */
+#define BUS_DMASYNC_PREREAD 0x01 /* pre-read synchronization */
+#define BUS_DMASYNC_POSTREAD 0x02 /* post-read synchronization */
+#define BUS_DMASYNC_PREWRITE 0x04 /* pre-write synchronization */
+#define BUS_DMASYNC_POSTWRITE 0x08 /* post-write synchronization */
+
+typedef struct arm32_bus_dma_tag *bus_dma_tag_t;
+typedef struct arm32_bus_dmamap *bus_dmamap_t;
+
+#define BUS_DMA_TAG_VALID(t) ((t) != (bus_dma_tag_t)0)
+
+/*
+ * bus_dma_segment_t
+ *
+ * Describes a single contiguous DMA transaction. Values
+ * are suitable for programming into DMA registers.
+ */
+struct arm32_bus_dma_segment {
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_addr_t ds_addr; /* DMA address */
+ bus_size_t ds_len; /* length of transfer */
+};
+typedef struct arm32_bus_dma_segment bus_dma_segment_t;
+
+/*
+ * arm32_dma_range
+ *
+ * This structure describes a valid DMA range.
+ */
+struct arm32_dma_range {
+ bus_addr_t dr_sysbase; /* system base address */
+ bus_addr_t dr_busbase; /* appears here on bus */
+ bus_size_t dr_len; /* length of range */
+};
+
+/*
+ * bus_dma_tag_t
+ *
+ * A machine-dependent opaque type describing the implementation of
+ * DMA for a given bus.
+ */
+
+struct arm32_bus_dma_tag {
+ /*
+ * DMA range for this tag. If the page doesn't fall within
+ * one of these ranges, an error is returned. The caller
+ * may then decide what to do with the transfer. If the
+ * range pointer is NULL, it is ignored.
+ */
+ struct arm32_dma_range *_ranges;
+ int _nranges;
+
+ /*
+ * Opaque cookie for use by back-end.
+ */
+ void *_cookie;
+
+ /*
+ * DMA mapping methods.
+ */
+ int (*_dmamap_create)(bus_dma_tag_t, bus_size_t, int,
+ bus_size_t, bus_size_t, int, bus_dmamap_t *);
+ void (*_dmamap_destroy)(bus_dma_tag_t, bus_dmamap_t);
+ int (*_dmamap_load)(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+ int (*_dmamap_load_mbuf)(bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+ int (*_dmamap_load_uio)(bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int);
+ int (*_dmamap_load_raw)(bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+ void (*_dmamap_unload)(bus_dma_tag_t, bus_dmamap_t);
+ void (*_dmamap_sync_pre)(bus_dma_tag_t, bus_dmamap_t,
+ bus_addr_t, bus_size_t, int);
+ void (*_dmamap_sync_post)(bus_dma_tag_t, bus_dmamap_t,
+ bus_addr_t, bus_size_t, int);
+
+ /*
+ * DMA memory utility functions.
+ */
+ int (*_dmamem_alloc)(bus_dma_tag_t, bus_size_t, bus_size_t,
+ bus_size_t, bus_dma_segment_t *, int, int *, int);
+ void (*_dmamem_free)(bus_dma_tag_t,
+ bus_dma_segment_t *, int);
+ int (*_dmamem_map)(bus_dma_tag_t, bus_dma_segment_t *,
+ int, size_t, void **, int);
+ void (*_dmamem_unmap)(bus_dma_tag_t, void *, size_t);
+ paddr_t (*_dmamem_mmap)(bus_dma_tag_t, bus_dma_segment_t *,
+ int, off_t, int, int);
+
+ /*
+ * DMA tag utility functions
+ */
+ int (*_dmatag_subregion)(bus_dma_tag_t, bus_addr_t, bus_addr_t,
+ bus_dma_tag_t *, int);
+ void (*_dmatag_destroy)(bus_dma_tag_t);
+
+ /*
+ * State for bounce buffers
+ */
+ int _tag_needs_free;
+ int (*_may_bounce)(bus_dma_tag_t, bus_dmamap_t, int, int *);
+};
+
+/*
+ * bus_dmamap_t
+ *
+ * Describes a DMA mapping.
+ */
+struct arm32_bus_dmamap {
+ /*
+ * PRIVATE MEMBERS: not for use by machine-independent code.
+ */
+ bus_size_t _dm_size; /* largest DMA transfer mappable */
+ int _dm_segcnt; /* number of segs this map can map */
+ bus_size_t _dm_maxmaxsegsz; /* fixed largest possible segment */
+ bus_size_t _dm_boundary; /* don't cross this */
+ int _dm_flags; /* misc. flags */
+
+ void *_dm_origbuf; /* pointer to original buffer */
+ int _dm_buftype; /* type of buffer */
+ struct vmspace *_dm_vmspace; /* vmspace that owns the mapping */
+
+ void *_dm_cookie; /* cookie for bus-specific functions */
+
+ /*
+ * PUBLIC MEMBERS: these are used by machine-independent code.
+ */
+ bus_size_t dm_maxsegsz; /* largest possible segment */
+ bus_size_t dm_mapsize; /* size of the mapping */
+ int dm_nsegs; /* # valid segments in mapping */
+ bus_dma_segment_t dm_segs[1]; /* segments; variable length */
+};
+
+/* _dm_buftype */
+#define _BUS_DMA_BUFTYPE_INVALID 0
+#define _BUS_DMA_BUFTYPE_LINEAR 1
+#define _BUS_DMA_BUFTYPE_MBUF 2
+#define _BUS_DMA_BUFTYPE_UIO 3
+#define _BUS_DMA_BUFTYPE_RAW 4
+
+#ifdef _ARM32_BUS_DMA_PRIVATE
+#define _BUS_AVAIL_END physical_end
+/*
+ * Cookie used for bounce buffers. A pointer to one of these is stashed in
+ * the DMA map.
+ */
+struct arm32_bus_dma_cookie {
+ int id_flags; /* flags; see below */
+
+ /*
+ * Information about the original buffer used during
+	 * DMA map syncs.  Note that id_origbuflen is only used
+	 * for _BUS_DMA_BUFTYPE_LINEAR buffers.
+ */
+ union {
+ void *un_origbuf; /* pointer to orig buffer if
+ bouncing */
+ char *un_linearbuf;
+ struct mbuf *un_mbuf;
+ struct uio *un_uio;
+ } id_origbuf_un;
+#define id_origbuf id_origbuf_un.un_origbuf
+#define id_origlinearbuf id_origbuf_un.un_linearbuf
+#define id_origmbuf id_origbuf_un.un_mbuf
+#define id_origuio id_origbuf_un.un_uio
+ bus_size_t id_origbuflen; /* ...and size */
+
+ void *id_bouncebuf; /* pointer to the bounce buffer */
+ bus_size_t id_bouncebuflen; /* ...and size */
+ int id_nbouncesegs; /* number of valid bounce segs */
+ bus_dma_segment_t id_bouncesegs[0]; /* array of bounce buffer
+ physical memory segments */
+};
+
+/* id_flags */
+#define _BUS_DMA_IS_BOUNCING 0x04 /* is bouncing current xfer */
+#define _BUS_DMA_HAS_BOUNCE 0x02 /* has bounce buffers */
+#endif /* _ARM32_BUS_DMA_PRIVATE */
+#define _BUS_DMA_MIGHT_NEED_BOUNCE 0x01 /* may need bounce buffers */
+
+#endif /* _ARM32_BUS_DEFS_H_ */
--- /dev/null
+/* $NetBSD: bus_funcs.h,v 1.3 2012/09/18 05:47:27 matt Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_BUS_FUNCS_H_
+#define _ARM32_BUS_FUNCS_H_
+
+/*
+ * Utility macros; INTERNAL USE ONLY.
+ */
+#define __bs_c(a,b) __CONCAT(a,b)
+#define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size)
+
+#define __bs_rs(sz, t, h, o) \
+ (*(t)->__bs_opname(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws(sz, t, h, o, v) \
+ (*(t)->__bs_opname(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define __bs_opname_s(op,size) __bs_c(__bs_c(__bs_c(__bs_c(bs_,op),_),size),_s)
+#define __bs_rs_s(sz, t, h, o) \
+ (*(t)->__bs_opname_s(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws_s(sz, t, h, o, v) \
+ (*(t)->__bs_opname_s(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle_s(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname_s(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set_s(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname_s(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy_s(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname_s(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+#endif
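+
+/*
+ * For reference (not part of the original header): __bs_opname(r,4) pastes
+ * together the member name bs_r_4, so __bs_rs(4, t, h, o) expands to
+ * (*(t)->bs_r_4)((t)->bs_cookie, h, o); the _s variants do the same for the
+ * stream members.
+ */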
+
+/*
+ * Mapping and unmapping operations.
+ */
+#define bus_space_map(t, a, s, c, hp) \
+ (*(t)->bs_map)((t)->bs_cookie, (a), (s), (c), (hp))
+#define bus_space_unmap(t, h, s) \
+ (*(t)->bs_unmap)((t)->bs_cookie, (h), (s))
+#define bus_space_subregion(t, h, o, s, hp) \
+ (*(t)->bs_subregion)((t)->bs_cookie, (h), (o), (s), (hp))
+
+
+/*
+ * Allocation and deallocation operations.
+ */
+#define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \
+ (*(t)->bs_alloc)((t)->bs_cookie, (rs), (re), (s), (a), (b), \
+ (c), (ap), (hp))
+#define bus_space_free(t, h, s) \
+ (*(t)->bs_free)((t)->bs_cookie, (h), (s))
+
+/*
+ * Get kernel virtual address for ranges mapped BUS_SPACE_MAP_LINEAR.
+ */
+#define bus_space_vaddr(t, h) \
+ (*(t)->bs_vaddr)((t)->bs_cookie, (h))
+
+/*
+ * MMap bus space for a user application.
+ */
+#define bus_space_mmap(t, a, o, p, f) \
+ (*(t)->bs_mmap)((t)->bs_cookie, (a), (o), (p), (f))
+
+/*
+ * Bus barrier operations.
+ */
+#define bus_space_barrier(t, h, o, l, f) \
+ (*(t)->bs_barrier)((t)->bs_cookie, (h), (o), (l), (f))
+
+/*
+ * Bus read (single) operations.
+ */
+#define bus_space_read_1(t, h, o) __bs_rs(1,(t),(h),(o))
+#define bus_space_read_2(t, h, o) __bs_rs(2,(t),(h),(o))
+#define bus_space_read_4(t, h, o) __bs_rs(4,(t),(h),(o))
+#define bus_space_read_8(t, h, o) __bs_rs(8,(t),(h),(o))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_read_stream_1(t, h, o) __bs_rs_s(1,(t),(h),(o))
+#define bus_space_read_stream_2(t, h, o) __bs_rs_s(2,(t),(h),(o))
+#define bus_space_read_stream_4(t, h, o) __bs_rs_s(4,(t),(h),(o))
+#define bus_space_read_stream_8(t, h, o) __bs_rs_s(8,(t),(h),(o))
+#endif
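+
+/*
+ * Usage sketch (illustrative, not part of the original header): read a
+ * 32-bit status register and write a 32-bit control register through a
+ * previously mapped handle.  iot/ioh come from an earlier bus_space_map();
+ * the EXAMPLE_* register names and bit are hypothetical, and the write
+ * macro is defined further below.
+ */
+#if 0	/* illustrative sketch only */
+	uint32_t sts;
+
+	sts = bus_space_read_4(iot, ioh, EXAMPLE_STATUS_REG);
+	bus_space_write_4(iot, ioh, EXAMPLE_CTRL_REG, sts | EXAMPLE_CTRL_ENABLE);
+#endif	/* illustrative sketch only */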
+
+
+/*
+ * Bus read multiple operations.
+ */
+#define bus_space_read_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Bus read region operations.
+ */
+#define bus_space_read_region_1(t, h, o, a, c) \
+ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_2(t, h, o, a, c) \
+ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_4(t, h, o, a, c) \
+ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_8(t, h, o, a, c) \
+ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Bus write (single) operations.
+ */
+#define bus_space_write_1(t, h, o, v) __bs_ws(1,(t),(h),(o),(v))
+#define bus_space_write_2(t, h, o, v) __bs_ws(2,(t),(h),(o),(v))
+#define bus_space_write_4(t, h, o, v) __bs_ws(4,(t),(h),(o),(v))
+#define bus_space_write_8(t, h, o, v) __bs_ws(8,(t),(h),(o),(v))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_write_stream_1(t, h, o, v) __bs_ws_s(1,(t),(h),(o),(v))
+#define bus_space_write_stream_2(t, h, o, v) __bs_ws_s(2,(t),(h),(o),(v))
+#define bus_space_write_stream_4(t, h, o, v) __bs_ws_s(4,(t),(h),(o),(v))
+#define bus_space_write_stream_8(t, h, o, v) __bs_ws_s(8,(t),(h),(o),(v))
+#endif
+
+
+/*
+ * Bus write multiple operations.
+ */
+#define bus_space_write_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Bus write region operations.
+ */
+#define bus_space_write_region_1(t, h, o, a, c) \
+ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_2(t, h, o, a, c) \
+ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_4(t, h, o, a, c) \
+ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_8(t, h, o, a, c) \
+ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c))
+#ifdef __BUS_SPACE_HAS_STREAM_METHODS
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,8,(t),(h),(o),(a),(c))
+#endif
+
+
+/*
+ * Set multiple operations.
+ */
+#define bus_space_set_multi_1(t, h, o, v, c) \
+ __bs_set(sm,1,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_2(t, h, o, v, c) \
+ __bs_set(sm,2,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_4(t, h, o, v, c) \
+ __bs_set(sm,4,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_8(t, h, o, v, c) \
+ __bs_set(sm,8,(t),(h),(o),(v),(c))
+
+/*
+ * Set region operations.
+ */
+#define bus_space_set_region_1(t, h, o, v, c) \
+ __bs_set(sr,1,(t),(h),(o),(v),(c))
+#define bus_space_set_region_2(t, h, o, v, c) \
+ __bs_set(sr,2,(t),(h),(o),(v),(c))
+#define bus_space_set_region_4(t, h, o, v, c) \
+ __bs_set(sr,4,(t),(h),(o),(v),(c))
+#define bus_space_set_region_8(t, h, o, v, c) \
+ __bs_set(sr,8,(t),(h),(o),(v),(c))
+
+/*
+ * Copy operations.
+ */
+#define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \
+ __bs_copy(1, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \
+ __bs_copy(2, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \
+ __bs_copy(4, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \
+ __bs_copy(8, t, h1, o1, h2, o2, c)
+
+/*
+ * Macros to provide prototypes for all the functions used in the
+ * bus_space structure
+ */
+
+#define bs_map_proto(f) \
+int __bs_c(f,_bs_map)(void *t, bus_addr_t addr, \
+ bus_size_t size, int cacheable, bus_space_handle_t *bshp);
+
+#define bs_unmap_proto(f) \
+void __bs_c(f,_bs_unmap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t size);
+
+#define bs_subregion_proto(f) \
+int __bs_c(f,_bs_subregion)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, bus_size_t size, \
+ bus_space_handle_t *nbshp);
+
+#define bs_alloc_proto(f) \
+int __bs_c(f,_bs_alloc)(void *t, bus_addr_t rstart, \
+ bus_addr_t rend, bus_size_t size, bus_size_t align, \
+ bus_size_t boundary, int cacheable, bus_addr_t *addrp, \
+ bus_space_handle_t *bshp);
+
+#define bs_free_proto(f) \
+void __bs_c(f,_bs_free)(void *t, bus_space_handle_t bsh, \
+ bus_size_t size);
+
+#define bs_vaddr_proto(f) \
+void * __bs_c(f,_bs_vaddr)(void *t, bus_space_handle_t bsh);
+
+#define bs_mmap_proto(f) \
+paddr_t __bs_c(f,_bs_mmap)(void *, bus_addr_t, off_t, int, int);
+
+#define bs_barrier_proto(f) \
+void __bs_c(f,_bs_barrier)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, bus_size_t len, int flags);
+
+#define bs_r_1_proto(f) \
+uint8_t __bs_c(f,_bs_r_1)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_r_2_proto(f) \
+uint16_t __bs_c(f,_bs_r_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset); \
+uint16_t __bs_c(f,_bs_r_2_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_r_4_proto(f) \
+uint32_t __bs_c(f,_bs_r_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset); \
+uint32_t __bs_c(f,_bs_r_4_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_r_8_proto(f) \
+uint64_t __bs_c(f,_bs_r_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset); \
+uint64_t __bs_c(f,_bs_r_8_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset);
+
+#define bs_w_1_proto(f) \
+void __bs_c(f,_bs_w_1)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint8_t value);
+
+#define bs_w_2_proto(f) \
+void __bs_c(f,_bs_w_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t value); \
+void __bs_c(f,_bs_w_2_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t value);
+
+#define bs_w_4_proto(f) \
+void __bs_c(f,_bs_w_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t value); \
+void __bs_c(f,_bs_w_4_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t value);
+
+#define bs_w_8_proto(f) \
+void __bs_c(f,_bs_w_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t value); \
+void __bs_c(f,_bs_w_8_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t value);
+
+#define bs_rm_1_proto(f) \
+void __bs_c(f,_bs_rm_1)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint8_t *addr, bus_size_t count);
+
+#define bs_rm_2_proto(f) \
+void __bs_c(f,_bs_rm_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t *addr, bus_size_t count); \
+void __bs_c(f,_bs_rm_2_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t *addr, bus_size_t count);
+
+#define bs_rm_4_proto(f) \
+void __bs_c(f,_bs_rm_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t *addr, bus_size_t count); \
+void __bs_c(f,_bs_rm_4_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t *addr, bus_size_t count);
+
+#define bs_rm_8_proto(f) \
+void __bs_c(f,_bs_rm_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t *addr, bus_size_t count); \
+void __bs_c(f,_bs_rm_8_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t *addr, bus_size_t count);
+
+#define bs_wm_1_proto(f) \
+void __bs_c(f,_bs_wm_1)(void *t, bus_space_handle_t bsh, \
+	    bus_size_t offset, const uint8_t *addr, bus_size_t count);
+
+#define bs_wm_2_proto(f) \
+void __bs_c(f,_bs_wm_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint16_t *addr, bus_size_t count); \
+void __bs_c(f,_bs_wm_2_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint16_t *addr, bus_size_t count);
+
+#define bs_wm_4_proto(f) \
+void __bs_c(f,_bs_wm_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint32_t *addr, bus_size_t count); \
+void __bs_c(f,_bs_wm_4_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint32_t *addr, bus_size_t count);
+
+#define bs_wm_8_proto(f) \
+void __bs_c(f,_bs_wm_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint64_t *addr, bus_size_t count); \
+void __bs_c(f,_bs_wm_8_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint64_t *addr, bus_size_t count);
+
+#define bs_rr_1_proto(f) \
+void __bs_c(f, _bs_rr_1)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint8_t *addr, bus_size_t count);
+
+#define bs_rr_2_proto(f) \
+void __bs_c(f, _bs_rr_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t *addr, bus_size_t count); \
+void __bs_c(f, _bs_rr_2_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t *addr, bus_size_t count);
+
+#define bs_rr_4_proto(f) \
+void __bs_c(f, _bs_rr_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t *addr, bus_size_t count); \
+void __bs_c(f, _bs_rr_4_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t *addr, bus_size_t count);
+
+#define bs_rr_8_proto(f) \
+void __bs_c(f, _bs_rr_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t *addr, bus_size_t count); \
+void __bs_c(f, _bs_rr_8_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t *addr, bus_size_t count);
+
+#define bs_wr_1_proto(f) \
+void __bs_c(f, _bs_wr_1)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint8_t *addr, bus_size_t count);
+
+#define bs_wr_2_proto(f) \
+void __bs_c(f, _bs_wr_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint16_t *addr, bus_size_t count); \
+void __bs_c(f, _bs_wr_2_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint16_t *addr, bus_size_t count);
+
+#define bs_wr_4_proto(f) \
+void __bs_c(f, _bs_wr_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint32_t *addr, bus_size_t count); \
+void __bs_c(f, _bs_wr_4_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint32_t *addr, bus_size_t count);
+
+#define bs_wr_8_proto(f) \
+void __bs_c(f, _bs_wr_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint64_t *addr, bus_size_t count); \
+void __bs_c(f, _bs_wr_8_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, const uint64_t *addr, bus_size_t count);
+
+#define bs_sm_1_proto(f) \
+void __bs_c(f,_bs_sm_1)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint8_t value, bus_size_t count);
+
+#define bs_sm_2_proto(f) \
+void __bs_c(f,_bs_sm_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t value, bus_size_t count);
+
+#define bs_sm_4_proto(f) \
+void __bs_c(f,_bs_sm_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t value, bus_size_t count);
+
+#define bs_sm_8_proto(f) \
+void __bs_c(f,_bs_sm_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t value, bus_size_t count);
+
+#define bs_sr_1_proto(f) \
+void __bs_c(f,_bs_sr_1)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint8_t value, bus_size_t count);
+
+#define bs_sr_2_proto(f) \
+void __bs_c(f,_bs_sr_2)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t value, bus_size_t count); \
+void __bs_c(f,_bs_sr_2_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint16_t value, bus_size_t count);
+
+#define bs_sr_4_proto(f) \
+void __bs_c(f,_bs_sr_4)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t value, bus_size_t count); \
+void __bs_c(f,_bs_sr_4_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint32_t value, bus_size_t count);
+
+#define bs_sr_8_proto(f) \
+void __bs_c(f,_bs_sr_8)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t value, bus_size_t count); \
+void __bs_c(f,_bs_sr_8_swap)(void *t, bus_space_handle_t bsh, \
+ bus_size_t offset, uint64_t value, bus_size_t count);
+
+#define bs_c_1_proto(f) \
+void __bs_c(f,_bs_c_1)(void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_c_2_proto(f) \
+void __bs_c(f,_bs_c_2)(void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_c_4_proto(f) \
+void __bs_c(f,_bs_c_4)(void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_c_8_proto(f) \
+void __bs_c(f,_bs_c_8)(void *t, bus_space_handle_t bsh1, \
+ bus_size_t offset1, bus_space_handle_t bsh2, \
+ bus_size_t offset2, bus_size_t count);
+
+#define bs_protos(f) \
+bs_map_proto(f); \
+bs_unmap_proto(f); \
+bs_subregion_proto(f); \
+bs_alloc_proto(f); \
+bs_free_proto(f); \
+bs_vaddr_proto(f); \
+bs_mmap_proto(f); \
+bs_barrier_proto(f); \
+bs_r_1_proto(f); \
+bs_r_2_proto(f); \
+bs_r_4_proto(f); \
+bs_r_8_proto(f); \
+bs_w_1_proto(f); \
+bs_w_2_proto(f); \
+bs_w_4_proto(f); \
+bs_w_8_proto(f); \
+bs_rm_1_proto(f); \
+bs_rm_2_proto(f); \
+bs_rm_4_proto(f); \
+bs_rm_8_proto(f); \
+bs_wm_1_proto(f); \
+bs_wm_2_proto(f); \
+bs_wm_4_proto(f); \
+bs_wm_8_proto(f); \
+bs_rr_1_proto(f); \
+bs_rr_2_proto(f); \
+bs_rr_4_proto(f); \
+bs_rr_8_proto(f); \
+bs_wr_1_proto(f); \
+bs_wr_2_proto(f); \
+bs_wr_4_proto(f); \
+bs_wr_8_proto(f); \
+bs_sm_1_proto(f); \
+bs_sm_2_proto(f); \
+bs_sm_4_proto(f); \
+bs_sm_8_proto(f); \
+bs_sr_1_proto(f); \
+bs_sr_2_proto(f); \
+bs_sr_4_proto(f); \
+bs_sr_8_proto(f); \
+bs_c_1_proto(f); \
+bs_c_2_proto(f); \
+bs_c_4_proto(f); \
+bs_c_8_proto(f);
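+
+/*
+ * For reference (not part of the original header): a back-end prefix of,
+ * say, "generic" would use bs_protos(generic) to declare generic_bs_map(),
+ * generic_bs_r_4(), generic_bs_w_4() and the rest, which can then be
+ * plugged into a struct bus_space.
+ */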
+
+/* Bus Space DMA macros */
+
+/* Forwards needed by prototypes below. */
+struct mbuf;
+struct uio;
+
+#define bus_dmamap_create(t, s, n, m, b, f, p) \
+ (*(t)->_dmamap_create)((t), (s), (n), (m), (b), (f), (p))
+#define bus_dmamap_destroy(t, p) \
+ (*(t)->_dmamap_destroy)((t), (p))
+#define bus_dmamap_load(t, m, b, s, p, f) \
+ (*(t)->_dmamap_load)((t), (m), (b), (s), (p), (f))
+#define bus_dmamap_load_mbuf(t, m, b, f) \
+ (*(t)->_dmamap_load_mbuf)((t), (m), (b), (f))
+#define bus_dmamap_load_uio(t, m, u, f) \
+ (*(t)->_dmamap_load_uio)((t), (m), (u), (f))
+#define bus_dmamap_load_raw(t, m, sg, n, s, f) \
+ (*(t)->_dmamap_load_raw)((t), (m), (sg), (n), (s), (f))
+#define bus_dmamap_unload(t, p) \
+ (*(t)->_dmamap_unload)((t), (p))
+#define bus_dmamap_sync(t, p, o, l, ops) \
+do { \
+ if (((ops) & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 \
+ && (t)->_dmamap_sync_pre != NULL) \
+ (*(t)->_dmamap_sync_pre)((t), (p), (o), (l), (ops)); \
+ else if (((ops) & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0 \
+ && (t)->_dmamap_sync_post != NULL) \
+ (*(t)->_dmamap_sync_post)((t), (p), (o), (l), (ops)); \
+} while (/*CONSTCOND*/0)
+
+#define bus_dmamem_alloc(t, s, a, b, sg, n, r, f) \
+ (*(t)->_dmamem_alloc)((t), (s), (a), (b), (sg), (n), (r), (f))
+#define bus_dmamem_free(t, sg, n) \
+ (*(t)->_dmamem_free)((t), (sg), (n))
+#define bus_dmamem_map(t, sg, n, s, k, f) \
+ (*(t)->_dmamem_map)((t), (sg), (n), (s), (k), (f))
+#define bus_dmamem_unmap(t, k, s) \
+ (*(t)->_dmamem_unmap)((t), (k), (s))
+#define bus_dmamem_mmap(t, sg, n, o, p, f) \
+ (*(t)->_dmamem_mmap)((t), (sg), (n), (o), (p), (f))
+
+#define bus_dmatag_subregion(t, mna, mxa, nt, f) \
+ (*(t)->_dmatag_subregion)((t), (mna), (mxa), (nt), (f))
+#define bus_dmatag_destroy(t) \
+ (*(t)->_dmatag_destroy)(t)
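+
+/*
+ * Lifecycle sketch (illustrative, not part of the original header): a
+ * typical single-buffer transfer to a device using the macros above.  The
+ * example_dma_write() name is hypothetical and the error handling is
+ * schematic only.
+ */
+#if 0	/* illustrative sketch only */
+static void
+example_dma_write(bus_dma_tag_t tag, void *buf, bus_size_t len)
+{
+	bus_dmamap_t map;
+
+	if (bus_dmamap_create(tag, len, 1, len, 0, BUS_DMA_NOWAIT, &map) != 0)
+		return;
+	if (bus_dmamap_load(tag, map, buf, len, NULL, BUS_DMA_NOWAIT) == 0) {
+		bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_PREWRITE);
+		/* ... hand map->dm_segs[0].ds_addr to the device ... */
+		bus_dmamap_sync(tag, map, 0, len, BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_unload(tag, map);
+	}
+	bus_dmamap_destroy(tag, map);
+}
+#endif	/* illustrative sketch only */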
+
+#ifdef _ARM32_BUS_DMA_PRIVATE
+
+extern paddr_t physical_start, physical_end;
+
+int arm32_dma_range_intersect(struct arm32_dma_range *, int,
+ paddr_t pa, psize_t size, paddr_t *pap, psize_t *sizep);
+
+int _bus_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
+ bus_size_t, int, bus_dmamap_t *);
+void _bus_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
+int _bus_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
+ bus_size_t, struct proc *, int);
+int _bus_dmamap_load_mbuf(bus_dma_tag_t, bus_dmamap_t,
+ struct mbuf *, int);
+int _bus_dmamap_load_uio(bus_dma_tag_t, bus_dmamap_t,
+ struct uio *, int);
+int _bus_dmamap_load_raw(bus_dma_tag_t, bus_dmamap_t,
+ bus_dma_segment_t *, int, bus_size_t, int);
+void _bus_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
+void _bus_dmamap_sync(bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
+ bus_size_t, int);
+
+#ifdef _ARM32_NEED_BUS_DMA_BOUNCE
+#define _BUS_DMAMAP_SYNC_FUNCS \
+ ._dmamap_sync_pre = _bus_dmamap_sync, \
+ ._dmamap_sync_post = _bus_dmamap_sync
+#else
+#define _BUS_DMAMAP_SYNC_FUNCS \
+ ._dmamap_sync_pre = _bus_dmamap_sync
+#endif
+
+#define _BUS_DMAMAP_FUNCS \
+ ._dmamap_create = _bus_dmamap_create, \
+ ._dmamap_destroy = _bus_dmamap_destroy, \
+ ._dmamap_load = _bus_dmamap_load, \
+ ._dmamap_load_mbuf = _bus_dmamap_load_mbuf, \
+ ._dmamap_load_raw = _bus_dmamap_load_raw, \
+ ._dmamap_load_uio = _bus_dmamap_load_uio, \
+ ._dmamap_unload = _bus_dmamap_unload, \
+ _BUS_DMAMAP_SYNC_FUNCS
+
+int _bus_dmamem_alloc(bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags);
+void _bus_dmamem_free(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs);
+int _bus_dmamem_map(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, size_t size, void **kvap, int flags);
+void _bus_dmamem_unmap(bus_dma_tag_t tag, void *kva,
+ size_t size);
+paddr_t _bus_dmamem_mmap(bus_dma_tag_t tag, bus_dma_segment_t *segs,
+ int nsegs, off_t off, int prot, int flags);
+
+#define _BUS_DMAMEM_FUNCS \
+ ._dmamem_alloc = _bus_dmamem_alloc, \
+ ._dmamem_free = _bus_dmamem_free, \
+ ._dmamem_map = _bus_dmamem_map, \
+ ._dmamem_unmap = _bus_dmamem_unmap, \
+ ._dmamem_mmap = _bus_dmamem_mmap
+
+int _bus_dmamem_alloc_range(bus_dma_tag_t tag, bus_size_t size,
+ bus_size_t alignment, bus_size_t boundary,
+ bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags,
+ vaddr_t low, vaddr_t high);
+
+int _bus_dmatag_subregion(bus_dma_tag_t, bus_addr_t, bus_addr_t,
+ bus_dma_tag_t *, int);
+void _bus_dmatag_destroy(bus_dma_tag_t);
+
+#define _BUS_DMATAG_FUNCS \
+ ._dmatag_subregion = _bus_dmatag_subregion, \
+ ._dmatag_destroy = _bus_dmatag_destroy
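+
+/*
+ * Sketch (illustrative, not part of the original header): a back-end that
+ * needs no special handling can assemble its DMA tag from the default
+ * implementations using the designated-initializer macros above.
+ */
+#if 0	/* illustrative sketch only */
+struct arm32_bus_dma_tag example_bus_dma_tag = {
+	_BUS_DMAMAP_FUNCS,
+	_BUS_DMAMEM_FUNCS,
+	_BUS_DMATAG_FUNCS,
+};
+#endif	/* illustrative sketch only */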
+
+#endif /* _ARM32_BUS_DMA_PRIVATE */
+
+#endif /* _ARM32_BUS_FUNCS_H_ */
--- /dev/null
+/* $NetBSD: cpu_counter.h,v 1.2 2012/08/29 18:45:40 matt Exp $ */
+
+/*-
+ * Copyright (c) 2012 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_CPU_COUNTER_H_
+#define _ARM_CPU_COUNTER_H_
+
+/*
+ * ARM specific support for CPU counter (ARM11 and Cortex only).
+ * If __HAVE_CPU_COUNTER is defined for any other CPU_*, it will crash.
+ */
+
+#ifdef _KERNEL
+
+#include <sys/cpu.h>
+
+#if defined(CPU_CORTEX) || defined(CPU_ARM11)
+#define cpu_hascounter() (curcpu()->ci_data.cpu_cc_freq != 0)
+#else
+#define cpu_hascounter() false
+#endif
+
+#define cpu_counter() cpu_counter32()
+
+#if defined(CPU_CORTEX) || defined(CPU_ARM11)
+static __inline uint32_t
+cpu_counter32(void)
+{
+#if defined(CPU_CORTEX) && defined(CPU_ARM11)
+ const bool cortex_p = CPU_ID_CORTEX_P(curcpu()->ci_cpu_id);
+#elif defined(CPU_CORTEX)
+ const bool cortex_p = true;
+#elif defined(CPU_ARM11)
+ const bool cortex_p = false;
+#endif
+
+ if (cortex_p)
+ return armreg_pmccntr_read();
+ else
+ return armreg_pmccntrv6_read();
+}
+#endif
+
+static __inline uint64_t
+cpu_frequency(struct cpu_info *ci)
+{
+ return ci->ci_data.cpu_cc_freq;
+}
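+
+/*
+ * Usage sketch (illustrative, not part of the original header): measure an
+ * elapsed interval in cycles and convert it to nanoseconds using the
+ * reported counter frequency.
+ */
+#if 0	/* illustrative sketch only */
+	uint32_t c0, c1;
+	uint64_t ns;
+
+	if (cpu_hascounter()) {
+		c0 = cpu_counter32();
+		/* ... code being timed ... */
+		c1 = cpu_counter32();
+		/* Unsigned subtraction tolerates one 32-bit wrap-around. */
+		ns = (uint64_t)(c1 - c0) * 1000000000ULL /
+		    cpu_frequency(curcpu());
+	}
+#endif	/* illustrative sketch only */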
+
+#endif /* _KERNEL */
+
+#endif /* _ARM_CPU_COUNTER_H_ */
--- /dev/null
+/* $NetBSD: cpuconf.h,v 1.20 2011/03/10 07:47:14 bsh Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_CPUCONF_H_
+#define _ARM_CPUCONF_H_
+
+#if defined(_KERNEL_OPT)
+#include "opt_cputypes.h"
+#include "opt_cpuoptions.h"
+#endif /* _KERNEL_OPT */
+
+#if defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
+#define __CPU_XSCALE_PXA2XX
+#endif
+
+#ifdef CPU_XSCALE_PXA2X0
+#warning option CPU_XSCALE_PXA2X0 is obsolete. Use CPU_XSCALE_PXA250 and/or CPU_XSCALE_PXA270.
+#endif
+
+/*
+ * IF YOU CHANGE THIS FILE, MAKE SURE TO UPDATE THE DEFINITION OF
+ * "PMAP_NEEDS_PTE_SYNC" IN <arm/arm32/pmap.h> FOR THE CPU TYPE
+ * YOU ARE ADDING SUPPORT FOR.
+ */
+
+#if 0
+/*
+ * Step 1: Count the number of CPU types configured into the kernel.
+ */
+#if defined(_KERNEL_OPT)
+#define CPU_NTYPES (defined(CPU_ARM2) + defined(CPU_ARM250) + \
+ defined(CPU_ARM3) + \
+ defined(CPU_ARM6) + defined(CPU_ARM7) + \
+ defined(CPU_ARM7TDMI) + \
+ defined(CPU_ARM8) + defined(CPU_ARM9) + \
+ defined(CPU_ARM9E) + \
+ defined(CPU_ARM10) + \
+ defined(CPU_ARM11) + \
+ defined(CPU_ARM1136) + \
+ defined(CPU_ARM1176) + \
+ defined(CPU_ARM11MPCORE) + \
+ defined(CPU_CORTEX) + \
+ defined(CPU_CORTEXA8) + \
+ defined(CPU_CORTEXA9) + \
+ defined(CPU_SA110) + defined(CPU_SA1100) + \
+ defined(CPU_SA1110) + \
+ defined(CPU_FA526) + \
+ defined(CPU_IXP12X0) + \
+ defined(CPU_XSCALE_80200) + \
+ defined(CPU_XSCALE_80321) + \
+ defined(__CPU_XSCALE_PXA2XX) + \
+ defined(CPU_XSCALE_IXP425) + \
+ defined(CPU_SHEEVA))
+#else
+#define CPU_NTYPES 2
+#endif /* _KERNEL_OPT */
+#endif
+
+/*
+ * Step 2: Determine which ARM architecture versions are configured.
+ */
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3))
+#define ARM_ARCH_2 1
+#else
+#define ARM_ARCH_2 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM6) || defined(CPU_ARM7))
+#define ARM_ARCH_3 1
+#else
+#define ARM_ARCH_3 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
+ defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_FA526) || \
+ defined(CPU_SA1110) || defined(CPU_IXP12X0))
+#define ARM_ARCH_4 1
+#else
+#define ARM_ARCH_4 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM9E) || defined(CPU_ARM10) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)) || \
+ defined(CPU_SHEEVA)
+#define ARM_ARCH_5 1
+#else
+#define ARM_ARCH_5 0
+#endif
+
+#if defined(CPU_ARM11) || defined(CPU_CORTEXA8) || defined(CPU_ARM11MPCORE)
+#define ARM_ARCH_6 1
+#else
+#define ARM_ARCH_6 0
+#endif
+
+#if defined(CPU_CORTEX)
+#define ARM_ARCH_7 1
+#else
+#define ARM_ARCH_7 0
+#endif
+
+#define ARM_NARCH (ARM_ARCH_2 + ARM_ARCH_3 + ARM_ARCH_4 + \
+ ARM_ARCH_5 + ARM_ARCH_6 + ARM_ARCH_7)
+#if ARM_NARCH == 0
+#error ARM_NARCH is 0
+#endif
+
+#if ARM_ARCH_5 || ARM_ARCH_6 || ARM_ARCH_7
+/*
+ * We could support Thumb code on v4T, but the lack of clean interworking
+ * makes that hard.
+ */
+#define THUMB_CODE
+#endif
+
+/*
+ * Step 3: Define which MMU classes are configured:
+ *
+ * ARM_MMU_MEMC Prehistoric, external memory controller
+ * and MMU for ARMv2 CPUs.
+ *
+ * ARM_MMU_GENERIC Generic ARM MMU, compatible with ARM6.
+ *
+ * ARM_MMU_SA1 StrongARM SA-1 MMU. Compatible with generic
+ * ARM MMU, but has no write-through cache mode.
+ *
+ * ARM_MMU_XSCALE XScale MMU. Compatible with generic ARM
+ * MMU, but also has several extensions which
+ * require different PTE layout to use.
+ *
+ * ARM_MMU_V6C ARM v6 MMU in backward compatible mode.
+ * Compatible with generic ARM MMU, but
+ * also has several extensions which
+ * require different PTE layouts to use.
+ * XP bit in CP15 control reg is cleared.
+ *
+ * ARM_MMU_V6N ARM v6 MMU with XP bit of CP15 control reg
+ * set. New features such as shared-bit
+ * and execute-never bit are available.
+ * Multiprocessor support needs this mode.
+ *
+ * ARM_MMU_V7 ARM v7 MMU.
+ */
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3))
+#define ARM_MMU_MEMC 1
+#else
+#define ARM_MMU_MEMC 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
+ defined(CPU_ARM8) || defined(CPU_ARM9) || defined(CPU_ARM9E) || \
+ defined(CPU_ARM10) || defined(CPU_FA526)) || defined(CPU_SHEEVA)
+#define ARM_MMU_GENERIC 1
+#else
+#define ARM_MMU_GENERIC 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||\
+ defined(CPU_IXP12X0))
+#define ARM_MMU_SA1 1
+#else
+#define ARM_MMU_SA1 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425))
+#define ARM_MMU_XSCALE 1
+#else
+#define ARM_MMU_XSCALE 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ defined(CPU_ARM11MPCORE) && defined(ARM11MPCORE_COMPAT_MMU) || \
+ defined(CPU_ARM1136) || \
+ defined(CPU_ARM1176) || \
+ defined(CPU_ARM11) && \
+ !defined(CPU_CORTEX) && !defined(CPU_ARM11MPCORE)
+#define ARM_MMU_V6C 1
+#else
+#define ARM_MMU_V6C 0
+#endif
+
+#if !defined(_KERNEL_OPT) || \
+ defined(CPU_ARM11MPCORE) && !defined(ARM11MPCORE_COMPAT_MMU)
+#define ARM_MMU_V6N 1
+#else
+#define ARM_MMU_V6N 0
+#endif
+
+#define ARM_MMU_V6 (ARM_MMU_V6C + ARM_MMU_V6N)
+
+
+#if !defined(_KERNEL_OPT) || \
+ defined(CPU_CORTEX)
+#define ARM_MMU_V7 1
+#else
+#define ARM_MMU_V7 0
+#endif
+
+#define ARM_NMMUS (ARM_MMU_MEMC + ARM_MMU_GENERIC + \
+ ARM_MMU_SA1 + ARM_MMU_XSCALE + \
+ ARM_MMU_V6N + ARM_MMU_V6C + ARM_MMU_V7)
+#if ARM_NMMUS == 0
+#error ARM_NMMUS is 0
+#endif
+
+/*
+ * Step 4: Define features that may be present on a subset of CPUs
+ *
+ * ARM_XSCALE_PMU Performance Monitoring Unit on 80200 and 80321
+ */
+
+#if !defined(_KERNEL_OPT) || \
+ (defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321))
+#define ARM_XSCALE_PMU 1
+#else
+#define ARM_XSCALE_PMU 0
+#endif
+
+#endif /* _ARM_CPUCONF_H_ */
--- /dev/null
+/* cpufunc.h,v 1.40.22.4 2007/11/08 10:59:33 matt Exp */
+
+/*
+ * Copyright (c) 1997 Mark Brinicombe.
+ * Copyright (c) 1997 Causality Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Causality Limited.
+ * 4. The name of Causality Limited may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
+ * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * cpufunc.h
+ *
+ * Prototypes for cpu, mmu and tlb related functions.
+ */
+
+#ifndef _ARM32_CPUFUNC_H_
+#define _ARM32_CPUFUNC_H_
+
+#ifdef _KERNEL
+
+#include <sys/types.h>
+#include <arm/armreg.h>
+#include <arm/cpuconf.h>
+
+struct cpu_functions {
+
+ /* CPU functions */
+
+ u_int (*cf_id) (void);
+ void (*cf_cpwait) (void);
+
+ /* MMU functions */
+
+ u_int (*cf_control) (u_int, u_int);
+ void (*cf_domains) (u_int);
+ void (*cf_setttb) (u_int, bool);
+ u_int (*cf_faultstatus) (void);
+ u_int (*cf_faultaddress) (void);
+
+ /* TLB functions */
+
+ void (*cf_tlb_flushID) (void);
+ void (*cf_tlb_flushID_SE) (u_int);
+ void (*cf_tlb_flushI) (void);
+ void (*cf_tlb_flushI_SE) (u_int);
+ void (*cf_tlb_flushD) (void);
+ void (*cf_tlb_flushD_SE) (u_int);
+
+ /*
+ * Cache operations:
+ *
+ * We define the following primitives:
+ *
+ * icache_sync_all Synchronize I-cache
+ * icache_sync_range Synchronize I-cache range
+ *
+ * dcache_wbinv_all Write-back and Invalidate D-cache
+ * dcache_wbinv_range Write-back and Invalidate D-cache range
+ * dcache_inv_range Invalidate D-cache range
+ * dcache_wb_range Write-back D-cache range
+ *
+ * idcache_wbinv_all Write-back and Invalidate D-cache,
+ * Invalidate I-cache
+ * idcache_wbinv_range Write-back and Invalidate D-cache,
+ * Invalidate I-cache range
+ *
+ * Note that the ARM term for "write-back" is "clean". We use
+ * the term "write-back" since it's a more common way to describe
+ * the operation.
+ *
+ * There are some rules that must be followed:
+ *
+ * I-cache Synch (all or range):
+ * The goal is to synchronize the instruction stream,
+ * so you may need to write-back dirty D-cache blocks
+ * first. If a range is requested, and you can't
+ * synchronize just a range, you have to hit the whole
+ * thing.
+ *
+ * D-cache Write-Back and Invalidate range:
+ * If you can't WB-Inv a range, you must WB-Inv the
+ * entire D-cache.
+ *
+ * D-cache Invalidate:
+ * If you can't Inv the D-cache, you must Write-Back
+ * and Invalidate. Code that uses this operation
+ * MUST NOT assume that the D-cache will not be written
+ * back to memory.
+ *
+ * D-cache Write-Back:
+ * If you can't Write-back without doing an Inv,
+ * that's fine. Then treat this as a WB-Inv.
+ * Skipping the invalidate is merely an optimization.
+ *
+ * All operations:
+ * Valid virtual addresses must be passed to each
+ * cache operation.
+ */
+ void (*cf_icache_sync_all) (void);
+ void (*cf_icache_sync_range) (vaddr_t, vsize_t);
+
+ void (*cf_dcache_wbinv_all) (void);
+ void (*cf_dcache_wbinv_range)(vaddr_t, vsize_t);
+ void (*cf_dcache_inv_range) (vaddr_t, vsize_t);
+ void (*cf_dcache_wb_range) (vaddr_t, vsize_t);
+
+ void (*cf_sdcache_wbinv_range)(vaddr_t, paddr_t, psize_t);
+ void (*cf_sdcache_inv_range) (vaddr_t, paddr_t, psize_t);
+ void (*cf_sdcache_wb_range) (vaddr_t, paddr_t, psize_t);
+
+ void (*cf_idcache_wbinv_all) (void);
+ void (*cf_idcache_wbinv_range)(vaddr_t, vsize_t);
+
+ /* Other functions */
+
+ void (*cf_flush_prefetchbuf) (void);
+ void (*cf_drain_writebuf) (void);
+ void (*cf_flush_brnchtgt_C) (void);
+ void (*cf_flush_brnchtgt_E) (u_int);
+
+ void (*cf_sleep) (int mode);
+
+ /* Soft functions */
+
+ int (*cf_dataabt_fixup) (void *);
+ int (*cf_prefetchabt_fixup) (void *);
+
+ void (*cf_context_switch) (u_int);
+
+ void (*cf_setup) (char *);
+};
+
+extern struct cpu_functions cpufuncs;
+extern u_int cputype;
+
+#define cpu_id() cpufuncs.cf_id()
+
+#define cpu_control(c, e) cpufuncs.cf_control(c, e)
+#define cpu_domains(d) cpufuncs.cf_domains(d)
+#define cpu_setttb(t, f) cpufuncs.cf_setttb(t, f)
+#define cpu_faultstatus() cpufuncs.cf_faultstatus()
+#define cpu_faultaddress() cpufuncs.cf_faultaddress()
+
+#define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
+#define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
+#define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
+#define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e)
+#define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
+#define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
+
+#define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
+#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
+
+#define cpu_dcache_wbinv_all() cpufuncs.cf_dcache_wbinv_all()
+#define cpu_dcache_wbinv_range(a, s) cpufuncs.cf_dcache_wbinv_range((a), (s))
+#define cpu_dcache_inv_range(a, s) cpufuncs.cf_dcache_inv_range((a), (s))
+#define cpu_dcache_wb_range(a, s) cpufuncs.cf_dcache_wb_range((a), (s))
+
+#define cpu_sdcache_wbinv_range(a, b, s) cpufuncs.cf_sdcache_wbinv_range((a), (b), (s))
+#define cpu_sdcache_inv_range(a, b, s) cpufuncs.cf_sdcache_inv_range((a), (b), (s))
+#define cpu_sdcache_wb_range(a, b, s) cpufuncs.cf_sdcache_wb_range((a), (b), (s))
+
+#define cpu_idcache_wbinv_all() cpufuncs.cf_idcache_wbinv_all()
+#define cpu_idcache_wbinv_range(a, s) cpufuncs.cf_idcache_wbinv_range((a), (s))
+
+#define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
+#define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf()
+#define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C()
+#define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
+
+#define cpu_sleep(m) cpufuncs.cf_sleep(m)
+
+#define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a)
+#define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a)
+#define ABORT_FIXUP_OK 0 /* fixup succeeded */
+#define ABORT_FIXUP_FAILED 1 /* fixup failed */
+#define ABORT_FIXUP_RETURN 2 /* abort handler should return */
+
+#define cpu_context_switch(a) cpufuncs.cf_context_switch(a)
+#define cpu_setup(a) cpufuncs.cf_setup(a)
+
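+/*
+ * Illustrative sketch (not part of the original header): typical cache
+ * maintenance around a DMA buffer on a non-coherent CPU, using the
+ * wrapper macros above.  Per the rules documented in struct
+ * cpu_functions, the virtual addresses passed in must be valid.
+ */
+#if 0	/* example only, not compiled */
+static __inline void
+example_dma_pre_device_read(vaddr_t va, vsize_t len)
+{
+	/* Device is about to read the buffer: push dirty lines to RAM. */
+	cpu_dcache_wb_range(va, len);
+	cpu_drain_writebuf();
+}
+
+static __inline void
+example_dma_post_device_write(vaddr_t va, vsize_t len)
+{
+	/* Device wrote the buffer: discard stale cached copies. */
+	cpu_dcache_inv_range(va, len);
+}
+#endif
+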
+int set_cpufuncs (void);
+int set_cpufuncs_id (u_int);
+#define ARCHITECTURE_NOT_PRESENT 1 /* known but not configured */
+#define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */
+
+void cpufunc_nullop (void);
+int cpufunc_null_fixup (void *);
+int early_abort_fixup (void *);
+int late_abort_fixup (void *);
+u_int cpufunc_id (void);
+u_int cpufunc_control (u_int, u_int);
+void cpufunc_domains (u_int);
+u_int cpufunc_faultstatus (void);
+u_int cpufunc_faultaddress (void);
+
+#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3)
+void arm3_cache_flush (void);
+#endif /* CPU_ARM2 || CPU_ARM250 || CPU_ARM3 */
+
+#ifdef CPU_ARM2
+u_int arm2_id (void);
+#endif /* CPU_ARM2 */
+
+#ifdef CPU_ARM250
+u_int arm250_id (void);
+#endif
+
+#ifdef CPU_ARM3
+u_int arm3_control (u_int, u_int);
+#endif /* CPU_ARM3 */
+
+#if defined(CPU_ARM6) || defined(CPU_ARM7)
+void arm67_setttb (u_int, bool);
+void arm67_tlb_flush (void);
+void arm67_tlb_purge (u_int);
+void arm67_cache_flush (void);
+void arm67_context_switch (u_int);
+#endif /* CPU_ARM6 || CPU_ARM7 */
+
+#ifdef CPU_ARM6
+void arm6_setup (char *);
+#endif /* CPU_ARM6 */
+
+#ifdef CPU_ARM7
+void arm7_setup (char *);
+#endif /* CPU_ARM7 */
+
+#ifdef CPU_ARM7TDMI
+int arm7_dataabt_fixup (void *);
+void arm7tdmi_setup (char *);
+void arm7tdmi_setttb (u_int, bool);
+void arm7tdmi_tlb_flushID (void);
+void arm7tdmi_tlb_flushID_SE (u_int);
+void arm7tdmi_cache_flushID (void);
+void arm7tdmi_context_switch (u_int);
+#endif /* CPU_ARM7TDMI */
+
+#ifdef CPU_ARM8
+void arm8_setttb (u_int, bool);
+void arm8_tlb_flushID (void);
+void arm8_tlb_flushID_SE (u_int);
+void arm8_cache_flushID (void);
+void arm8_cache_flushID_E (u_int);
+void arm8_cache_cleanID (void);
+void arm8_cache_cleanID_E (u_int);
+void arm8_cache_purgeID (void);
+void arm8_cache_purgeID_E (u_int entry);
+
+void arm8_cache_syncI (void);
+void arm8_cache_cleanID_rng (vaddr_t, vsize_t);
+void arm8_cache_cleanD_rng (vaddr_t, vsize_t);
+void arm8_cache_purgeID_rng (vaddr_t, vsize_t);
+void arm8_cache_purgeD_rng (vaddr_t, vsize_t);
+void arm8_cache_syncI_rng (vaddr_t, vsize_t);
+
+void arm8_context_switch (u_int);
+
+void arm8_setup (char *);
+
+u_int arm8_clock_config (u_int, u_int);
+#endif
+
+#ifdef CPU_FA526
+void fa526_setup (char *);
+void fa526_setttb (u_int, bool);
+void fa526_context_switch (u_int);
+void fa526_cpu_sleep (int);
+void fa526_tlb_flushI_SE (u_int);
+void fa526_tlb_flushID_SE (u_int);
+void fa526_flush_prefetchbuf (void);
+void fa526_flush_brnchtgt_E (u_int);
+
+void fa526_icache_sync_all (void);
+void fa526_icache_sync_range(vaddr_t, vsize_t);
+void fa526_dcache_wbinv_all (void);
+void fa526_dcache_wbinv_range(vaddr_t, vsize_t);
+void fa526_dcache_inv_range (vaddr_t, vsize_t);
+void fa526_dcache_wb_range (vaddr_t, vsize_t);
+void fa526_idcache_wbinv_all(void);
+void fa526_idcache_wbinv_range(vaddr_t, vsize_t);
+#endif
+
+#ifdef CPU_SA110
+void sa110_setup (char *);
+void sa110_context_switch (u_int);
+#endif /* CPU_SA110 */
+
+#if defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa11x0_drain_readbuf (void);
+
+void sa11x0_context_switch (u_int);
+void sa11x0_cpu_sleep (int);
+
+void sa11x0_setup (char *);
+#endif
+
+#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110)
+void sa1_setttb (u_int, bool);
+
+void sa1_tlb_flushID_SE (u_int);
+
+void sa1_cache_flushID (void);
+void sa1_cache_flushI (void);
+void sa1_cache_flushD (void);
+void sa1_cache_flushD_SE (u_int);
+
+void sa1_cache_cleanID (void);
+void sa1_cache_cleanD (void);
+void sa1_cache_cleanD_E (u_int);
+
+void sa1_cache_purgeID (void);
+void sa1_cache_purgeID_E (u_int);
+void sa1_cache_purgeD (void);
+void sa1_cache_purgeD_E (u_int);
+
+void sa1_cache_syncI (void);
+void sa1_cache_cleanID_rng (vaddr_t, vsize_t);
+void sa1_cache_cleanD_rng (vaddr_t, vsize_t);
+void sa1_cache_purgeID_rng (vaddr_t, vsize_t);
+void sa1_cache_purgeD_rng (vaddr_t, vsize_t);
+void sa1_cache_syncI_rng (vaddr_t, vsize_t);
+
+#endif
+
+#ifdef CPU_ARM9
+void arm9_setttb (u_int, bool);
+
+void arm9_tlb_flushID_SE (u_int);
+
+void arm9_icache_sync_all (void);
+void arm9_icache_sync_range (vaddr_t, vsize_t);
+
+void arm9_dcache_wbinv_all (void);
+void arm9_dcache_wbinv_range (vaddr_t, vsize_t);
+void arm9_dcache_inv_range (vaddr_t, vsize_t);
+void arm9_dcache_wb_range (vaddr_t, vsize_t);
+
+void arm9_idcache_wbinv_all (void);
+void arm9_idcache_wbinv_range (vaddr_t, vsize_t);
+
+void arm9_context_switch (u_int);
+
+void arm9_setup (char *);
+
+extern unsigned arm9_dcache_sets_max;
+extern unsigned arm9_dcache_sets_inc;
+extern unsigned arm9_dcache_index_max;
+extern unsigned arm9_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_SHEEVA)
+void arm10_tlb_flushID_SE (u_int);
+void arm10_tlb_flushI_SE (u_int);
+
+void arm10_context_switch (u_int);
+
+void arm10_setup (char *);
+#endif
+
+#if defined(CPU_ARM9E) || defined (CPU_ARM10) || defined(CPU_SHEEVA)
+void armv5_ec_setttb (u_int, bool);
+
+void armv5_ec_icache_sync_all (void);
+void armv5_ec_icache_sync_range (vaddr_t, vsize_t);
+
+void armv5_ec_dcache_wbinv_all (void);
+void armv5_ec_dcache_wbinv_range (vaddr_t, vsize_t);
+void armv5_ec_dcache_inv_range (vaddr_t, vsize_t);
+void armv5_ec_dcache_wb_range (vaddr_t, vsize_t);
+
+void armv5_ec_idcache_wbinv_all (void);
+void armv5_ec_idcache_wbinv_range (vaddr_t, vsize_t);
+#endif
+
+#if defined (CPU_ARM10) || defined (CPU_ARM11MPCORE)
+void armv5_setttb (u_int, bool);
+
+void armv5_icache_sync_all (void);
+void armv5_icache_sync_range (vaddr_t, vsize_t);
+
+void armv5_dcache_wbinv_all (void);
+void armv5_dcache_wbinv_range (vaddr_t, vsize_t);
+void armv5_dcache_inv_range (vaddr_t, vsize_t);
+void armv5_dcache_wb_range (vaddr_t, vsize_t);
+
+void armv5_idcache_wbinv_all (void);
+void armv5_idcache_wbinv_range (vaddr_t, vsize_t);
+
+extern unsigned armv5_dcache_sets_max;
+extern unsigned armv5_dcache_sets_inc;
+extern unsigned armv5_dcache_index_max;
+extern unsigned armv5_dcache_index_inc;
+#endif
+
+#if defined(CPU_ARM11MPCORE)
+void arm11mpcore_setup (char *);
+#endif
+
+#if defined(CPU_ARM11) || defined(CPU_CORTEX)
+void arm11_setttb (u_int, bool);
+
+void arm11_tlb_flushID_SE (u_int);
+void arm11_tlb_flushI_SE (u_int);
+
+void arm11_context_switch (u_int);
+
+void arm11_cpu_sleep (int);
+void arm11_setup (char *string);
+void arm11_tlb_flushID (void);
+void arm11_tlb_flushI (void);
+void arm11_tlb_flushD (void);
+void arm11_tlb_flushD_SE (u_int va);
+
+void armv11_dcache_wbinv_all (void);
+void armv11_idcache_wbinv_all(void);
+
+void arm11_drain_writebuf (void);
+void arm11_sleep (int);
+
+void armv6_setttb (u_int, bool);
+
+void armv6_icache_sync_all (void);
+void armv6_icache_sync_range (vaddr_t, vsize_t);
+
+void armv6_dcache_wbinv_all (void);
+void armv6_dcache_wbinv_range (vaddr_t, vsize_t);
+void armv6_dcache_inv_range (vaddr_t, vsize_t);
+void armv6_dcache_wb_range (vaddr_t, vsize_t);
+
+void armv6_idcache_wbinv_all (void);
+void armv6_idcache_wbinv_range (vaddr_t, vsize_t);
+#endif
+
+#if defined(CPU_CORTEX)
+void armv7_setttb(u_int, bool);
+
+void armv7_icache_sync_range(vaddr_t, vsize_t);
+void armv7_dcache_wb_range(vaddr_t, vsize_t);
+void armv7_dcache_wbinv_range(vaddr_t, vsize_t);
+void armv7_dcache_inv_range(vaddr_t, vsize_t);
+void armv7_idcache_wbinv_range(vaddr_t, vsize_t);
+
+void armv7_dcache_wbinv_all (void);
+void armv7_idcache_wbinv_all(void);
+void armv7_icache_sync_all(void);
+void armv7_cpu_sleep(int);
+void armv7_context_switch(u_int);
+void armv7_tlb_flushID_SE(u_int);
+void armv7_setup (char *string);
+#endif
+
+
+#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
+void arm11x6_setttb (u_int, bool);
+void arm11x6_idcache_wbinv_all (void);
+void arm11x6_dcache_wbinv_all (void);
+void arm11x6_icache_sync_all (void);
+void arm11x6_flush_prefetchbuf (void);
+void arm11x6_icache_sync_range (vaddr_t, vsize_t);
+void arm11x6_idcache_wbinv_range (vaddr_t, vsize_t);
+void arm11x6_setup (char *string);
+void arm11x6_sleep (int); /* no ref. for errata */
+#endif
+#if defined(CPU_ARM1136)
+void arm1136_sleep_rev0 (int); /* for errata 336501 */
+#endif
+
+
+#if defined(CPU_ARM9) || defined(CPU_ARM9E) || defined(CPU_ARM10) || \
+ defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
+ defined(CPU_FA526) || \
+ defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
+ defined(CPU_CORTEX) || defined(CPU_SHEEVA)
+
+void armv4_tlb_flushID (void);
+void armv4_tlb_flushI (void);
+void armv4_tlb_flushD (void);
+void armv4_tlb_flushD_SE (u_int);
+
+void armv4_drain_writebuf (void);
+#endif
+
+#if defined(CPU_IXP12X0)
+void ixp12x0_drain_readbuf (void);
+void ixp12x0_context_switch (u_int);
+void ixp12x0_setup (char *);
+#endif
+
+#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
+ defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
+ defined(CPU_CORTEX)
+
+void xscale_cpwait (void);
+#define cpu_cpwait() cpufuncs.cf_cpwait()
+
+void xscale_cpu_sleep (int);
+
+u_int xscale_control (u_int, u_int);
+
+void xscale_setttb (u_int, bool);
+
+void xscale_tlb_flushID_SE (u_int);
+
+void xscale_cache_flushID (void);
+void xscale_cache_flushI (void);
+void xscale_cache_flushD (void);
+void xscale_cache_flushD_SE (u_int);
+
+void xscale_cache_cleanID (void);
+void xscale_cache_cleanD (void);
+void xscale_cache_cleanD_E (u_int);
+
+void xscale_cache_clean_minidata (void);
+
+void xscale_cache_purgeID (void);
+void xscale_cache_purgeID_E (u_int);
+void xscale_cache_purgeD (void);
+void xscale_cache_purgeD_E (u_int);
+
+void xscale_cache_syncI (void);
+void xscale_cache_cleanID_rng (vaddr_t, vsize_t);
+void xscale_cache_cleanD_rng (vaddr_t, vsize_t);
+void xscale_cache_purgeID_rng (vaddr_t, vsize_t);
+void xscale_cache_purgeD_rng (vaddr_t, vsize_t);
+void xscale_cache_syncI_rng (vaddr_t, vsize_t);
+void xscale_cache_flushD_rng (vaddr_t, vsize_t);
+
+void xscale_context_switch (u_int);
+
+void xscale_setup (char *);
+#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 || CPU_CORTEX */
+
+#if defined(CPU_SHEEVA)
+void sheeva_dcache_wbinv_range (vaddr_t, vsize_t);
+void sheeva_dcache_inv_range (vaddr_t, vsize_t);
+void sheeva_dcache_wb_range (vaddr_t, vsize_t);
+void sheeva_idcache_wbinv_range (vaddr_t, vsize_t);
+void sheeva_setup(char *);
+void sheeva_cpu_sleep(int);
+#endif
+
+#define tlb_flush cpu_tlb_flushID
+#define setttb cpu_setttb
+#define drain_writebuf cpu_drain_writebuf
+
+#ifndef cpu_cpwait
+#define cpu_cpwait()
+#endif
+
+/*
+ * Macros for manipulating CPU interrupts
+ */
+#ifdef __PROG32
+static __inline u_int32_t __set_cpsr_c(uint32_t bic, uint32_t eor) __attribute__((__unused__));
+static __inline u_int32_t disable_interrupts(uint32_t mask) __attribute__((__unused__));
+static __inline u_int32_t enable_interrupts(uint32_t mask) __attribute__((__unused__));
+
+static __inline uint32_t
+__set_cpsr_c(uint32_t bic, uint32_t eor)
+{
+ uint32_t tmp, ret;
+
+ __asm volatile(
+ "mrs %0, cpsr\n" /* Get the CPSR */
+ "bic %1, %0, %2\n" /* Clear bits */
+ "eor %1, %1, %3\n" /* XOR bits */
+ "msr cpsr_c, %1\n" /* Set the control field of CPSR */
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (bic), "r" (eor) : "memory");
+
+ return ret;
+}
+
+static __inline uint32_t
+disable_interrupts(uint32_t mask)
+{
+ uint32_t tmp, ret;
+ mask &= (I32_bit | F32_bit);
+
+ __asm volatile(
+ "mrs %0, cpsr\n" /* Get the CPSR */
+ "orr %1, %0, %2\n" /* set bits */
+ "msr cpsr_c, %1\n" /* Set the control field of CPSR */
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (mask)
+ : "memory");
+
+ return ret;
+}
+
+static __inline uint32_t
+enable_interrupts(uint32_t mask)
+{
+ uint32_t ret, tmp;
+ mask &= (I32_bit | F32_bit);
+
+ __asm volatile(
+ "mrs %0, cpsr\n" /* Get the CPSR */
+ "bic %1, %0, %2\n" /* Clear bits */
+ "msr cpsr_c, %1\n" /* Set the control field of CPSR */
+ : "=&r" (ret), "=&r" (tmp)
+ : "r" (mask)
+ : "memory");
+
+ return ret;
+}
+
+#define restore_interrupts(old_cpsr) \
+ (__set_cpsr_c((I32_bit | F32_bit), (old_cpsr) & (I32_bit | F32_bit)))
+
+static inline void cpsie(register_t psw) __attribute__((__unused__));
+static inline register_t cpsid(register_t psw) __attribute__((__unused__));
+
+static inline void
+cpsie(register_t psw)
+{
+#ifdef _ARM_ARCH_6
+ if (!__builtin_constant_p(psw)) {
+ enable_interrupts(psw);
+ return;
+ }
+ switch (psw & (I32_bit|F32_bit)) {
+ case I32_bit: __asm("cpsie\ti"); break;
+ case F32_bit: __asm("cpsie\tf"); break;
+ case I32_bit|F32_bit: __asm("cpsie\tif"); break;
+ }
+#else
+ enable_interrupts(psw);
+#endif
+}
+
+static inline register_t
+cpsid(register_t psw)
+{
+#ifdef _ARM_ARCH_6
+ register_t oldpsw;
+ if (!__builtin_constant_p(psw))
+ return disable_interrupts(psw);
+
+ __asm("mrs %0, cpsr" : "=r"(oldpsw));
+ switch (psw & (I32_bit|F32_bit)) {
+ case I32_bit: __asm("cpsid\ti"); break;
+ case F32_bit: __asm("cpsid\tf"); break;
+ case I32_bit|F32_bit: __asm("cpsid\tif"); break;
+ }
+ return oldpsw;
+#else
+ return disable_interrupts(psw);
+#endif
+}
+
+#else /* ! __PROG32 */
+#define disable_interrupts(mask) \
+ (set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), \
+ (mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
+
+#define enable_interrupts(mask) \
+ (set_r15((mask) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE), 0))
+
+#define restore_interrupts(old_r15) \
+ (set_r15((R15_IRQ_DISABLE | R15_FIQ_DISABLE), \
+ (old_r15) & (R15_IRQ_DISABLE | R15_FIQ_DISABLE)))
+#endif /* __PROG32 */
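+
+/*
+ * Illustrative sketch (not part of the original header), assuming
+ * __PROG32: the usual save/disable/restore pairing around a short
+ * critical section.  disable_interrupts() returns the previous CPSR,
+ * which restore_interrupts() uses to put I/F back as they were.
+ */
+#if 0	/* example only, not compiled */
+static __inline void
+example_critical_section(void)
+{
+	uint32_t saved = disable_interrupts(I32_bit | F32_bit);
+	/* ... code that must not be interrupted ... */
+	restore_interrupts(saved);
+}
+#endif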
+
+#ifdef __PROG32
+/* Functions to manipulate the CPSR. */
+u_int SetCPSR(u_int, u_int);
+u_int GetCPSR(void);
+#else
+/* Functions to manipulate the processor control bits in r15. */
+u_int set_r15(u_int, u_int);
+u_int get_r15(void);
+#endif /* __PROG32 */
+
+
+/*
+ * CPU functions from locore.S
+ */
+
+void cpu_reset (void) __dead;
+
+/*
+ * Cache info variables.
+ */
+
+/* PRIMARY CACHE VARIABLES */
+struct arm_cache_info {
+ u_int icache_size;
+ u_int icache_line_size;
+ u_int icache_ways;
+ u_int icache_sets;
+
+ u_int dcache_size;
+ u_int dcache_line_size;
+ u_int dcache_ways;
+ u_int dcache_sets;
+
+ u_int cache_type;
+ bool cache_unified;
+};
+
+extern u_int arm_cache_prefer_mask;
+extern u_int arm_dcache_align;
+extern u_int arm_dcache_align_mask;
+
+extern struct arm_cache_info arm_pcache;
+extern struct arm_cache_info arm_scache;
+#endif /* _KERNEL */
+
+#if defined(_KERNEL) || defined(_KMEMUSER)
+/*
+ * Miscellany
+ */
+
+int get_pc_str_offset (void);
+
+/*
+ * Functions to manipulate cpu r13
+ * (in arm/arm32/setstack.S)
+ */
+
+void set_stackptr (u_int, u_int);
+u_int get_stackptr (u_int);
+
+#endif /* _KERNEL || _KMEMUSER */
+
+#endif /* _ARM32_CPUFUNC_H_ */
+
+/* End of cpufunc.h */
--- /dev/null
+/* $NetBSD: db_machdep.h,v 1.18 2012/02/16 02:26:35 christos Exp $ */
+
+/*
+ * Copyright (c) 1996 Scott K Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+#ifndef _ARM_DB_MACHDEP_H_
+#define _ARM_DB_MACHDEP_H_
+
+/*
+ * Machine-dependent defines for new kernel debugger.
+ */
+#include <sys/types.h>
+#include <uvm/uvm_extern.h>
+#include <arm/armreg.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+
+/* end of mangling */
+
+typedef vaddr_t db_addr_t; /* address - unsigned */
+#define DDB_EXPR_FMT "l" /* expression is long */
+typedef long db_expr_t; /* expression - signed */
+
+typedef trapframe_t db_regs_t;
+
+extern db_regs_t ddb_regs; /* register state */
+#define DDB_REGS (&ddb_regs)
+
+#ifdef __PROG26
+#define PC_REGS(regs) ((regs)->tf_r15 & R15_PC)
+#define PC_ADVANCE(regs) ((regs)->tf_r15 += BKPT_SIZE)
+#else
+#define PC_REGS(regs) ((regs)->tf_pc)
+#define PC_ADVANCE(r) ((r)->tf_r15 += BKPT_SIZE)
+#endif
+
+#define BKPT_ADDR(addr) (addr) /* breakpoint address */
+#if defined(DDB)
+#define BKPT_INST (KERNEL_BREAKPOINT) /* breakpoint instruction */
+#else
+/* breakpoint instruction if we use KGDB; it is used in db_set_temp_breakpoint() */
+#define BKPT_INST (GDB5_BREAKPOINT)
+#endif
+#define BKPT_SIZE (INSN_SIZE) /* size of breakpoint inst */
+#define BKPT_SET(inst, addr) (BKPT_INST)
+
+/*#define FIXUP_PC_AFTER_BREAK(regs) ((regs)->tf_pc -= BKPT_SIZE)*/
+
+#define T_FAULT (0)
+#define T_BREAKPOINT (1)
+
+#define IS_BREAKPOINT_TRAP(type, code) ((type) == T_BREAKPOINT)
+#define IS_WATCHPOINT_TRAP(type, code) (0)
+
+#define inst_trap_return(ins) (0)
+/* ldmxx reg, {..., pc}
+ 01800000 stack mode
+ 000f0000 register
+ 0000ffff register list */
+/* mov pc, reg
+ 0000000f register */
+#define inst_return(ins) (((ins) & 0x0e108000) == 0x08108000 || \
+ ((ins) & 0x0ff0fff0) == 0x01a0f000)
+/* bl ...
+ 00ffffff offset>>2 */
+#define inst_call(ins) (((ins) & 0x0f000000) == 0x0b000000)
+/* b ...
+ 00ffffff offset>>2 */
+/* ldr pc, [pc, reg, lsl #2]
+ 0000000f register */
+#define inst_branch(ins) (((ins) & 0x0f000000) == 0x0a000000 || \
+ ((ins) & 0x0fdffff0) == 0x079ff100 || \
+ ((ins) & 0x0ff0f000) == 0x0590f000)
+#define inst_load(ins) (0)
+#define inst_store(ins) (0)
+#define inst_unconditional_flow_transfer(ins) \
+ ((((ins) & INSN_COND_MASK) == INSN_COND_AL) && \
+ (inst_branch(ins) || inst_call(ins) || inst_return(ins)))
+
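+/*
+ * Worked example (illustrative, not part of the original header): the
+ * encoding of "mov pc, lr" is 0xe1a0f00e; masked with 0x0ff0fff0 this
+ * gives 0x01a0f000, so inst_return() matches it, and since its condition
+ * field is AL it also satisfies inst_unconditional_flow_transfer().
+ */
+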
+#define getreg_val (0)
+#define next_instr_address(pc, bd) ((bd) ? (pc) : ((pc) + INSN_SIZE))
+
+#define DB_MACHINE_COMMANDS
+
+#define SOFTWARE_SSTEP
+
+u_int branch_taken(u_int insn, u_int pc, db_regs_t *db_regs);
+int kdb_trap(int, db_regs_t *);
+void db_machine_init(void);
+int db_validate_address(vm_offset_t addr);
+
+#define DB_ELF_SYMBOLS
+#define DB_ELFSIZE 32
+
+/*
+ * kgdb
+ */
+typedef register_t kgdb_reg_t;
+#define KGDB_NUMREGS (16 + 8*3 + 2) /* r0..r15, f0..f7, fps, cpsr
+ * fp-registers are 12 bytes wide */
+#define KGDB_REGNUM_R0 0
+#define KGDB_REGNUM_SPSR 16 + 8*3 + 1
+#define KGDB_BUFLEN 1024
+
+#endif /* _ARM_DB_MACHDEP_H_ */
--- /dev/null
+/* $NetBSD: fiq.h,v 1.1 2001/12/20 01:20:23 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2001 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Jason R. Thorpe for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_FIQ_H_
+#define _ARM_FIQ_H_
+
+#include <sys/queue.h>
+
+struct fiqregs {
+ u_int fr_r8; /* FIQ mode r8 */
+ u_int fr_r9; /* FIQ mode r9 */
+ u_int fr_r10; /* FIQ mode r10 */
+ u_int fr_r11; /* FIQ mode r11 */
+ u_int fr_r12; /* FIQ mode r12 */
+ u_int fr_r13; /* FIQ mode r13 */
+};
+
+struct fiqhandler {
+ TAILQ_ENTRY(fiqhandler) fh_list;/* link in the FIQ handler stack */
+ void *fh_func; /* FIQ handler routine */
+ size_t fh_size; /* size of FIQ handler */
+ int fh_flags; /* flags; see below */
+ struct fiqregs *fh_regs; /* pointer to regs structure */
+};
+
+#define FH_CANPUSH 0x01 /* can push this handler out of the way */
+
+int fiq_claim(struct fiqhandler *);
+void fiq_release(struct fiqhandler *);
+
+void fiq_getregs(struct fiqregs *);
+void fiq_setregs(struct fiqregs *);
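+
+/*
+ * Illustrative sketch (not part of the original header): claiming a FIQ
+ * handler.  The my_fiq_handler symbols below are hypothetical; in real
+ * use they delimit a position-independent handler that fiq_claim()
+ * installs at the FIQ vector.
+ */
+#if 0	/* example only, not compiled */
+extern char my_fiq_handler[], my_fiq_handler_end[];	/* hypothetical */
+
+static struct fiqregs example_fiq_regs;
+static struct fiqhandler example_fiq = {
+	.fh_func = my_fiq_handler,
+	.fh_flags = FH_CANPUSH,
+	.fh_regs = &example_fiq_regs,
+};
+
+static int
+example_fiq_attach(void)
+{
+	example_fiq.fh_size = my_fiq_handler_end - my_fiq_handler;
+	return fiq_claim(&example_fiq);
+}
+#endif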
+
+#endif /* _ARM_FIQ_H_ */
--- /dev/null
+/* $NetBSD: fp.h,v 1.1 2001/01/10 19:02:06 bjh21 Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * fp.h
+ *
+ * FP info
+ *
+ * Created : 10/10/95
+ */
+
+#ifndef __ARM32_FP_H
+#define __ARM32_FP_H
+
+/*
+ * An extended precision floating point number
+ */
+
+typedef struct fp_extended_precision {
+ u_int32_t fp_exponent;
+ u_int32_t fp_mantissa_hi;
+ u_int32_t fp_mantissa_lo;
+} fp_extended_precision_t;
+
+typedef struct fp_extended_precision fp_reg_t;
+
+/*
+ * Information about the FPE-SP state that is stored in the pcb
+ *
+ * This needs to move and be hidden from userland.
+ */
+
+struct fpe_sp_state {
+ unsigned int fp_flags;
+ unsigned int fp_sr;
+ unsigned int fp_cr;
+ fp_reg_t fp_registers[16];
+};
+
+/*
+ * Type for a saved FP context, if we want to translate the context to a
+ * user-readable form
+ */
+
+typedef struct {
+ u_int32_t fpsr;
+ fp_extended_precision_t regs[8];
+} fp_state_t;
+
+#endif
+
+/* End of fp.h */
--- /dev/null
+/* $NetBSD: frame.h,v 1.17 2012/08/16 17:35:01 matt Exp $ */
+
+/*
+ * Copyright (c) 1994-1997 Mark Brinicombe.
+ * Copyright (c) 1994 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * arm/frame.h - Stack frames structures common to arm26 and arm32
+ */
+
+#ifndef _ARM_FRAME_H_
+#define _ARM_FRAME_H_
+
+#ifndef _LOCORE
+
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+
+/*
+ * Trap frame. Pushed onto the kernel stack on a trap (synchronous exception).
+ */
+
+typedef struct trapframe {
+ register_t tf_spsr; /* Zero on arm26 */
+ register_t tf_fill; /* fill here so r0 will be dword aligned */
+ register_t tf_r0;
+ register_t tf_r1;
+ register_t tf_r2;
+ register_t tf_r3;
+ register_t tf_r4;
+ register_t tf_r5;
+ register_t tf_r6;
+ register_t tf_r7;
+ register_t tf_r8;
+ register_t tf_r9;
+ register_t tf_r10;
+ register_t tf_r11;
+ register_t tf_r12;
+ register_t tf_usr_sp;
+ register_t tf_usr_lr;
+ register_t tf_svc_sp; /* Not used on arm26 */
+ register_t tf_svc_lr; /* Not used on arm26 */
+ register_t tf_pc;
+} trapframe_t;
+
+/* Register numbers */
+#define tf_ip tf_r12
+#define tf_r13 tf_usr_sp
+#define tf_r14 tf_usr_lr
+#define tf_r15 tf_pc
+
+#ifdef __PROG32
+#define TRAP_USERMODE(tf) (((tf)->tf_spsr & PSR_MODE) == PSR_USR32_MODE)
+#elif defined(__PROG26)
+#define TRAP_USERMODE(tf) (((tf)->tf_r15 & R15_MODE) == R15_MODE_USR)
+#endif
+
+/*
+ * Signal frame. Pushed onto user stack before calling sigcode.
+ */
+#ifdef COMPAT_16
+struct sigframe_sigcontext {
+ struct sigcontext sf_sc;
+};
+#endif
+
+/* the pointers are used in the trampoline code to locate the ucontext */
+struct sigframe_siginfo {
+ siginfo_t sf_si; /* actual saved siginfo */
+ ucontext_t sf_uc; /* actual saved ucontext */
+};
+
+#ifdef _KERNEL
+__BEGIN_DECLS
+void sendsig_sigcontext(const ksiginfo_t *, const sigset_t *);
+void *getframe(struct lwp *, int, int *);
+__END_DECLS
+#define lwp_trapframe(l) ((l)->l_md.md_tf)
+#define lwp_settrapframe(l, tf) ((l)->l_md.md_tf = (tf))
+#endif
+
+#endif /* _LOCORE */
+
+#endif /* _ARM_FRAME_H_ */
+
+/* End of frame.h */
--- /dev/null
+/* $NetBSD: isa_machdep.h,v 1.9 2012/09/21 14:21:57 matt Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM32_ISA_MACHDEP_H_
+#define _ARM32_ISA_MACHDEP_H_
+
+#include <sys/bus.h>
+#include <dev/isa/isadmavar.h>
+
+/*
+ * Types provided to machine-independent ISA code.
+ */
+struct arm32_isa_chipset {
+ struct isa_dma_state ic_dmastate;
+};
+
+typedef struct arm32_isa_chipset *isa_chipset_tag_t;
+
+struct device; /* XXX */
+struct isabus_attach_args; /* XXX */
+
+/*
+ * Functions provided to machine-independent ISA code.
+ */
+void isa_attach_hook(struct device *, struct device *,
+ struct isabus_attach_args *);
+void isa_detach_hook(isa_chipset_tag_t, device_t);
+const struct evcnt *isa_intr_evcnt(isa_chipset_tag_t ic, int irq);
+void *isa_intr_establish(isa_chipset_tag_t ic, int irq, int type,
+ int level, int (*ih_fun)(void *), void *ih_arg);
+void isa_intr_disestablish(isa_chipset_tag_t ic, void *handler);
+
+#define isa_dmainit(ic, bst, dmat, d) \
+ _isa_dmainit(&(ic)->ic_dmastate, (bst), (dmat), (d))
+#define isa_dmadestroy(ic) \
+ _isa_dmadestroy(&(ic)->ic_dmastate)
+#define isa_dmacascade(ic, c) \
+ _isa_dmacascade(&(ic)->ic_dmastate, (c))
+#define isa_dmamaxsize(ic, c) \
+ _isa_dmamaxsize(&(ic)->ic_dmastate, (c))
+#define isa_dmamap_create(ic, c, s, f) \
+ _isa_dmamap_create(&(ic)->ic_dmastate, (c), (s), (f))
+#define isa_dmamap_destroy(ic, c) \
+ _isa_dmamap_destroy(&(ic)->ic_dmastate, (c))
+#define isa_dmastart(ic, c, a, n, p, f, bf) \
+ _isa_dmastart(&(ic)->ic_dmastate, (c), (a), (n), (p), (f), (bf))
+#define isa_dmaabort(ic, c) \
+ _isa_dmaabort(&(ic)->ic_dmastate, (c))
+#define isa_dmacount(ic, c) \
+ _isa_dmacount(&(ic)->ic_dmastate, (c))
+#define isa_dmafinished(ic, c) \
+ _isa_dmafinished(&(ic)->ic_dmastate, (c))
+#define isa_dmadone(ic, c) \
+ _isa_dmadone(&(ic)->ic_dmastate, (c))
+#define isa_dmafreeze(ic) \
+ _isa_dmafreeze(&(ic)->ic_dmastate)
+#define isa_dmathaw(ic) \
+ _isa_dmathaw(&(ic)->ic_dmastate)
+#define isa_dmamem_alloc(ic, c, s, ap, f) \
+ _isa_dmamem_alloc(&(ic)->ic_dmastate, (c), (s), (ap), (f))
+#define isa_dmamem_free(ic, c, a, s) \
+ _isa_dmamem_free(&(ic)->ic_dmastate, (c), (a), (s))
+#define isa_dmamem_map(ic, c, a, s, kp, f) \
+ _isa_dmamem_map(&(ic)->ic_dmastate, (c), (a), (s), (kp), (f))
+#define isa_dmamem_unmap(ic, c, k, s) \
+ _isa_dmamem_unmap(&(ic)->ic_dmastate, (c), (k), (s))
+#define isa_dmamem_mmap(ic, c, a, s, o, p, f) \
+ _isa_dmamem_mmap(&(ic)->ic_dmastate, (c), (a), (s), (o), (p), (f))
+#define isa_drq_alloc(ic, c) \
+ _isa_drq_alloc(&(ic)->ic_dmastate, c)
+#define isa_drq_free(ic, c) \
+ _isa_drq_free(&(ic)->ic_dmastate, c)
+#define isa_drq_isfree(ic, c) \
+ _isa_drq_isfree(&(ic)->ic_dmastate, (c))
+#define isa_malloc(ic, c, s, p, f) \
+ _isa_malloc(&(ic)->ic_dmastate, (c), (s), (p), (f))
+#define isa_free(a, p) \
+ _isa_free((a), (p))
+#define isa_mappage(m, o, p) \
+ _isa_mappage((m), (o), (p))
+
+/*
+ * ALL OF THE FOLLOWING ARE MACHINE-DEPENDENT, AND SHOULD NOT BE USED
+ * BY PORTABLE CODE.
+ */
+
+extern struct arm32_bus_dma_tag isa_bus_dma_tag;
+
+/* bus space tags */
+extern struct bus_space isa_io_bs_tag;
+extern struct bus_space isa_mem_bs_tag;
+
+/* ISA chipset */
+extern struct arm32_isa_chipset isa_chipset_tag;
+
+/* for pccons.c */
+#define MONO_BASE 0x3B4
+#define MONO_BUF 0x000B0000
+#define CGA_BASE 0x3D4
+#define CGA_BUF 0x000B8000
+#define VGA_BUF 0xA0000
+#define VGA_BUF_LEN (0xBFFFF - 0xA0000)
+
+void isa_init(vaddr_t, vaddr_t);
+void isa_io_init(vaddr_t, vaddr_t);
+void isa_dma_init(void);
+vaddr_t isa_io_data_vaddr(void);
+vaddr_t isa_mem_data_vaddr(void);
+int isa_intr_alloc(isa_chipset_tag_t ic, int mask, int type, int *irq);
+void isa_intr_init(void);
+
+/*
+ * Miscellaneous functions.
+ */
+void sysbeep(int, int); /* beep with the system speaker */
+void isa_fillw(u_int val, void *addr, size_t len);
+
+#endif /* _ARM32_ISA_MACHDEP_H_ XXX */
--- /dev/null
+/* $NetBSD: isapnp_machdep.h,v 1.2 2008/04/28 20:23:14 martin Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center and by Christos Zoulas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Functions provided to machine-independent ISA PnP code.
+ */
+int isapnp_map(struct isapnp_softc *);
+void isapnp_unmap(struct isapnp_softc *);
+int isapnp_map_readport(struct isapnp_softc *);
+void isapnp_unmap_readport(struct isapnp_softc *);
--- /dev/null
+/* $NetBSD: kcore.h,v 1.1 2008/01/01 14:06:43 chris Exp $ */
+
+/*
+ * Copyright (c) 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Modified for NetBSD/i386 by Jason R. Thorpe, Numerical Aerospace
+ * Simulation Facility, NASA Ames Research Center.
+ */
+
+#ifndef _ARM_KCORE_H_
+#define _ARM_KCORE_H_
+
+typedef struct cpu_kcore_hdr {
+ uint32_t version; /* structure version */
+ uint32_t flags; /* flags */
+#define KCORE_ARM_APX 0x0001 /* L1 tables are in APX
+ format */
+ uint32_t PAKernelL1Table; /* PA of kernel L1 table */
+ uint32_t PAUserL1Table; /* PA of userland L1 table */
+ uint16_t UserL1TableSize; /* size of User L1 table */
+ uint32_t nmemsegs; /* Number of RAM segments */
+ uint32_t omemsegs; /* offset to memsegs */
+
+ /*
+ * future versions will add fields here.
+ */
+#if 0
+ phys_ram_seg_t memsegs[]; /* RAM segments */
+#endif
+} cpu_kcore_hdr_t;
+
+#endif /* _ARM_KCORE_H_ */
#endif
#if defined(_KERNEL)
-static __inline int
-__swp(int __val, volatile unsigned char *__ptr)
+static __inline __cpu_simple_lock_t
+__swp(__cpu_simple_lock_t __val, volatile __cpu_simple_lock_t *__ptr)
{
-
+#ifdef _ARM_ARCH_6
+ __cpu_simple_lock_t __rv, __tmp;
+ if (sizeof(*__ptr) == 1) {
+ __asm volatile(
+ "1:\t"
+ "ldrexb\t%[__rv], [%[__ptr]]" "\n\t"
+ "cmp\t%[__rv],%[__val]" "\n\t"
+ "strexbne\t%[__tmp], %[__val], [%[__ptr]]" "\n\t"
+ "cmpne\t%[__tmp], #0" "\n\t"
+ "bne\t1b" "\n\t"
+#ifdef _ARM_ARCH_7
+ "dmb"
+#else
+ "mcr\tp15, 0, %[__tmp], c7, c10, 5"
+#endif
+ : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
+ : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
+ } else {
+ __asm volatile(
+ "1:\t"
+ "ldrex\t%[__rv], [%[__ptr]]" "\n\t"
+ "cmp\t%[__rv],%[__val]" "\n\t"
+ "strexne\t%[__tmp], %[__val], [%[__ptr]]" "\n\t"
+ "cmpne\t%[__tmp], #0" "\n\t"
+ "bne\t1b" "\n\t"
+#ifdef _ARM_ARCH_7
+ "nop"
+#else
+ "mcr\tp15, 0, %[__tmp], c7, c10, 5"
+#endif
+ : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
+ : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
+ }
+ return __rv;
+#else
__asm volatile("swpb %0, %1, [%2]"
: "=&r" (__val) : "r" (__val), "r" (__ptr) : "memory");
return __val;
+#endif
}
#else
+/*
+ * On Cortex-A9 (SMP), SWP no longer guarantees atomic results. Thus we pad
+ * out SWP so that when the A9 generates an undefined exception we can replace
+ * the SWP/MOV instructions with the right LDREX/STREX instructions.
+ *
+ * This is why we force the SWP into the template needed for LDREX/STREX
+ * including the extra instructions and extra register for testing the result.
+ */
static __inline int
__swp(int __val, volatile int *__ptr)
{
-
- __asm volatile("swp %0, %1, [%2]"
- : "=&r" (__val) : "r" (__val), "r" (__ptr) : "memory");
- return __val;
+ int __rv, __tmp;
+ __asm volatile(
+ "1:\t"
+#ifdef _ARM_ARCH_6
+ "ldrex\t%[__rv], [%[__ptr]]" "\n\t"
+ "cmp\t%[__rv],%[__val]" "\n\t"
+ "strexne\t%[__tmp], %[__val], [%[__ptr]]" "\n\t"
+#else
+ "swp\t%[__rv], %[__val], [%[__ptr]]" "\n\t"
+ "cmp\t%[__rv],%[__val]" "\n\t"
+ "movs\t%[__tmp], #0" "\n\t"
+#endif
+ "cmpne\t%[__tmp], #0" "\n\t"
+ "bne\t1b" "\n\t"
+#ifdef _ARM_ARCH_7
+ "dmb"
+#elif defined(_ARM_ARCH_6)
+ "mcr\tp15, 0, %[__tmp], c7, c10, 5"
+#else
+ "nop"
+#endif
+ : [__rv] "=&r" (__rv), [__tmp] "=&r"(__tmp)
+ : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory");
+ return __rv;
}
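+
+/*
+ * Illustrative sketch (not part of the patch): __swp() is the primitive
+ * behind __cpu_simple_lock(); acquiring the lock keeps swapping the
+ * LOCKED value in until the previous contents come back UNLOCKED.  The
+ * __SIMPLELOCK_* constants are assumed to be the ones defined earlier in
+ * this header.
+ */
+#if 0	/* example only, not compiled */
+static __inline void
+example_simple_lock(volatile int *alp)
+{
+	while (__swp(__SIMPLELOCK_LOCKED, alp) != __SIMPLELOCK_UNLOCKED)
+		continue;
+}
+#endif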
#endif /* _KERNEL */
--- /dev/null
+/* $NetBSD: netbsd32_machdep.h,v 1.1 2012/08/03 07:59:23 matt Exp $ */
+
+/*-
+ * Copyright (c) 2012 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas <matt@3am-software.com>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_NETBSD32_H_
+#define _ARM_NETBSD32_H_
+
+#include <sys/types.h>
+
+#define NETBSD32_POINTER_TYPE uint32_t
+typedef struct { NETBSD32_POINTER_TYPE i32; } netbsd32_pointer_t;
+
+#ifdef __ARM_EABI__
+#define NETBSD32_INT64_ALIGN __attribute__((__aligned__(4)))
+#else
+#define NETBSD32_INT64_ALIGN __attribute__((__aligned__(8)))
+#endif
+
+typedef netbsd32_pointer_t netbsd32_sigcontextp_t;
+
+/*
+ * The sigcode is ABI neutral.
+ */
+#define netbsd32_sigcode sigcode
+#define netbsd32_esigcode esigcode
+
+/*
+ * Note: syscall_intern and setregs do not care about COMPAT_NETBSD32.
+ */
+#define netbsd32_syscall_intern syscall_intern
+#define netbsd32_setregs setregs
+
+#define VM_MAXUSER_ADDRESS32 VM_MAXUSER_ADDRESS
+#define NETBSD32_MID_MACHINE MID_MACHINE
+#define USRSTACK32 USRSTACK
+#define MAXTSIZ32 MAXTSIZ
+#define DFLDSIZ32 DFLDSIZ
+#define MAXDSIZ32 MAXDSIZ
+#define DFLSSIZ32 DFLSSIZ
+#define MAXSSIZ32 MAXSSIZ
+
+#endif /* _ARM_NETBSD32_H_ */
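A hedged illustration of how the definitions above are meant to be used: compat structures describing the 32-bit user ABI wrap user pointers in netbsd32_pointer_t and pin the alignment of 64-bit members with NETBSD32_INT64_ALIGN so the kernel's view of the layout matches the 32-bit process's. The typedef and structure names below are invented for the example, not part of this header.

/* Illustrative only: a 64-bit field whose alignment follows the 32-bit
 * user ABI, next to a 32-bit user pointer. */
typedef int64_t example_netbsd32_int64 NETBSD32_INT64_ALIGN;

struct example_netbsd32_args {
	netbsd32_pointer_t	buf;	/* 32-bit user pointer */
	example_netbsd32_int64	offset;	/* aligned per the 32-bit ABI */
};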
--- /dev/null
+/* $NetBSD: ofisa_machdep.h,v 1.2 2001/05/30 12:28:40 mrg Exp $ */
+
+/*
+ * Copyright 1998
+ * Digital Equipment Corporation. All rights reserved.
+ *
+ * This software is furnished under license and may be used and
+ * copied only in accordance with the following terms and conditions.
+ * Subject to these conditions, you may download, copy, install,
+ * use, modify and distribute this software in source and/or binary
+ * form. No title or ownership is transferred hereby.
+ *
+ * 1) Any source code used, modified or distributed must reproduce
+ * and retain this copyright notice and list of conditions as
+ * they appear in the source file.
+ *
+ * 2) No right is granted to use any trade name, trademark, or logo of
+ * Digital Equipment Corporation. Neither the "Digital Equipment
+ * Corporation" name nor any trademark or logo of Digital Equipment
+ * Corporation may be used to endorse or promote products derived
+ * from this software without the prior written permission of
+ * Digital Equipment Corporation.
+ *
+ * 3) This software is provided "AS-IS" and any express or implied
+ * warranties, including but not limited to, any implied warranties
+ * of merchantability, fitness for a particular purpose, or
+ * non-infringement are disclaimed. In no event shall DIGITAL be
+ * liable for any damages whatsoever, and in particular, DIGITAL
+ * shall not be liable for special, indirect, consequential, or
+ * incidental damages or damages for lost profits, loss of
+ * revenue or loss of use, whether such damages arise in contract,
+ * negligence, tort, under statute, in equity, at law or otherwise,
+ * even if advised of the possibility of such damage.
+ */
+
+int ofisa_get_isabus_data(int, struct isabus_attach_args *);
+int ofisa_ignore_child(int pphandle, int cphandle);
+
+#if defined(_KERNEL_OPT)
+#include "opt_compat_old_ofw.h"
+#endif
+
+#ifdef COMPAT_OLD_OFW
+
+#define _OFISA_MD_MATCH
+int ofisa_md_match(struct device *, struct cfdata *, void *);
+
+#define _COM_OFISA_MD_MATCH
+#define _COM_OFISA_MD_INTR_FIXUP
+int com_ofisa_md_match(struct device *, struct cfdata *, void *);
+int com_ofisa_md_intr_fixup(struct device *, struct device*, void *,
+ struct ofisa_intr_desc *, int, int);
+
+#define _CS_OFISA_MD_MATCH
+#define _CS_OFISA_MD_REG_FIXUP
+#define _CS_OFISA_MD_INTR_FIXUP
+#define _CS_OFISA_MD_DMA_FIXUP
+#define _CS_OFISA_MD_MEDIA_FIXUP
+int cs_ofisa_md_match(struct device *, struct cfdata *, void *);
+int cs_ofisa_md_reg_fixup(struct device *, struct device *, void *,
+ struct ofisa_reg_desc *, int, int);
+int cs_ofisa_md_intr_fixup(struct device *, struct device *, void *,
+ struct ofisa_intr_desc *, int, int);
+int cs_ofisa_md_dma_fixup(struct device *, struct device *, void *,
+ struct ofisa_dma_desc *, int, int);
+int *cs_ofisa_md_media_fixup(struct device *, struct device *, void *,
+ int *, int *, int *);
+
+#define _LPT_OFISA_MD_MATCH
+#define _LPT_OFISA_MD_INTR_FIXUP
+int lpt_ofisa_md_match(struct device *, struct cfdata *, void *);
+int lpt_ofisa_md_intr_fixup(struct device *, struct device*, void *,
+ struct ofisa_intr_desc *, int, int);
+
+#define _WDC_OFISA_MD_MATCH
+#define _WDC_OFISA_MD_INTR_FIXUP
+int wdc_ofisa_md_match(struct device *, struct cfdata *, void *);
+int wdc_ofisa_md_intr_fixup(struct device *, struct device*, void *,
+ struct ofisa_intr_desc *, int, int);
+
+#endif /* COMPAT_OLD_OFW */
+
+/* The following aren't dependent on old OpenFirmware. */
+#define _CS_OFISA_MD_CFGFLAGS_FIXUP
+int cs_ofisa_md_cfgflags_fixup(struct device *, struct device *,
+ void *);
--- /dev/null
+/* $NetBSD: ofw.h,v 1.3 2009/03/14 14:45:55 dsl Exp $ */
+
+/*
+ * Copyright 1997
+ * Digital Equipment Corporation. All rights reserved.
+ *
+ * This software is furnished under license and may be used and
+ * copied only in accordance with the following terms and conditions.
+ * Subject to these conditions, you may download, copy, install,
+ * use, modify and distribute this software in source and/or binary
+ * form. No title or ownership is transferred hereby.
+ *
+ * 1) Any source code used, modified or distributed must reproduce
+ * and retain this copyright notice and list of conditions as
+ * they appear in the source file.
+ *
+ * 2) No right is granted to use any trade name, trademark, or logo of
+ * Digital Equipment Corporation. Neither the "Digital Equipment
+ * Corporation" name nor any trademark or logo of Digital Equipment
+ * Corporation may be used to endorse or promote products derived
+ * from this software without the prior written permission of
+ * Digital Equipment Corporation.
+ *
+ * 3) This software is provided "AS-IS" and any express or implied
+ * warranties, including but not limited to, any implied warranties
+ * of merchantability, fitness for a particular purpose, or
+ * non-infringement are disclaimed. In no event shall DIGITAL be
+ * liable for any damages whatsoever, and in particular, DIGITAL
+ * shall not be liable for special, indirect, consequential, or
+ * incidental damages or damages for lost profits, loss of
+ * revenue or loss of use, whether such damages arise in contract,
+ * negligence, tort, under statute, in equity, at law or otherwise,
+ * even if advised of the possibility of such damage.
+ */
+
+#ifndef _MACHINE_OFW_H_
+#define _MACHINE_OFW_H_
+
+
+/* Virtual address range reserved for OFW. */
+/* Maybe this should be elsewhere? -JJK */
+#define OFW_VIRT_BASE 0xF7000000
+#define OFW_VIRT_SIZE 0x01000000
+
+
+/* OFW client services handle. */
+typedef int (*ofw_handle_t)(void *);
+
+
+/* Implemented in <ofw/ofw.c> */
+void ofw_init(ofw_handle_t);
+void ofw_boot(int, char *);
+void ofw_getbootinfo(char **, char **);
+void ofw_configmem(void);
+void ofw_configisa(vm_offset_t *, vm_offset_t *);
+void ofw_configisadma(vm_offset_t *);
+int ofw_isadmarangeintersect(vm_offset_t, vm_offset_t,
+ vm_offset_t *, vm_offset_t *);
+vm_offset_t ofw_gettranslation(vm_offset_t);
+vm_offset_t ofw_map(vm_offset_t, vm_size_t, int);
+vm_offset_t ofw_getcleaninfo(void);
+
+#ifdef OFWGENCFG
+/* Implemented in <ofw/ofwgencfg_machdep.c> */
+extern int ofw_handleticks;
+extern void cpu_reboot(int, char *);
+extern void ofrootfound(void);
+#endif
+
+#endif /* !_MACHINE_OFW_H_ */
--- /dev/null
+/* pcb.h,v 1.14.22.2 2007/11/06 23:15:05 matt Exp */
+
+/*
+ * Copyright (c) 2001 Matt Thomas <matt@3am-software.com>.
+ * Copyright (c) 1994 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the RiscBSD team.
+ * 4. The name "RiscBSD" nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM_PCB_H_
+#define _ARM_PCB_H_
+
+#include <machine/frame.h>
+#include <machine/fp.h>
+
+#include <arm/arm32/pte.h>
+#include <arm/reg.h>
+
+struct pcb_arm32 {
+ /*
+ * WARNING!
+ * cpuswitchto.S relies on pcb32_r8 being quad-aligned in struct pcb
+ * (due to the use of "strd" when compiled for XSCALE)
+ */
+ u_int pcb32_r8 __aligned(8); /* used */
+ u_int pcb32_r9; /* used */
+ u_int pcb32_r10; /* used */
+ u_int pcb32_r11; /* used */
+ u_int pcb32_r12; /* used */
+ u_int pcb32_sp; /* used */
+ u_int pcb32_lr;
+ u_int pcb32_pc;
+
+ /*
+ * ARMv6 has two user thread/process id registers which can hold
+	 * arbitrary 32-bit values.
+ */
+ u_int pcb32_user_pid_rw; /* p15, 0, Rd, c13, c0, 2 */
+ u_int pcb32_user_pid_ro; /* p15, 0, Rd, c13, c0, 3 */
+};
+#define pcb_pagedir pcb_un.un_32.pcb32_pagedir
+#define pcb_pl1vec pcb_un.un_32.pcb32_pl1vec
+#define pcb_l1vec pcb_un.un_32.pcb32_l1vec
+#define pcb_dacr pcb_un.un_32.pcb32_dacr
+#define pcb_cstate pcb_un.un_32.pcb32_cstate
+#define pcb_user_pid_rw pcb_un.un_32.pcb32_user_pid_rw
+#ifdef __PROG32
+#define pcb_sp pcb_un.un_32.pcb32_sp
+#endif
+
+struct pcb_arm26 {
+ struct switchframe *pcb26_sf;
+};
+#define pcb_sf pcb_un.un_26.pcb26_sf
+#ifdef __PROG26
+#define pcb_sp pcb_sf.sf_r13
+#endif
+
+/*
+ * WARNING!
+ * See warning for struct pcb_arm32, above, before changing struct pcb!
+ */
+struct pcb {
+ union {
+ struct pcb_arm32 un_32;
+ struct pcb_arm26 un_26;
+ } pcb_un;
+ void * pcb_onfault; /* On fault handler */
+ struct fpe_sp_state pcb_fpstate; /* FPA Floating Point state */
+ struct vfpreg pcb_vfp; /* VFP registers */
+};
+#define pcb_ff pcb_fpstate /* for arm26 */
+
+/*
+ * No additional data for core dumps.
+ */
+struct md_coredump {
+ int md_empty;
+};
+
+#endif /* _ARM_PCB_H_ */
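Given the alignment warning inside struct pcb_arm32, a compile-time check is the kind of thing one might place next to the structure. The line below is a hypothetical sketch, not something this header contains; it assumes a compile-time assertion macro such as NetBSD's __CTASSERT() (from <sys/cdefs.h>) and offsetof() are available where it is compiled.

/* Hypothetical check: the strd-based context-switch code requires pcb32_r8
 * to land on an 8-byte boundary within struct pcb. */
__CTASSERT(offsetof(struct pcb, pcb_un.un_32.pcb32_r8) % 8 == 0);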
--- /dev/null
+/* $NetBSD: pci_machdep.h,v 1.9 2012/09/06 02:02:03 matt Exp $ */
+
+/*
+ * Modified for arm32 by Mark Brinicombe
+ *
+ * from: sys/arch/alpha/pci/pci_machdep.h
+ *
+ * Copyright (c) 1996 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#ifndef _ARM_PCI_MACHDEP_H_
+#define _ARM_PCI_MACHDEP_H_
+/*
+ * Machine-specific definitions for PCI autoconfiguration.
+ */
+
+/*
+ * Types provided to machine-independent PCI code
+ */
+typedef struct arm32_pci_chipset *pci_chipset_tag_t;
+typedef u_long pcitag_t;
+typedef u_long pci_intr_handle_t;
+
+/*
+ * Forward declarations.
+ */
+struct pci_attach_args;
+
+/*
+ * arm32-specific PCI structure and type definitions.
+ * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE.
+ */
+struct arm32_pci_chipset {
+ void *pc_conf_v;
+ void (*pc_attach_hook)(device_t, device_t,
+ struct pcibus_attach_args *);
+ int (*pc_bus_maxdevs)(void *, int);
+ pcitag_t (*pc_make_tag)(void *, int, int, int);
+ void (*pc_decompose_tag)(void *, pcitag_t, int *,
+ int *, int *);
+ pcireg_t (*pc_conf_read)(void *, pcitag_t, int);
+ void (*pc_conf_write)(void *, pcitag_t, int, pcireg_t);
+
+ void *pc_intr_v;
+ int (*pc_intr_map)(const struct pci_attach_args *,
+ pci_intr_handle_t *);
+ const char *(*pc_intr_string)(void *, pci_intr_handle_t);
+ const struct evcnt *(*pc_intr_evcnt)(void *, pci_intr_handle_t);
+ void *(*pc_intr_establish)(void *, pci_intr_handle_t,
+ int, int (*)(void *), void *);
+ void (*pc_intr_disestablish)(void *, void *);
+
+#ifdef __HAVE_PCI_CONF_HOOK
+ int (*pc_conf_hook)(void *, int, int, int, pcireg_t);
+#endif
+ void (*pc_conf_interrupt)(void *, int, int, int, int, int *);
+
+ uint32_t pc_cfg_cmd;
+};
+
+/*
+ * Functions provided to machine-independent PCI code.
+ */
+#define pci_attach_hook(p, s, pba) \
+ (*(pba)->pba_pc->pc_attach_hook)((p), (s), (pba))
+#define pci_bus_maxdevs(c, b) \
+ (*(c)->pc_bus_maxdevs)((c)->pc_conf_v, (b))
+#define pci_make_tag(c, b, d, f) \
+ (*(c)->pc_make_tag)((c)->pc_conf_v, (b), (d), (f))
+#define pci_decompose_tag(c, t, bp, dp, fp) \
+ (*(c)->pc_decompose_tag)((c)->pc_conf_v, (t), (bp), (dp), (fp))
+#define pci_conf_read(c, t, r) \
+ (*(c)->pc_conf_read)((c)->pc_conf_v, (t), (r))
+#define pci_conf_write(c, t, r, v) \
+ (*(c)->pc_conf_write)((c)->pc_conf_v, (t), (r), (v))
+#define pci_intr_map(pa, ihp) \
+ (*(pa)->pa_pc->pc_intr_map)((pa), (ihp))
+#define pci_intr_string(c, ih) \
+ (*(c)->pc_intr_string)((c)->pc_intr_v, (ih))
+#define pci_intr_evcnt(c, ih) \
+ (*(c)->pc_intr_evcnt)((c)->pc_intr_v, (ih))
+#define pci_intr_establish(c, ih, l, h, a) \
+ (*(c)->pc_intr_establish)((c)->pc_intr_v, (ih), (l), (h), (a))
+#define pci_intr_disestablish(c, iv) \
+ (*(c)->pc_intr_disestablish)((c)->pc_intr_v, (iv))
+#ifdef __HAVE_PCI_CONF_HOOK
+#define pci_conf_hook(c, b, d, f, id) \
+ (*(c)->pc_conf_hook)((c)->pc_conf_v, (b), (d), (f), (id))
+#endif
+#define pci_conf_interrupt(c, b, d, i, s, p) \
+ (*(c)->pc_conf_interrupt)((c)->pc_conf_v, (b), (d), (i), (s), (p))
+
+#endif /* _ARM_PCI_MACHDEP_H_ */
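As a rough illustration of how machine-independent code consumes the accessors above, here is a minimal, hypothetical helper that reads a device's ID register through pci_conf_read(). The pa_pc and pa_tag members, PCI_ID_REG, PCI_VENDOR, and PCI_PRODUCT come from the MI PCI headers; the function name is invented.

#include <sys/systm.h>		/* kernel printf() */
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/* Hypothetical MI consumer: print a device's vendor and product IDs. */
static void
example_print_pci_id(const struct pci_attach_args *pa)
{
	pcireg_t id = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG);

	printf("vendor 0x%04x product 0x%04x\n",
	    PCI_VENDOR(id), PCI_PRODUCT(id));
}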
--- /dev/null
+/* $NetBSD: pio.h,v 1.2 2011/07/01 21:20:33 dyoung Exp $ */
+
+/*
+ * Copyright 1997
+ * Digital Equipment Corporation. All rights reserved.
+ *
+ * This software is furnished under license and may be used and
+ * copied only in accordance with the following terms and conditions.
+ * Subject to these conditions, you may download, copy, install,
+ * use, modify and distribute this software in source and/or binary
+ * form. No title or ownership is transferred hereby.
+ *
+ * 1) Any source code used, modified or distributed must reproduce
+ * and retain this copyright notice and list of conditions as
+ * they appear in the source file.
+ *
+ * 2) No right is granted to use any trade name, trademark, or logo of
+ * Digital Equipment Corporation. Neither the "Digital Equipment
+ * Corporation" name nor any trademark or logo of Digital Equipment
+ * Corporation may be used to endorse or promote products derived
+ * from this software without the prior written permission of
+ * Digital Equipment Corporation.
+ *
+ * 3) This software is provided "AS-IS" and any express or implied
+ * warranties, including but not limited to, any implied warranties
+ * of merchantability, fitness for a particular purpose, or
+ * non-infringement are disclaimed. In no event shall DIGITAL be
+ * liable for any damages whatsoever, and in particular, DIGITAL
+ * shall not be liable for special, indirect, consequential, or
+ * incidental damages or damages for lost profits, loss of
+ * revenue or loss of use, whether such damages arise in contract,
+ * negligence, tort, under statute, in equity, at law or otherwise,
+ * even if advised of the possibility of such damage.
+ */
+
+#ifndef _ARM32_PIO_H_
+#define _ARM32_PIO_H_
+
+#include <sys/bus.h>
+
+extern struct bus_space isa_io_bs_tag;
+
+#define inb(port) bus_space_read_1( &isa_io_bs_tag, (bus_space_handle_t)isa_io_bs_tag.bs_cookie, (port))
+#define outb(port, byte) bus_space_write_1(&isa_io_bs_tag, (bus_space_handle_t)isa_io_bs_tag.bs_cookie, (port), (byte))
+
+#endif /* _ARM32_PIO_H_ */
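The inb()/outb() macros above route classic ISA port I/O through the bus_space tag for ISA I/O space. A hedged kernel-side sketch follows; the 16550 UART base address 0x3f8, the register offsets, and the function names are illustrative, not anything this header defines.

#include <sys/types.h>
#include <machine/pio.h>	/* assumed include path for this header */

/* Hypothetical example: poke a 16550-style UART through ISA port I/O. */
static uint8_t
example_read_uart_lsr(void)
{
	return inb(0x3f8 + 5);	/* Line Status Register at base + 5 */
}

static void
example_write_uart_thr(uint8_t c)
{
	outb(0x3f8, c);		/* Transmit Holding Register at base + 0 */
}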
--- /dev/null
+/* $NetBSD: pmc.h,v 1.3 2002/08/09 05:27:10 thorpej Exp $ */
+
+/*
+ * Copyright (c) 2002 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Allen Briggs for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARM_PMC_H_
+#define _ARM_PMC_H_
+
+#define PMC_CLASS_I80200 0x10000 /* i80200-compatible */
+#define PMC_TYPE_I80200_CCNT 0x10001 /* cycle counter */
+#define PMC_TYPE_I80200_PMCx 0x10002 /* performance counter */
+
+#if defined(_KERNEL)
+
+#include <arm/cpuconf.h>
+
+struct arm_pmc_funcs {
+ void (*fork)(struct proc *p1, struct proc *p2);
+ int (*num_counters)(void);
+ int (*counter_type)(int ctr);
+ void (*save_context)(struct proc *p);
+ void (*restore_context)(struct proc *p);
+ void (*enable_counter)(struct proc *p, int ctr);
+ void (*disable_counter)(struct proc *p, int ctr);
+ void (*accumulate)(struct proc *parent, struct proc *child);
+ void (*process_exit)(struct proc *p);
+ int (*configure_counter)(struct proc *p, int ctr, struct pmc_counter_cfg *cfg);
+ int (*get_counter_val)(struct proc *p, int ctr, int flags, uint64_t *pval);
+ int (*counter_isconfigured)(struct proc *p, int ctr);
+ int (*counter_isrunning)(struct proc *p, int ctr);
+ int (*start_profiling)(int ctr, struct pmc_counter_cfg *cfg);
+ int (*stop_profiling)(int ctr);
+ int (*alloc_kernel_ctr)(int ctr, struct pmc_counter_cfg *cfg);
+ int (*free_kernel_ctr)(int ctr);
+};
+extern struct arm_pmc_funcs *arm_pmc;
+
+#define pmc_md_fork(p1,p2) (arm_pmc->fork((p1),(p2)))
+#define pmc_get_num_counters() (arm_pmc->num_counters())
+#define pmc_get_counter_type(c) (arm_pmc->counter_type(c))
+#define pmc_save_context(p) (arm_pmc->save_context(p))
+#define pmc_restore_context(p) (arm_pmc->restore_context(p))
+#define pmc_enable_counter(p,c) (arm_pmc->enable_counter((p),(c)))
+#define pmc_disable_counter(p,c) (arm_pmc->disable_counter((p),(c)))
+#define pmc_accumulate(p1,p2) (arm_pmc->accumulate((p1),(p2)))
+#define pmc_process_exit(p1)	(arm_pmc->process_exit(p1))
+#define pmc_counter_isconfigured(p,c) (arm_pmc->counter_isconfigured((p),(c)))
+#define pmc_counter_isrunning(p,c) (arm_pmc->counter_isrunning((p),(c)))
+#define pmc_start_profiling(c,f) (arm_pmc->start_profiling((c),(f)))
+#define pmc_stop_profiling(c) (arm_pmc->stop_profiling((c)))
+#define pmc_alloc_kernel_counter(c,f) (arm_pmc->alloc_kernel_ctr((c),(f)))
+#define pmc_free_kernel_counter(c) (arm_pmc->free_kernel_ctr((c)))
+#define pmc_configure_counter(p,c,f) \
+ (arm_pmc->configure_counter((p),(c),(f)))
+#define pmc_get_counter_value(p,c,f,pv) \
+ (arm_pmc->get_counter_val((p),(c),(f),(pv)))
+
+#define PMC_ENABLED(p) (p)->p_md.pmc_enabled
+
+#endif /* defined(_KERNEL) */
+
+#endif /* _ARM_PMC_H_ */
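The pmc_* macros above simply dispatch through whatever function table arm_pmc points at. Below is a sketch of how a CPU-specific backend might wire itself in; every name is hypothetical and only a couple of table members are shown.

/* Hypothetical backend registration; not part of this header. */
static int
example_pmc_num_counters(void)
{
	return 2;	/* e.g. the cycle counter plus one event counter */
}

static struct arm_pmc_funcs example_pmc_ops = {
	.num_counters = example_pmc_num_counters,
	/* the remaining members would be filled in the same way */
};

void
example_pmc_attach(void)
{
	arm_pmc = &example_pmc_ops;	/* the pmc_* macros now reach this backend */
}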
--- /dev/null
+/* $NetBSD: reg.h,v 1.2 2008/03/15 10:16:43 rearnsha Exp $ */
+
+/*
+ * Copyright (C) 1994, 1995 Frank Lancaster
+ * Copyright (C) 1994, 1995 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * @(#)reg.h 5.5 (Berkeley) 1/18/91
+ */
+
+#ifndef _ARM32_REG_H_
+#define _ARM32_REG_H_
+
+#include <machine/fp.h>
+
+struct reg {
+ unsigned int r[13];
+ unsigned int r_sp;
+ unsigned int r_lr;
+ unsigned int r_pc;
+ unsigned int r_cpsr;
+};
+
+struct fpreg {
+ unsigned int fpr_fpsr;
+ fp_reg_t fpr[8];
+};
+
+struct vfpreg {
+ uint32_t vfp_fpexc;
+ uint32_t vfp_fpscr;
+ uint32_t vfp_fpinst;
+ uint32_t vfp_fpinst2;
+ uint32_t vfp_regs[33]; /* In case we need fstmx format. */
+};
+
+#endif /* !_ARM32_REG_H_ */
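struct reg above is the register layout a debugger sees through ptrace(2). A minimal userland sketch follows, with error handling and management of the traced child omitted; the function name is invented.

#include <sys/types.h>
#include <sys/ptrace.h>
#include <machine/reg.h>
#include <stdio.h>

/* Hypothetical: print the pc and sp of a stopped, traced child. */
static void
example_show_pc(pid_t pid)
{
	struct reg r;

	if (ptrace(PT_GETREGS, pid, &r, 0) == 0)
		printf("pc=%#x sp=%#x\n", r.r_pc, r.r_sp);
}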
--- /dev/null
+/* $NetBSD: swi.h,v 1.1 2002/01/13 15:03:06 bjh21 Exp $ */
+
+/*
+ * This file is in the Public Domain.
+ * Ben Harris, 2002.
+ */
+
+#ifndef _ARM_SWI_H_
+#define _ARM_SWI_H_
+
+#define SWI_OS_MASK 0xf00000
+#define SWI_OS_RISCOS 0x000000
+#define SWI_OS_RISCIX 0x800000
+#define SWI_OS_LINUX 0x900000
+#define SWI_OS_NETBSD 0xa00000
+#define SWI_OS_ARM 0xf00000
+
+#define SWI_IMB 0xf00000
+#define SWI_IMBrange 0xf00001
+
+#endif
+
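The OS fields above partition the SWI (software interrupt) number space between operating systems. As a hedged illustration only, an OS-specific SWI number can be formed by OR-ing the OS field with a call number; the call number 1 below is just an example value, not something this header assigns.

/* Illustrative composition of a NetBSD-range SWI number; not from this header. */
#define EXAMPLE_SYSCALL_NUMBER	1
#define EXAMPLE_NETBSD_SWI	(SWI_OS_NETBSD | EXAMPLE_SYSCALL_NUMBER)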
--- /dev/null
+/* $NetBSD: sysarch.h,v 1.8 2012/08/12 05:05:47 matt Exp $ */
+
+/*
+ * Copyright (c) 1996-1997 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _ARM_SYSARCH_H_
+#define _ARM_SYSARCH_H_
+
+#include <sys/cdefs.h>
+
+/*
+ * Pickup definition of uintptr_t
+ */
+#include <sys/stdint.h>
+
+/*
+ * Architecture specific syscalls (arm)
+ */
+
+#define ARM_SYNC_ICACHE 0
+#define ARM_DRAIN_WRITEBUF 1
+#define ARM_VFP_FPSCR 2
+
+struct arm_sync_icache_args {
+ uintptr_t addr; /* Virtual start address */
+ size_t len; /* Region size */
+};
+
+struct arm_vfp_fpscr_args {
+ uint32_t fpscr_clear; /* bits to clear */
+ uint32_t fpscr_set; /* bits to set */
+};
+
+#ifndef _KERNEL
+__BEGIN_DECLS
+int arm_sync_icache(u_int addr, int len);
+int arm_drain_writebuf(void);
+int sysarch(int, void *);
+__END_DECLS
+#endif
+
+#endif /* !_ARM_SYSARCH_H_ */
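A typical user of ARM_SYNC_ICACHE is code that writes instructions at run time (a JIT, or a dynamic linker fixing up stubs) and must make them visible to the instruction cache before jumping to them. The userland sketch below calls the wrapper functions declared above; the helper's name and the idea of pairing it with arm_drain_writebuf() are illustrative assumptions.

#include <sys/types.h>
#include <machine/sysarch.h>	/* assumed include path for this header */

/* Hypothetical helper: flush freshly written code before executing it. */
static void
example_flush_code(void *code, size_t len)
{
	arm_sync_icache((u_int)(uintptr_t)code, (int)len);
	arm_drain_writebuf();
}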
--- /dev/null
+/* $NetBSD: trap.h,v 1.8 2008/01/19 13:11:09 chris Exp $ */
+
+/*
+ * Copyright (c) 1995 Mark Brinicombe.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Mark Brinicombe.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * trap.h
+ *
+ * Various trap definitions
+ */
+
+/*
+ * Instructions used for breakpoints.
+ *
+ * These are undefined instructions.
+ * Technically the userspace breakpoint could be a SWI but we want to
+ * keep this the same as IPKDB which needs an undefined instruction as
+ * a break point.
+ *
+ * Ideally ARM would define several standard instruction sequences for
+ * use as breakpoints.
+ *
+ * The BKPT instruction isn't much use to us, since its behaviour is
+ * unpredictable on ARMv3 and lower.
+ *
+ * The ARM ARM says that for maximum compatibility, we should use undefined
+ * instructions that look like 0x.7f...f. .
+ */
+
+#define GDB_BREAKPOINT 0xe6000011 /* Used by GDB 4.x */
+#define IPKDB_BREAKPOINT_DEAD 0xe6000010 /* was used by IPKDB */
+#define GDB5_BREAKPOINT 0xe7ffdefe /* Used by GDB 5.0 */
+#define GDB_THUMB_BREAKPOINT 0xdefe /* Thumb in GDB */
+#define KERNEL_BREAKPOINT 0xe7ffffff /* Used by DDB */
+
+#define KBPT_ASM ".word 0xe7ffdefe"
+
+#define USER_BREAKPOINT GDB_BREAKPOINT
+
+/* End of trap.h */
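KBPT_ASM above is the textual form of the kernel breakpoint instruction, which makes it easy to plant a breakpoint from C. The helper below is a hypothetical sketch; the kernel does not provide it under this name.

/* Hypothetical: drop into the kernel debugger at this point in the code. */
static __inline void
example_breakpoint(void)
{
	__asm volatile(KBPT_ASM);
}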
--- /dev/null
+/* $NetBSD: undefined.h,v 1.12 2009/03/14 14:45:55 dsl Exp $ */
+
+/*
+ * Copyright (c) 1995-1996 Mark Brinicombe.
+ * Copyright (c) 1995 Brini.
+ * All rights reserved.
+ *
+ * This code is derived from software written for Brini by Mark Brinicombe
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Brini.
+ * 4. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * RiscBSD kernel project
+ *
+ * undefined.h
+ *
+ * Undefined instruction types, symbols and prototypes
+ *
+ * Created : 08/02/95
+ */
+
+
+#ifndef _ARM_UNDEFINED_H_
+#define _ARM_UNDEFINED_H_
+#ifdef _KERNEL
+
+#include <sys/queue.h>
+
+typedef int (*undef_handler_t)(unsigned int, unsigned int, trapframe_t *, int);
+
+/*
+ * Enumeration of coprocessor numbers. Values may be duplicated
+ * (the iWMMX coprocessor clashes with the FPA, for example), but
+ * keep this table in numeric order.
+ */
+enum arm_coprocs {
+ FPA_COPROC = 1,
+ FPA_COPROC2 = 2,
+ VFP_COPROC = 10,
+ VFP_COPROC2 = 11,
+ DEBUG_COPROC = 14,
+ SYSTEM_COPROC = 15,
+ /*
+	 * The following are not really co-processors, but are on the end
+ * of the unknown instruction table for each coproc.
+ */
+ CORE_UNKNOWN_HANDLER = 16,
+#ifdef THUMB_CODE
+ THUMB_UNKNOWN_HANDLER = 17,
+#endif
+ NUM_UNKNOWN_HANDLERS /* Last entry */
+};
+
+/* Prototypes for undefined.c */
+
+void *install_coproc_handler(int, undef_handler_t);
+void remove_coproc_handler(void *);
+void undefined_init(void);
+
+/*
+ * XXX Stuff below here is for use before malloc() is available. Most code
+ * shouldn't use it.
+ */
+
+struct undefined_handler {
+ LIST_ENTRY(undefined_handler) uh_link;
+ undef_handler_t uh_handler;
+};
+
+/*
+ * Handlers installed using install_coproc_handler_static shouldn't be
+ * removed.
+ */
+void install_coproc_handler_static(int, struct undefined_handler *);
+
+/* Calls up to undefined.c from trap handlers */
+void undefinedinstruction(struct trapframe *);
+
+#endif
+
+/* End of undefined.h */
+
+#endif /* _ARM_UNDEFINED_H_ */
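install_coproc_handler() above is how a subsystem claims the undefined-instruction traps for a particular coprocessor. A hedged sketch of registering a VFP handler follows; the handler, its body, and the attach function are invented names, and the kernel headers that provide trapframe_t are assumed to be included.

/* Hypothetical VFP undefined-instruction handler and its registration. */
static int
example_vfp_handler(unsigned int addr, unsigned int insn,
    trapframe_t *tf, int fault_code)
{
	/* Return 0 if the instruction was handled here, nonzero otherwise. */
	return 1;
}

static void *example_vfp_cookie;

static void
example_vfp_attach(void)
{
	example_vfp_cookie = install_coproc_handler(VFP_COPROC,
	    example_vfp_handler);
}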
--- /dev/null
+/* $NetBSD: vfpreg.h,v 1.6 2012/09/22 19:45:53 matt Exp $ */
+
+/*
+ * Copyright (c) 2008 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+ * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VFPREG_H
+#define _VFPREG_H
+
+/* FPSID register */
+
+#define VFP_FPSID_IMP_MSK 0xff000000 /* Implementer */
+#define VFP_FPSID_IMP_ARM 0x41000000 /* Implementer: ARM */
+#define VFP_FPSID_SW 0x00800000 /* VFP implemented in SW */
+#define VFP_FPSID_FMT_MSK 0x00600000 /* FLDMX/FSTMX Format */
+#define VFP_FPSID_FMT_1 0x00000000 /* Standard format 1 */
+#define VFP_FPSID_FMT_2 0x00200000 /* Standard format 2 */
+#define VFP_FPSID_FMT_WEIRD 0x00600000 /* Non-standard format */
+#define VFP_FPSID_SP 0x00100000 /* Only single precision */
+#define VFP_FPSID_ARCH_MSK 0x000f0000 /* Architecture */
+#define VFP_FPSID_ARCH_V1 0x00000000 /* Arch VFPv1 */
+#define VFP_FPSID_ARCH_V2 0x00010000 /* Arch VFPv2 */
+#define VFP_FPSID_ARCH_V3_2 0x00020000 /* Arch VFPv3 (subarch v2) */
+#define VFP_FPSID_ARCH_V3 0x00030000 /* Arch VFPv3 (no subarch) */
+#define VFP_FPSID_ARCH_V3_3 0x00040000 /* Arch VFPv3 (subarch v3) */
+#define VFP_FPSID_PART_MSK 0x0000ff00 /* Part number */
+#define VFP_FPSID_PART_VFP10 0x00001000 /* VFP10 */
+#define VFP_FPSID_PART_VFP11 0x00002000 /* VFP11 */
+#define VFP_FPSID_PART_VFP30 0x00003000 /* VFP30 */
+#define VFP_FPSID_VAR_MSK 0x000000f0 /* Variant */
+#define VFP_FPSID_VAR_ARM10 0x000000a0 /* Variant ARM10 */
+#define VFP_FPSID_VAR_ARM11 0x000000b0 /* Variant ARM11 */
+#define VFP_FPSID_REV_MSK 0x0000000f /* Revision */
+
+#define FPU_VFP10_ARM10E 0x410001a0 /* Really a VFPv2 part */
+#define FPU_VFP11_ARM11 0x410120b0
+#define FPU_VFP_CORTEXA5 0x41023050
+#define FPU_VFP_CORTEXA7 0x41023070
+#define FPU_VFP_CORTEXA8 0x410330c0
+#define FPU_VFP_CORTEXA9 0x41033090
+
+#define VFP_FPEXC_EX 0x80000000 /* Exception status bit */
+#define VFP_FPEXC_EN 0x40000000 /* VFP Enable bit */
+#define VFP_FPEXC_FP2V 0x10000000 /* FPINST2 instruction valid */
+#define VFP_FPEXC_VECITR 0x00000700 /* Vector iteration count */
+#define VFP_FPEXC_INV 0x00000080 /* Input exception flag */
+#define VFP_FPEXC_UFC 0x00000080 /* Potential underflow flag */
+#define VFP_FPEXC_OFC 0x00000080 /* Potential overflow flag */
+#define VFP_FPEXC_IOC 0x00000080 /* Potential inv. op. flag */
+
+#define VFP_FPSCR_N 0x80000000 /* set if compare <= result */
+#define VFP_FPSCR_Z 0x40000000 /* set if compare = result */
+#define VFP_FPSCR_C 0x20000000 /* set if compare (=,>=,UNORD) result */
+#define VFP_FPSCR_V 0x10000000 /* set if compare UNORD result */
+#define VFP_FPSCR_DN 0x02000000 /* Default NaN mode */
+#define VFP_FPSCR_FZ 0x01000000 /* Flush-to-zero mode */
+#define VFP_FPSCR_RMODE 0x00c00000 /* Rounding Mode */
+#define VFP_FPSCR_RZ 0x00c00000 /* round towards zero (RZ) */
+#define VFP_FPSCR_RM	0x00800000 /* round towards -INF (RM) */
+#define VFP_FPSCR_RP	0x00400000 /* round towards +INF (RP) */
+#define VFP_FPSCR_RN 0x00000000 /* round to nearest (RN) */
+#define VFP_FPSCR_STRIDE 0x00300000 /* Vector Stride */
+#define VFP_FPSCR_LEN 0x00070000 /* Vector Length */
+#define VFP_FPSCR_IDE	0x00008000 /* Input Subnormal Exception Enable */
+#define VFP_FPSCR_ESUM 0x00001f00 /* IXE|UFE|OFE|DZE|IOE */
+#define VFP_FPSCR_IXE 0x00001000 /* Inexact Exception Enable */
+#define VFP_FPSCR_UFE 0x00000800 /* Underflow Exception Enable */
+#define VFP_FPSCR_OFE 0x00000400 /* Overflow Exception Enable */
+#define VFP_FPSCR_DZE	0x00000200 /* DivByZero Exception Enable */
+#define VFP_FPSCR_IOE	0x00000100 /* Invalid Operation Exception Enable */
+#define VFP_FPSCR_IDC	0x00000080 /* Input Subnormal Cumulative Flag */
+#define VFP_FPSCR_CSUM 0x0000001f /* IXC|UFC|OFC|DZC|IOC */
+#define VFP_FPSCR_IXC 0x00000010 /* Inexact Cumulative Flag */
+#define VFP_FPSCR_UFC 0x00000008 /* Underflow Cumulative Flag */
+#define VFP_FPSCR_OFC 0x00000004 /* Overflow Cumulative Flag */
+#define VFP_FPSCR_DZC 0x00000002 /* DivByZero Cumulative Flag */
+#define VFP_FPSCR_IOC 0x00000001 /* Invalid Operation Cumulative Flag */
+
+#endif /* _VFPREG_H */
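The FPSID masks above let code classify the VFP hardware it is running on. The decoder below is a hedged sketch: reading FPSID itself (an mrc from the system coprocessor space) is omitted, and the function name is invented.

#include <stdint.h>

/* Hypothetical classifier: is this an ARM-implemented VFPv3 unit? */
static int
example_is_arm_vfpv3(uint32_t fpsid)
{
	if ((fpsid & VFP_FPSID_IMP_MSK) != VFP_FPSID_IMP_ARM)
		return 0;
	switch (fpsid & VFP_FPSID_ARCH_MSK) {
	case VFP_FPSID_ARCH_V3:
	case VFP_FPSID_ARCH_V3_2:
	case VFP_FPSID_ARCH_V3_3:
		return 1;
	default:
		return 0;
	}
}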