./usr/lib/libgcc_s.so minix-base
./usr/lib/libgcc_s.so.1 minix-base
./usr/lib/libgcc_s.so.1.0 minix-base
+./usr/lib/libkvm.so minix-base
+./usr/lib/libkvm.so.6 minix-base
+./usr/lib/libkvm.so.6.0 minix-base
./usr/lib/liblua.so minix-base
./usr/lib/liblua.so.1.0 minix-base obsolete
./usr/lib/liblua.so.1 minix-base obsolete
./usr/lib/bc/libhgfs.a minix-comp bitcode
./usr/lib/bc/libiberty.a minix-comp bitcode,binutils
./usr/lib/bc/libinputdriver.a minix-comp bitcode
+./usr/lib/bc/libkvm.a minix-comp bitcode
./usr/lib/bc/libl.a minix-comp bitcode
./usr/lib/bc/liblua.a minix-comp bitcode
./usr/lib/bc/liblwip.a minix-comp bitcode
./usr/lib/libgcov.a minix-comp gcc=45,!gcccmds
./usr/lib/libinputdriver.a minix-comp
./usr/lib/libinputdriver_pic.a minix-comp
+./usr/lib/libkvm.a minix-comp
+./usr/lib/libkvm_pic.a minix-comp
./usr/lib/libl.a minix-comp
./usr/lib/liblua.a minix-comp
./usr/lib/liblua_pic.a minix-comp
./usr/man/man3/keypad.3 minix-man
./usr/man/man3/killchar.3 minix-man
./usr/man/man3/killpg.3 minix-man
+./usr/man/man3/kvm.3 minix-man
+./usr/man/man3/kvm_close.3 minix-man
+./usr/man/man3/kvm_dump.3 minix-man
+./usr/man/man3/kvm_dump_inval.3 minix-man
+./usr/man/man3/kvm_dump_mkheader.3 minix-man
+./usr/man/man3/kvm_dump_wrtheader.3 minix-man
+./usr/man/man3/kvm_getargv.3 minix-man
+./usr/man/man3/kvm_getargv2.3 minix-man
+./usr/man/man3/kvm_getenvv.3 minix-man
+./usr/man/man3/kvm_getenvv2.3 minix-man
+./usr/man/man3/kvm_geterr.3 minix-man
+./usr/man/man3/kvm_getfiles.3 minix-man
+./usr/man/man3/kvm_getkernelname.3 minix-man
+./usr/man/man3/kvm_getloadavg.3 minix-man
+./usr/man/man3/kvm_getlwps.3 minix-man
+./usr/man/man3/kvm_getproc2.3 minix-man
+./usr/man/man3/kvm_getprocs.3 minix-man
+./usr/man/man3/kvm_nlist.3 minix-man
+./usr/man/man3/kvm_open.3 minix-man
+./usr/man/man3/kvm_openfiles.3 minix-man
+./usr/man/man3/kvm_read.3 minix-man
+./usr/man/man3/kvm_write.3 minix-man
./usr/man/man3/l64a.3 minix-man
./usr/man/man3/l64a_r.3 minix-man
./usr/man/man3/labs.3 minix-man
SUBDIR+= \
libbz2 \
libcrypt \
- libm \
+ libkvm libm \
libpci libprop \
libpuffs librmt \
libterminfo \
# closefrom.c confstr.c extattr.c \
# pthread_atfork.c
#
-# To be ported
-# nlist.c nlist_aout.c nlist_coff.c nlist_ecoff.c nlist_elf32.c nlist_elf64.c
-#
# Not useful but portable
# disklabel.c
getpass.c getprogname.c getpwent.c getttyent.c \
getusershell.c glob.c humanize_number.c initdir.c initgroups.c \
isascii.c isatty.c isctype.c lockf.c nftw.c \
- nice.c \
- opendir.c pause.c popen.c posix_spawn_sched.c \
+ nice.c nlist.c nlist_aout.c nlist_coff.c nlist_ecoff.c nlist_elf32.c \
+ nlist_elf64.c opendir.c pause.c popen.c posix_spawn_sched.c \
posix_spawn_fileactions.c posix_spawnp.c psignal.c \
ptree.c pwcache.c pw_scan.c raise.c randomid.c rb.c readdir.c \
rewinddir.c scandir.c seekdir.c setdomainname.c \
--- /dev/null
+# $NetBSD: Makefile,v 1.50 2014/08/10 23:39:08 matt Exp $
+# from: @(#)Makefile 8.1 (Berkeley) 6/4/93
+
+WARNS= 3
+
+USE_FORT?= yes # used primarily by setgid programs
+
+USE_SHLIBDIR= yes
+
+LIB= kvm
+CPPFLAGS+=-DLIBC_SCCS -I${NETBSDSRCDIR}/sys -D_KMEMUSER
+
+SRCS= kvm.c kvm_file.c kvm_getloadavg.c kvm_proc.c
+
+# This library should compile to the same thing on all variants of
+# ${MACHINE} with the same ${MACHINE_ARCH} so shared /usr works.
+# If it is necessary to run different code based on ${MACHINE} then
+# the dispatch on ${MACHINE} should be done at run time (see m68k).
+
+.include <bsd.own.mk>
+.if exists(kvm_${KVM_MACHINE_ARCH}.c)
+SRCS+= kvm_${KVM_MACHINE_ARCH}.c
+.elif exists(kvm_${KVM_MACHINE_CPU}.c)
+SRCS+= kvm_${KVM_MACHINE_CPU}.c
+.elif exists(kvm_${MACHINE_ARCH}.c)
+SRCS+= kvm_${MACHINE_ARCH}.c
+.elif exists(kvm_${MACHINE_CPU}.c)
+SRCS+= kvm_${MACHINE_CPU}.c
+.else
+.BEGIN:
+ @echo no kvm_xx.c for ${MACHINE_ARCH} nor ${MACHINE_CPU}
+ @false
+.endif
+
+.if ${MACHINE_ARCH} == "i386"
+LINTFLAGS+=-w
+SRCS+= kvm_i386pae.c # Hook PAE support in the i386 build
+.endif
+
+# Additional modules needed for m68k
+.if (${MACHINE_ARCH} == "m68k" || ${MACHINE_CPU} == "m68k")
+SRCS+= kvm_m68k_cmn.c kvm_sun2.c kvm_sun3.c kvm_sun3x.c
+.endif
+
+MAN= kvm.3 kvm_dump.3 kvm_geterr.3 kvm_getfiles.3 kvm_getloadavg.3 \
+ kvm_getkernelname.3 \
+ kvm_getprocs.3 kvm_nlist.3 kvm_open.3 kvm_read.3 kvm_getlwps.3
+
+MLINKS+=kvm_getprocs.3 kvm_getargv.3 kvm_getprocs.3 kvm_getenvv.3
+MLINKS+=kvm_getprocs.3 kvm_getproc2.3
+MLINKS+=kvm_getprocs.3 kvm_getargv2.3 kvm_getprocs.3 kvm_getenvv2.3
+MLINKS+=kvm_open.3 kvm_openfiles.3 kvm_open.3 kvm_close.3
+MLINKS+=kvm_read.3 kvm_write.3
+MLINKS+=kvm_dump.3 kvm_dump_mkheader.3 kvm_dump.3 kvm_dump_wrtheader.3
+MLINKS+=kvm_dump.3 kvm_dump_inval.3
+
+# This fixes building with MACHINE==sparc64, MACHINE_ARCH==sparc
+# XXX: this is a hack, but until the sparc MD headers are separated
+# out from the sparc AD headers, it's the easiest solution.
+#
+.if (${MACHINE_ARCH} == "sparc") # {
+.if !make(obj) && !make(clean) && !make(cleandir)
+.BEGIN:
+ @([ -h machine ] || \
+ ln -fs ${NETBSDSRCDIR}/sys/arch/sparc/include machine)
+ @([ -h sparc ] || ln -fs ${NETBSDSRCDIR}/sys/arch/sparc/include sparc)
+.NOPATH: machine sparc
+.endif
+CLEANFILES+= machine sparc
+CPPFLAGS+= -I.
+.endif # }
+
+
+.include <bsd.lib.mk>
--- /dev/null
+.\" $NetBSD: kvm.3,v 1.13 2011/09/13 08:53:15 wiz Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm.3 8.1 (Berkeley) 6/4/93
+.\"
+.Dd September 14, 2011
+.Dt KVM 3
+.Os
+.Sh NAME
+.Nm kvm
+.Nd kernel memory interface
+.Sh LIBRARY
+.Lb libkvm
+.Sh DESCRIPTION
+The
+.Nm
+library provides a uniform interface for accessing kernel virtual memory
+images, including live systems and crash dumps.
+Access to live systems is via
+.Pa /dev/mem
+while crash dumps can be examined via the core file generated by
+.Xr savecore 8 .
+The interface behaves identically in both cases.
+Memory can be read and written, kernel symbol addresses can be
+looked up efficiently, and information about user processes can
+be gathered.
+.Pp
+.Fn kvm_open
+is first called to obtain a descriptor for all subsequent calls.
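+.Pp
+As an illustration, a minimal program that opens the running kernel
+read-only, looks up a single symbol, and reads its value could be
+sketched as follows; the symbol
+.Dq _nfiles
+and its type are chosen purely as an example:
+.Bd -literal -offset indent
+#include <sys/types.h>
+#include <err.h>
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <nlist.h>
+#include <stdio.h>
+
+int
+main(void)
+{
+	char errbuf[_POSIX2_LINE_MAX];
+	struct nlist nl[2];
+	kvm_t *kd;
+	int nfiles;
+
+	/* Open the live kernel via /dev/mem and /dev/kmem. */
+	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
+	if (kd == NULL)
+		errx(1, "kvm_openfiles: %s", errbuf);
+
+	/* Resolve the symbol's address, then read its value. */
+	nl[0].n_name = "_nfiles";
+	nl[1].n_name = NULL;
+	if (kvm_nlist(kd, nl) != 0)
+		errx(1, "kvm_nlist: %s", kvm_geterr(kd));
+	if (kvm_read(kd, nl[0].n_value, &nfiles,
+	    sizeof(nfiles)) != (ssize_t)sizeof(nfiles))
+		errx(1, "kvm_read: %s", kvm_geterr(kd));
+
+	printf("nfiles = %d\en", nfiles);
+	return kvm_close(kd);
+}
+.Ed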
+.Sh FILES
+.Bl -tag -width /dev/mem -compact
+.It Pa /dev/mem
+interface to physical memory
+.El
+.Sh COMPATIBILITY
+The kvm interface was first introduced in SunOS.
+A considerable number of programs have been developed that use this
+interface, making backward compatibility highly desirable.
+In most respects, the Sun kvm interface is consistent and clean.
+Accordingly, the generic portion of the interface (i.e.,
+.Fn kvm_open ,
+.Fn kvm_close ,
+.Fn kvm_read ,
+.Fn kvm_write ,
+and
+.Fn kvm_nlist )
+has been incorporated into the
+.Bx
+interface.
+Indeed, many kvm applications (i.e., debuggers and statistical monitors)
+use only this subset of the interface.
+.Pp
+The process interface was not kept.
+This is not a portability issue since any code that manipulates
+processes is inherently machine dependent.
+.Pp
+Finally, the Sun kvm error reporting semantics are poorly defined.
+The library can be configured either to print errors to stderr automatically,
+or to print no error messages at all.
+In the latter case, the nature of the error cannot be determined.
+To overcome this, the
+.Bx
+interface includes a routine,
+.Xr kvm_geterr 3 ,
+to return (not print out) the error message corresponding to the most
+recent error condition on the given descriptor.
+.Sh SEE ALSO
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getkernelname 3 ,
+.Xr kvm_getloadavg 3 ,
+.Xr kvm_getlwps 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
--- /dev/null
+/* $NetBSD: kvm.c,v 1.101 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm.c 8.2 (Berkeley) 2/13/94";
+#else
+__RCSID("$NetBSD: kvm.c,v 1.101 2014/02/19 20:21:22 dsl Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/param.h>
+#include <sys/lwp.h>
+#include <sys/proc.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+
+#include <sys/core.h>
+#include <sys/exec.h>
+#include <sys/kcore.h>
+#include <sys/ksyms.h>
+#include <sys/types.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/cpu.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <nlist.h>
+#include <paths.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <kvm.h>
+
+#include "kvm_private.h"
+
+static int _kvm_get_header(kvm_t *);
+static kvm_t *_kvm_open(kvm_t *, const char *, const char *,
+ const char *, int, char *);
+static int clear_gap(kvm_t *, bool (*)(void *, const void *, size_t),
+ void *, size_t);
+static off_t Lseek(kvm_t *, int, off_t, int);
+static ssize_t Pread(kvm_t *, int, void *, size_t, off_t);
+
+char *
+kvm_geterr(kvm_t *kd)
+{
+ return (kd->errbuf);
+}
+
+const char *
+kvm_getkernelname(kvm_t *kd)
+{
+ return kd->kernelname;
+}
+
+/*
+ * Report an error using printf style arguments. "program" is kd->program
+ * on hard errors, and 0 on soft errors, so that under sun error emulation,
+ * only hard errors are printed out (otherwise, programs like gdb will
+ * generate tons of error messages when trying to access bogus pointers).
+ */
+void
+_kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (program != NULL) {
+ (void)fprintf(stderr, "%s: ", program);
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fputc('\n', stderr);
+ } else
+ (void)vsnprintf(kd->errbuf,
+ sizeof(kd->errbuf), fmt, ap);
+
+ va_end(ap);
+}
+
+void
+_kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
+{
+ va_list ap;
+ size_t n;
+
+ va_start(ap, fmt);
+ if (program != NULL) {
+ (void)fprintf(stderr, "%s: ", program);
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, ": %s\n", strerror(errno));
+ } else {
+ char *cp = kd->errbuf;
+
+ (void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
+ n = strlen(cp);
+ (void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
+ strerror(errno));
+ }
+ va_end(ap);
+}
+
+void *
+_kvm_malloc(kvm_t *kd, size_t n)
+{
+ void *p;
+
+ if ((p = malloc(n)) == NULL)
+ _kvm_err(kd, kd->program, "%s", strerror(errno));
+ return (p);
+}
+
+/*
+ * Wrapper around the lseek(2) system call; calls _kvm_syserr() for us
+ * in the event of emergency.
+ */
+static off_t
+Lseek(kvm_t *kd, int fd, off_t offset, int whence)
+{
+ off_t off;
+
+ errno = 0;
+
+ if ((off = lseek(fd, offset, whence)) == -1 && errno != 0) {
+ _kvm_syserr(kd, kd->program, "Lseek");
+ return ((off_t)-1);
+ }
+ return (off);
+}
+
+ssize_t
+_kvm_pread(kvm_t *kd, int fd, void *buf, size_t size, off_t off)
+{
+ ptrdiff_t moff;
+ void *newbuf;
+ size_t dsize;
+ ssize_t rv;
+ off_t doff;
+
+ /* If aligned nothing to do. */
+ if (((off % kd->fdalign) | (size % kd->fdalign)) == 0) {
+ return pread(fd, buf, size, off);
+ }
+
+ /*
+ * Otherwise must buffer. We can't tolerate short reads in this
+ * case (lazy bum).
+ */
+ moff = (ptrdiff_t)off % kd->fdalign;
+ doff = off - moff;
+ dsize = moff + size + kd->fdalign - 1;
+ dsize -= dsize % kd->fdalign;
+ if (kd->iobufsz < dsize) {
+ newbuf = realloc(kd->iobuf, dsize);
+ if (newbuf == NULL) {
+ _kvm_syserr(kd, 0, "cannot allocate I/O buffer");
+ return (-1);
+ }
+ kd->iobuf = newbuf;
+ kd->iobufsz = dsize;
+ }
+ rv = pread(fd, kd->iobuf, dsize, doff);
+ if (rv < size + moff)
+ return -1;
+ memcpy(buf, kd->iobuf + moff, size);
+ return size;
+}
+
+/*
+ * Wrapper around the pread(2) system call; calls _kvm_syserr() for us
+ * in the event of emergency.
+ */
+static ssize_t
+Pread(kvm_t *kd, int fd, void *buf, size_t nbytes, off_t offset)
+{
+ ssize_t rv;
+
+ errno = 0;
+
+ if ((rv = _kvm_pread(kd, fd, buf, nbytes, offset)) != nbytes &&
+ errno != 0)
+ _kvm_syserr(kd, kd->program, "Pread");
+ return (rv);
+}
+
+static kvm_t *
+_kvm_open(kvm_t *kd, const char *uf, const char *mf, const char *sf, int flag,
+ char *errout)
+{
+ struct stat st;
+ int ufgiven;
+
+ kd->pmfd = -1;
+ kd->vmfd = -1;
+ kd->swfd = -1;
+ kd->nlfd = -1;
+ kd->alive = KVM_ALIVE_DEAD;
+ kd->procbase = NULL;
+ kd->procbase_len = 0;
+ kd->procbase2 = NULL;
+ kd->procbase2_len = 0;
+ kd->lwpbase = NULL;
+ kd->lwpbase_len = 0;
+ kd->nbpg = getpagesize();
+ kd->swapspc = NULL;
+ kd->argspc = NULL;
+ kd->argspc_len = 0;
+ kd->argbuf = NULL;
+ kd->argv = NULL;
+ kd->vmst = NULL;
+ kd->vm_page_buckets = NULL;
+ kd->kcore_hdr = NULL;
+ kd->cpu_dsize = 0;
+ kd->cpu_data = NULL;
+ kd->dump_off = 0;
+ kd->fdalign = 1;
+ kd->iobuf = NULL;
+ kd->iobufsz = 0;
+
+ if (flag & KVM_NO_FILES) {
+ kd->alive = KVM_ALIVE_SYSCTL;
+ return(kd);
+ }
+
+ /*
+ * Call the MD open hook. This sets:
+ * usrstack, min_uva, max_uva
+ */
+ if (_kvm_mdopen(kd)) {
+ _kvm_err(kd, kd->program, "md init failed");
+ goto failed;
+ }
+
+ ufgiven = (uf != NULL);
+ if (!ufgiven) {
+#ifdef CPU_BOOTED_KERNEL
+ /* 130 is 128 + '/' + '\0' */
+ static char booted_kernel[130];
+ int mib[2], rc;
+ size_t len;
+
+ mib[0] = CTL_MACHDEP;
+ mib[1] = CPU_BOOTED_KERNEL;
+ booted_kernel[0] = '/';
+ booted_kernel[1] = '\0';
+ len = sizeof(booted_kernel) - 2;
+ rc = sysctl(&mib[0], 2, &booted_kernel[1], &len, NULL, 0);
+ booted_kernel[sizeof(booted_kernel) - 1] = '\0';
+ uf = (booted_kernel[1] == '/') ?
+ &booted_kernel[1] : &booted_kernel[0];
+ if (rc != -1)
+ rc = stat(uf, &st);
+ if (rc != -1 && !S_ISREG(st.st_mode))
+ rc = -1;
+ if (rc == -1)
+#endif /* CPU_BOOTED_KERNEL */
+ uf = _PATH_UNIX;
+ }
+ else if (strlen(uf) >= MAXPATHLEN) {
+ _kvm_err(kd, kd->program, "exec file name too long");
+ goto failed;
+ }
+ if (flag & ~O_RDWR) {
+ _kvm_err(kd, kd->program, "bad flags arg");
+ goto failed;
+ }
+ if (mf == 0)
+ mf = _PATH_MEM;
+ if (sf == 0)
+ sf = _PATH_DRUM;
+
+ /*
+ * Open the kernel namelist. If /dev/ksyms doesn't
+ * exist, open the current kernel.
+ */
+ if (ufgiven == 0)
+ kd->nlfd = open(_PATH_KSYMS, O_RDONLY | O_CLOEXEC, 0);
+ if (kd->nlfd < 0) {
+ if ((kd->nlfd = open(uf, O_RDONLY | O_CLOEXEC, 0)) < 0) {
+ _kvm_syserr(kd, kd->program, "%s", uf);
+ goto failed;
+ }
+ strlcpy(kd->kernelname, uf, sizeof(kd->kernelname));
+ } else {
+ strlcpy(kd->kernelname, _PATH_KSYMS, sizeof(kd->kernelname));
+ /*
+ * We're here because /dev/ksyms was opened
+ * successfully. However, we don't want to keep it
+ * open, so we close it now. Later, we will open
+ * it again, since it will be the only case where
+ * kd->nlfd is negative.
+ */
+ close(kd->nlfd);
+ kd->nlfd = -1;
+ }
+
+ if ((kd->pmfd = open(mf, flag | O_CLOEXEC, 0)) < 0) {
+ _kvm_syserr(kd, kd->program, "%s", mf);
+ goto failed;
+ }
+ if (fstat(kd->pmfd, &st) < 0) {
+ _kvm_syserr(kd, kd->program, "%s", mf);
+ goto failed;
+ }
+ if (S_ISCHR(st.st_mode) && strcmp(mf, _PATH_MEM) == 0) {
+ /*
+ * If this is /dev/mem, open kmem too. (Maybe we should
+ * make it work for either /dev/mem or /dev/kmem -- in either
+ * case you're working with a live kernel.)
+ */
+ if ((kd->vmfd = open(_PATH_KMEM, flag | O_CLOEXEC, 0)) < 0) {
+ _kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
+ goto failed;
+ }
+ kd->alive = KVM_ALIVE_FILES;
+ if ((kd->swfd = open(sf, flag | O_CLOEXEC, 0)) < 0) {
+ if (errno != ENXIO) {
+ _kvm_syserr(kd, kd->program, "%s", sf);
+ goto failed;
+ }
+ /* swap is not configured? not fatal */
+ }
+ } else {
+ kd->fdalign = DEV_BSIZE; /* XXX */
+ /*
+ * This is a crash dump.
+ * Initialize the virtual address translation machinery.
+ *
+ * If there is no valid core header, fail silently here.
+		 * The address translations, however, will fail without a
+		 * header.  Things can be made to run by calling
+ * kvm_dump_mkheader() before doing any translation.
+ */
+ if (_kvm_get_header(kd) == 0) {
+ if (_kvm_initvtop(kd) < 0)
+ goto failed;
+ }
+ }
+ return (kd);
+failed:
+ /*
+ * Copy out the error if doing sane error semantics.
+ */
+ if (errout != 0)
+ (void)strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
+ (void)kvm_close(kd);
+ return (0);
+}
+
+/*
+ * The kernel dump file (from savecore) contains:
+ * kcore_hdr_t kcore_hdr;
+ * kcore_seg_t cpu_hdr;
+ * (opaque) cpu_data; (size is cpu_hdr.c_size)
+ * kcore_seg_t mem_hdr;
+ * (memory) mem_data; (size is mem_hdr.c_size)
+ *
+ * Note: khdr is padded to khdr.c_hdrsize;
+ * cpu_hdr and mem_hdr are padded to khdr.c_seghdrsize
+ */
+static int
+_kvm_get_header(kvm_t *kd)
+{
+ kcore_hdr_t kcore_hdr;
+ kcore_seg_t cpu_hdr;
+ kcore_seg_t mem_hdr;
+ size_t offset;
+ ssize_t sz;
+
+ /*
+ * Read the kcore_hdr_t
+ */
+ sz = Pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
+ if (sz != sizeof(kcore_hdr))
+ return (-1);
+
+ /*
+ * Currently, we only support dump-files made by the current
+ * architecture...
+ */
+ if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
+ (CORE_GETMID(kcore_hdr) != MID_MACHINE))
+ return (-1);
+
+ /*
+ * Currently, we only support exactly 2 segments: cpu-segment
+ * and data-segment in exactly that order.
+ */
+ if (kcore_hdr.c_nseg != 2)
+ return (-1);
+
+ /*
+ * Save away the kcore_hdr. All errors after this
+	 * should do a "goto fail" to deallocate things.
+ */
+	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
+	if (kd->kcore_hdr == NULL)
+		goto fail;
+	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
+ offset = kcore_hdr.c_hdrsize;
+
+ /*
+ * Read the CPU segment header
+ */
+ sz = Pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
+ if (sz != sizeof(cpu_hdr))
+ goto fail;
+ if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
+ (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
+ goto fail;
+ offset += kcore_hdr.c_seghdrsize;
+
+ /*
+ * Read the CPU segment DATA.
+ */
+ kd->cpu_dsize = cpu_hdr.c_size;
+ kd->cpu_data = _kvm_malloc(kd, cpu_hdr.c_size);
+ if (kd->cpu_data == NULL)
+ goto fail;
+ sz = Pread(kd, kd->pmfd, kd->cpu_data, cpu_hdr.c_size, (off_t)offset);
+ if (sz != cpu_hdr.c_size)
+ goto fail;
+ offset += cpu_hdr.c_size;
+
+ /*
+ * Read the next segment header: data segment
+ */
+ sz = Pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
+ if (sz != sizeof(mem_hdr))
+ goto fail;
+ offset += kcore_hdr.c_seghdrsize;
+
+ if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
+ (CORE_GETFLAG(mem_hdr) != CORE_DATA))
+ goto fail;
+
+ kd->dump_off = offset;
+ return (0);
+
+fail:
+ if (kd->kcore_hdr != NULL) {
+ free(kd->kcore_hdr);
+ kd->kcore_hdr = NULL;
+ }
+ if (kd->cpu_data != NULL) {
+ free(kd->cpu_data);
+ kd->cpu_data = NULL;
+ kd->cpu_dsize = 0;
+ }
+ return (-1);
+}
+
+/*
+ * The format while on the dump device is: (new format)
+ * kcore_seg_t cpu_hdr;
+ * (opaque) cpu_data; (size is cpu_hdr.c_size)
+ * kcore_seg_t mem_hdr;
+ * (memory) mem_data; (size is mem_hdr.c_size)
+ */
+int
+kvm_dump_mkheader(kvm_t *kd, off_t dump_off)
+{
+ kcore_seg_t cpu_hdr;
+ size_t hdr_size;
+ ssize_t sz;
+
+ if (kd->kcore_hdr != NULL) {
+ _kvm_err(kd, kd->program, "already has a dump header");
+ return (-1);
+ }
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, kd->program, "don't use on live kernel");
+ return (-1);
+ }
+
+ /*
+ * Validate new format crash dump
+ */
+ sz = Pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), dump_off);
+ if (sz != sizeof(cpu_hdr)) {
+ _kvm_err(kd, 0, "read %zx bytes at offset %"PRIx64
+ " for cpu_hdr instead of requested %zu",
+ sz, dump_off, sizeof(cpu_hdr));
+ return (-1);
+ }
+ if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC)
+ || (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
+ _kvm_err(kd, 0, "invalid magic in cpu_hdr");
+ return (0);
+ }
+ hdr_size = ALIGN(sizeof(cpu_hdr));
+
+ /*
+ * Read the CPU segment.
+ */
+ kd->cpu_dsize = cpu_hdr.c_size;
+ kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
+ if (kd->cpu_data == NULL) {
+ _kvm_err(kd, kd->program, "no cpu_data");
+ goto fail;
+ }
+ sz = Pread(kd, kd->pmfd, kd->cpu_data, cpu_hdr.c_size,
+ dump_off + hdr_size);
+ if (sz != cpu_hdr.c_size) {
+ _kvm_err(kd, kd->program, "size %zu != cpu_hdr.csize %"PRIu32,
+ sz, cpu_hdr.c_size);
+ goto fail;
+ }
+ hdr_size += kd->cpu_dsize;
+
+ /*
+ * Leave phys mem pointer at beginning of memory data
+ */
+ kd->dump_off = dump_off + hdr_size;
+ if (Lseek(kd, kd->pmfd, kd->dump_off, SEEK_SET) == -1) {
+ _kvm_err(kd, kd->program, "failed to seek to %" PRId64,
+ (int64_t)kd->dump_off);
+ goto fail;
+ }
+
+ /*
+ * Create a kcore_hdr.
+ */
+ kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
+ if (kd->kcore_hdr == NULL) {
+ _kvm_err(kd, kd->program, "failed to allocate header");
+ goto fail;
+ }
+
+ kd->kcore_hdr->c_hdrsize = ALIGN(sizeof(kcore_hdr_t));
+ kd->kcore_hdr->c_seghdrsize = ALIGN(sizeof(kcore_seg_t));
+ kd->kcore_hdr->c_nseg = 2;
+ CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE,0);
+
+ /*
+ * Now that we have a valid header, enable translations.
+ */
+ if (_kvm_initvtop(kd) == 0)
+ /* Success */
+ return (hdr_size);
+
+fail:
+ if (kd->kcore_hdr != NULL) {
+ free(kd->kcore_hdr);
+ kd->kcore_hdr = NULL;
+ }
+ if (kd->cpu_data != NULL) {
+ free(kd->cpu_data);
+ kd->cpu_data = NULL;
+ kd->cpu_dsize = 0;
+ }
+ return (-1);
+}
+
+static int
+clear_gap(kvm_t *kd, bool (*write_buf)(void *, const void *, size_t),
+ void *cookie, size_t size)
+{
+ char buf[1024];
+ size_t len;
+
+ (void)memset(buf, 0, size > sizeof(buf) ? sizeof(buf) : size);
+
+ while (size > 0) {
+ len = size > sizeof(buf) ? sizeof(buf) : size;
+ if (!(*write_buf)(cookie, buf, len)) {
+ _kvm_syserr(kd, kd->program, "clear_gap");
+ return -1;
+ }
+ size -= len;
+ }
+
+ return 0;
+}
+
+/*
+ * Write the dump header by calling write_buf with cookie as first argument.
+ */
+int
+kvm_dump_header(kvm_t *kd, bool (*write_buf)(void *, const void *, size_t),
+ void *cookie, int dumpsize)
+{
+ kcore_seg_t seghdr;
+ long offset;
+ size_t gap;
+
+ if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
+ _kvm_err(kd, kd->program, "no valid dump header(s)");
+ return (-1);
+ }
+
+ /*
+ * Write the generic header
+ */
+ offset = 0;
+ if (!(*write_buf)(cookie, kd->kcore_hdr, sizeof(kcore_hdr_t))) {
+ _kvm_syserr(kd, kd->program, "kvm_dump_header");
+ return (-1);
+ }
+ offset += kd->kcore_hdr->c_hdrsize;
+ gap = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
+ if (clear_gap(kd, write_buf, cookie, gap) == -1)
+ return (-1);
+
+ /*
+ * Write the CPU header
+ */
+ CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
+ seghdr.c_size = ALIGN(kd->cpu_dsize);
+ if (!(*write_buf)(cookie, &seghdr, sizeof(seghdr))) {
+ _kvm_syserr(kd, kd->program, "kvm_dump_header");
+ return (-1);
+ }
+ offset += kd->kcore_hdr->c_seghdrsize;
+ gap = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
+ if (clear_gap(kd, write_buf, cookie, gap) == -1)
+ return (-1);
+
+ if (!(*write_buf)(cookie, kd->cpu_data, kd->cpu_dsize)) {
+ _kvm_syserr(kd, kd->program, "kvm_dump_header");
+ return (-1);
+ }
+ offset += seghdr.c_size;
+ gap = seghdr.c_size - kd->cpu_dsize;
+ if (clear_gap(kd, write_buf, cookie, gap) == -1)
+ return (-1);
+
+ /*
+ * Write the actual dump data segment header
+ */
+ CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
+ seghdr.c_size = dumpsize;
+ if (!(*write_buf)(cookie, &seghdr, sizeof(seghdr))) {
+ _kvm_syserr(kd, kd->program, "kvm_dump_header");
+ return (-1);
+ }
+ offset += kd->kcore_hdr->c_seghdrsize;
+ gap = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
+ if (clear_gap(kd, write_buf, cookie, gap) == -1)
+ return (-1);
+
+ return (int)offset;
+}
+
+static bool
+kvm_dump_header_stdio(void *cookie, const void *buf, size_t len)
+{
+ return fwrite(buf, len, 1, (FILE *)cookie) == 1;
+}
+
+int
+kvm_dump_wrtheader(kvm_t *kd, FILE *fp, int dumpsize)
+{
+ return kvm_dump_header(kd, kvm_dump_header_stdio, fp, dumpsize);
+}
+
+kvm_t *
+kvm_openfiles(const char *uf, const char *mf, const char *sf,
+ int flag, char *errout)
+{
+ kvm_t *kd;
+
+ if ((kd = malloc(sizeof(*kd))) == NULL) {
+ (void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
+ return (0);
+ }
+ kd->program = 0;
+ return (_kvm_open(kd, uf, mf, sf, flag, errout));
+}
+
+kvm_t *
+kvm_open(const char *uf, const char *mf, const char *sf, int flag,
+ const char *program)
+{
+ kvm_t *kd;
+
+ if ((kd = malloc(sizeof(*kd))) == NULL) {
+ (void)fprintf(stderr, "%s: %s\n",
+ program ? program : getprogname(), strerror(errno));
+ return (0);
+ }
+ kd->program = program;
+ return (_kvm_open(kd, uf, mf, sf, flag, NULL));
+}
+
+int
+kvm_close(kvm_t *kd)
+{
+ int error = 0;
+
+ if (kd->pmfd >= 0)
+ error |= close(kd->pmfd);
+ if (kd->vmfd >= 0)
+ error |= close(kd->vmfd);
+ if (kd->nlfd >= 0)
+ error |= close(kd->nlfd);
+ if (kd->swfd >= 0)
+ error |= close(kd->swfd);
+ if (kd->vmst)
+ _kvm_freevtop(kd);
+ kd->cpu_dsize = 0;
+ if (kd->cpu_data != NULL)
+ free(kd->cpu_data);
+ if (kd->kcore_hdr != NULL)
+ free(kd->kcore_hdr);
+ if (kd->procbase != 0)
+ free(kd->procbase);
+ if (kd->procbase2 != 0)
+ free(kd->procbase2);
+ if (kd->lwpbase != 0)
+ free(kd->lwpbase);
+ if (kd->swapspc != 0)
+ free(kd->swapspc);
+ if (kd->argspc != 0)
+ free(kd->argspc);
+ if (kd->argbuf != 0)
+ free(kd->argbuf);
+ if (kd->argv != 0)
+ free(kd->argv);
+ if (kd->iobuf != 0)
+ free(kd->iobuf);
+ free(kd);
+
+ return (error);
+}
+
+int
+kvm_nlist(kvm_t *kd, struct nlist *nl)
+{
+ int rv, nlfd;
+
+ /*
+	 * kd->nlfd might be negative when we get here; in that case
+	 * we are using /dev/ksyms.  So open it again, just for as long
+	 * as it takes to retrieve the namelist.
+ */
+ if (kd->nlfd < 0) {
+ nlfd = open(_PATH_KSYMS, O_RDONLY | O_CLOEXEC, 0);
+ if (nlfd < 0) {
+ _kvm_err(kd, 0, "failed to open %s", _PATH_KSYMS);
+ return (nlfd);
+ }
+ } else
+ nlfd = kd->nlfd;
+
+ /*
+ * Call the nlist(3) routines to retrieve the given namelist.
+ */
+ rv = __fdnlist(nlfd, nl);
+
+ if (rv == -1)
+ _kvm_err(kd, 0, "bad namelist");
+
+ if (kd->nlfd < 0)
+ close(nlfd);
+
+ return (rv);
+}
+
+int
+kvm_dump_inval(kvm_t *kd)
+{
+ struct nlist nl[2];
+ paddr_t pa;
+ size_t dsize;
+ off_t doff;
+ void *newbuf;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, kd->program, "clearing dump on live kernel");
+ return (-1);
+ }
+ nl[0].n_name = "_dumpmag";
+ nl[1].n_name = NULL;
+
+ if (kvm_nlist(kd, nl) == -1) {
+ _kvm_err(kd, 0, "bad namelist");
+ return (-1);
+ }
+ if (_kvm_kvatop(kd, (vaddr_t)nl[0].n_value, &pa) == 0)
+ return (-1);
+
+ errno = 0;
+ dsize = MAX(kd->fdalign, sizeof(u_long));
+ if (kd->iobufsz < dsize) {
+ newbuf = realloc(kd->iobuf, dsize);
+ if (newbuf == NULL) {
+ _kvm_syserr(kd, 0, "cannot allocate I/O buffer");
+ return (-1);
+ }
+ kd->iobuf = newbuf;
+ kd->iobufsz = dsize;
+ }
+ memset(kd->iobuf, 0, dsize);
+ doff = _kvm_pa2off(kd, pa);
+ doff -= doff % kd->fdalign;
+ if (pwrite(kd->pmfd, kd->iobuf, dsize, doff) == -1) {
+ _kvm_syserr(kd, 0, "cannot invalidate dump - pwrite");
+ return (-1);
+ }
+ return (0);
+}
+
+ssize_t
+kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
+{
+ int cc;
+ void *cp;
+
+ if (ISKMEM(kd)) {
+ /*
+ * We're using /dev/kmem. Just read straight from the
+ * device and let the active kernel do the address translation.
+ */
+ errno = 0;
+ cc = _kvm_pread(kd, kd->vmfd, buf, len, (off_t)kva);
+ if (cc < 0) {
+ _kvm_syserr(kd, 0, "kvm_read");
+ return (-1);
+ } else if (cc < len)
+ _kvm_err(kd, kd->program, "short read");
+ return (cc);
+ } else if (ISSYSCTL(kd)) {
+ _kvm_err(kd, kd->program, "kvm_open called with KVM_NO_FILES, "
+ "can't use kvm_read");
+ return (-1);
+ } else {
+ if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
+ _kvm_err(kd, kd->program, "no valid dump header");
+ return (-1);
+ }
+ cp = buf;
+ while (len > 0) {
+ paddr_t pa;
+ off_t foff;
+
+ cc = _kvm_kvatop(kd, (vaddr_t)kva, &pa);
+ if (cc == 0)
+ return (-1);
+ if (cc > len)
+ cc = len;
+ foff = _kvm_pa2off(kd, pa);
+ errno = 0;
+ cc = _kvm_pread(kd, kd->pmfd, cp, (size_t)cc, foff);
+ if (cc < 0) {
+ _kvm_syserr(kd, kd->program, "kvm_read");
+ break;
+ }
+ /*
+ * If kvm_kvatop returns a bogus value or our core
+ * file is truncated, we might wind up seeking beyond
+ * the end of the core file in which case the read will
+ * return 0 (EOF).
+ */
+ if (cc == 0)
+ break;
+ cp = (char *)cp + cc;
+ kva += cc;
+ len -= cc;
+ }
+ return ((char *)cp - (char *)buf);
+ }
+ /* NOTREACHED */
+}
+
+ssize_t
+kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
+{
+ int cc;
+
+ if (ISKMEM(kd)) {
+ /*
+ * Just like kvm_read, only we write.
+ */
+ errno = 0;
+ cc = pwrite(kd->vmfd, buf, len, (off_t)kva);
+ if (cc < 0) {
+ _kvm_syserr(kd, 0, "kvm_write");
+ return (-1);
+ } else if (cc < len)
+ _kvm_err(kd, kd->program, "short write");
+ return (cc);
+ } else if (ISSYSCTL(kd)) {
+ _kvm_err(kd, kd->program, "kvm_open called with KVM_NO_FILES, "
+ "can't use kvm_write");
+ return (-1);
+ } else {
+ _kvm_err(kd, kd->program,
+ "kvm_write not implemented for dead kernels");
+ return (-1);
+ }
+ /* NOTREACHED */
+}
--- /dev/null
+/* $NetBSD: kvm_aarch64.c,v 1.1 2014/08/10 05:47:37 matt Exp $ */
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/user.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <machine/kcore.h>
+#include <machine/pte.h>
+#include <machine/vmparam.h>
+
+#include <limits.h>
+#include <db.h>
+#include <stdlib.h>
+
+#include "kvm_private.h"
+
+__RCSID("$NetBSD: kvm_aarch64.c,v 1.1 2014/08/10 05:47:37 matt Exp $");
+
+/*ARGSUSED*/
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ return;
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+ return (0);
+}
+
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return(0);
+ }
+
+ if ((va & AARCH64_KSEG_MASK) != AARCH64_KSEG_START) {
+ /*
+ * Bogus address (not in KV space): punt.
+ */
+ _kvm_err(kd, 0, "invalid kernel virtual address");
+lose:
+ *pa = -1;
+ return 0;
+ }
+
+ const cpu_kcore_hdr_t * const cpu_kh = kd->cpu_data;
+ const u_int tg1 =__SHIFTOUT(cpu_kh->kh_tcr1, TCR_TG1);
+ const u_int t1siz = __SHIFTOUT(cpu_kh->kh_tcr1, TCR_T1SZ);
+
+ /*
+ * Real kernel virtual address: do the translation.
+ */
+
+ u_int va_bits;
+ u_int page_shift;
+
+ switch (tg1) {
+ case TCR_TG_4KB:
+ va_bits = t1siz + 36;
+ page_shift = 12;
+ break;
+ case TCR_TG_16KB:
+ va_bits = 48;
+ page_shift = 14;
+ break;
+ case TCR_TG_64KB:
+ va_bits = t1siz + 38;
+ page_shift = 16;
+ break;
+ default:
+ goto lose;
+ }
+
+ const size_t page_size = 1 << page_shift;
+ const uint64_t page_mask = (page_size - 1);
+ const uint64_t page_addr = __BITS(47, 0) & ~page_mask;
+ const uint64_t pte_mask = page_mask >> 3;
+ const u_int pte_shift = page_shift - 3;
+
+	/* how many levels of page tables do we have? */
+ u_int level = (48 + page_shift - 1) / page_shift;
+
+ /* restrict va to the valid VA bits */
+ va &= (1LL << va_bits) - 1;
+
+ u_int addr_shift = page_shift + (level - 1) * pte_shift;
+
+ /* clear out the unused low bits of the table address */
+ paddr_t pte_addr = (cpu_kh->kh_ttbr1 & TTBR_BADDR);
+ pte_addr &= ~((8L << (va_bits - addr_shift)) - 1);
+
+ for (;;) {
+ pt_entry_t pte;
+
+ /* now index into the pte table */
+ pte_addr += 8 * ((va >> addr_shift) & pte_mask);
+
+ /* Find and read the PTE. */
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
+ _kvm_pa2off(kd, pte_addr)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read pte");
+ goto lose;
+ }
+
+		/* The PTE must be valid to continue the walk. */
+ if ((pte & LX_VALID) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid pte)");
+ goto lose;
+ }
+
+ if ((pte & LX_TYPE) == LX_TYPE_BLK) {
+ const paddr_t blk_mask = ((1L << addr_shift) - 1);
+
+ *pa = (pte & page_addr & ~blk_mask) | (va & blk_mask);
+ return 0;
+ }
+
+ if (level == page_shift) {
+ *pa = (pte & page_addr) | (va & page_mask);
+ return 0;
+ }
+
+ /*
+ * Read next level of page table
+ */
+
+ pte_addr = pte & page_addr;
+ addr_shift -= pte_shift;
+ }
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ const cpu_kcore_hdr_t * const cpu_kh = kd->cpu_data;
+ off_t off = 0;
+
+ for (const phys_ram_seg_t *ramsegs = cpu_kh->kh_ramsegs;
+ ramsegs->size != 0; ramsegs++) {
+ if (pa >= ramsegs->start
+ && pa < ramsegs->start + ramsegs->size) {
+ off += pa - ramsegs->start;
+ break;
+ }
+ off += ramsegs->size;
+ }
+
+ return kd->dump_off + off;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_alpha.c,v 1.27 2014/02/19 20:21:22 dsl Exp $ */
+
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+#define __KVM_ALPHA_PRIVATE /* see <machine/pte.h> */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/kcore.h>
+#include <machine/pmap.h>
+#include <machine/vmparam.h>
+
+#include <limits.h>
+#include <db.h>
+#include <stdlib.h>
+
+#include "kvm_private.h"
+
+__RCSID("$NetBSD: kvm_alpha.c,v 1.27 2014/02/19 20:21:22 dsl Exp $");
+
+/*ARGSUSED*/
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ return;
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+ return (0);
+}
+
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ alpha_pt_entry_t pte;
+ u_long pteoff, page_off;
+ int rv;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return(0);
+ }
+
+ cpu_kh = kd->cpu_data;
+ page_off = va & (cpu_kh->page_size - 1);
+
+ if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
+ /*
+ * Direct-mapped address: just convert it.
+ */
+
+ *pa = ALPHA_K0SEG_TO_PHYS(va);
+ rv = cpu_kh->page_size - page_off;
+ } else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
+ /*
+ * Real kernel virtual address: do the translation.
+ */
+
+ /* Find and read the L1 PTE. */
+ pteoff = cpu_kh->lev1map_pa +
+ l1pte_index(va) * sizeof(alpha_pt_entry_t);
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
+ _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read L1 PTE");
+ goto lose;
+ }
+
+ /* Find and read the L2 PTE. */
+ if ((pte & ALPHA_PTE_VALID) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
+ goto lose;
+ }
+ pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
+ l2pte_index(va) * sizeof(alpha_pt_entry_t);
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
+ _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read L2 PTE");
+ goto lose;
+ }
+
+ /* Find and read the L3 PTE. */
+ if ((pte & ALPHA_PTE_VALID) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
+ goto lose;
+ }
+ pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
+ l3pte_index(va) * sizeof(alpha_pt_entry_t);
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
+ _kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read L3 PTE");
+ goto lose;
+ }
+
+ /* Fill in the PA. */
+ if ((pte & ALPHA_PTE_VALID) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
+ goto lose;
+ }
+ *pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
+ rv = cpu_kh->page_size - page_off;
+ } else {
+ /*
+ * Bogus address (not in KV space): punt.
+ */
+
+ _kvm_err(kd, 0, "invalid kernel virtual address");
+lose:
+ *pa = -1;
+ rv = 0;
+ }
+
+ return (rv);
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ramsegs;
+ off_t off;
+ int i;
+
+ cpu_kh = kd->cpu_data;
+ ramsegs = (phys_ram_seg_t *)((char *)cpu_kh + ALIGN(sizeof *cpu_kh));
+
+ off = 0;
+ for (i = 0; i < cpu_kh->nmemsegs; i++) {
+ if (pa >= ramsegs[i].start &&
+ (pa - ramsegs[i].start) < ramsegs[i].size) {
+ off += (pa - ramsegs[i].start);
+ break;
+ }
+ off += ramsegs[i].size;
+ }
+
+ return (kd->dump_off + off);
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_arm.c,v 1.6 2010/09/20 23:23:16 jym Exp $ */
+
+/*-
+ * Copyright (C) 1996 Wolfgang Solfrank.
+ * Copyright (C) 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: kvm_powerpc.c,v 1.3 1997/09/19 04:00:23 thorpej Exp
+ */
+
+/*
+ * arm32 machine dependent routines for kvm.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+__RCSID("$NetBSD: kvm_arm.c,v 1.6 2010/09/20 23:23:16 jym Exp $");
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <arm/kcore.h>
+#include <arm/arm32/pte.h>
+
+#include <stdlib.h>
+#include <db.h>
+#include <limits.h>
+#include <kvm.h>
+
+#include <unistd.h>
+
+#include "kvm_private.h"
+
+void
+_kvm_freevtop(kvm_t * kd)
+{
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+int
+_kvm_initvtop(kvm_t * kd)
+{
+ return 0;
+}
+
+int
+_kvm_kvatop(kvm_t * kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ pd_entry_t pde;
+ pt_entry_t pte;
+ paddr_t pde_pa, pte_pa;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return (0);
+ }
+ cpu_kh = kd->cpu_data;
+
+ if (cpu_kh->version != 1) {
+ _kvm_err(kd, 0, "unsupported kcore structure version");
+ return 0;
+ }
+ if (cpu_kh->flags != 0) {
+ _kvm_err(kd, 0, "kcore flags not supported");
+ return 0;
+ }
+ /*
+ * work out which L1 table we need
+ */
+ if (va >= (cpu_kh->UserL1TableSize << 17))
+ pde_pa = cpu_kh->PAKernelL1Table;
+ else
+ pde_pa = cpu_kh->PAUserL1Table;
+
+ /*
+ * work out the offset into the L1 Table
+ */
+ pde_pa += ((va >> 20) * sizeof(pd_entry_t));
+
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pde, sizeof(pd_entry_t),
+ _kvm_pa2off(kd, pde_pa)) != sizeof(pd_entry_t)) {
+ _kvm_syserr(kd, 0, "could not read L1 entry");
+ return (0);
+ }
+ /*
+ * next work out what kind of record it is
+ */
+ switch (pde & L1_TYPE_MASK) {
+ case L1_TYPE_S:
+ *pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET);
+ return L1_S_SIZE - (va & L1_S_OFFSET);
+ case L1_TYPE_C:
+ pte_pa = (pde & L1_C_ADDR_MASK)
+ | ((va & 0xff000) >> 10);
+ break;
+ case L1_TYPE_F:
+ pte_pa = (pde & L1_S_ADDR_MASK)
+ | ((va & 0xffc00) >> 8);
+ break;
+ default:
+ _kvm_syserr(kd, 0, "L1 entry is invalid");
+ return (0);
+ }
+
+ /*
+ * locate the pte and load it
+ */
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pt_entry_t),
+ _kvm_pa2off(kd, pte_pa)) != sizeof(pt_entry_t)) {
+ _kvm_syserr(kd, 0, "could not read L2 entry");
+ return (0);
+ }
+ switch (pte & L2_TYPE_MASK) {
+ case L2_TYPE_L:
+ *pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
+ return (L2_L_SIZE - (va & L2_L_OFFSET));
+ case L2_TYPE_S:
+ *pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
+ return (L2_S_SIZE - (va & L2_S_OFFSET));
+ case L2_TYPE_T:
+ *pa = (pte & L2_T_FRAME) | (va & L2_T_OFFSET);
+ return (L2_T_SIZE - (va & L2_T_OFFSET));
+ default:
+ _kvm_syserr(kd, 0, "L2 entry is invalid");
+ return (0);
+ }
+
+ _kvm_err(kd, 0, "vatop not yet implemented!");
+ return 0;
+}
+
+off_t
+_kvm_pa2off(kvm_t * kd, u_long pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ramsegs;
+ off_t off;
+ int i;
+
+ cpu_kh = kd->cpu_data;
+ ramsegs = (void *) ((char *) (void *) cpu_kh + cpu_kh->omemsegs);
+
+ off = 0;
+ for (i = 0; i < cpu_kh->nmemsegs; i++) {
+ if (pa >= ramsegs[i].start &&
+ (pa - ramsegs[i].start) < ramsegs[i].size) {
+ off += (pa - ramsegs[i].start);
+ break;
+ }
+ off += ramsegs[i].size;
+ }
+ return (kd->dump_off + off);
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. arm)
+ */
+int
+_kvm_mdopen(kvm_t * kd)
+{
+ uintptr_t max_uva;
+ extern struct ps_strings *__ps_strings;
+
+#if 0 /* XXX - These vary across arm machines... */
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+#endif
+ /* This is somewhat hack-ish, but it works. */
+ max_uva = (uintptr_t) (__ps_strings + 1);
+ kd->usrstack = max_uva;
+ kd->max_uva = max_uva;
+ kd->min_uva = 0;
+
+ return (0);
+}
--- /dev/null
+.\" $NetBSD: kvm_dump.3,v 1.15 2009/10/20 19:10:09 snj Exp $
+.\"
+.\" Copyright (c) 1996 Leo Weppelman
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+.\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\"
+.Dd March 17, 1996
+.Dt KVM_DUMP 3
+.Os
+.Sh NAME
+.Nm kvm_dump_mkheader ,
+.Nm kvm_dump_wrtheader ,
+.Nm kvm_dump_inval
+.Nd crash dump support functions
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft int
+.Fn kvm_dump_mkheader "kvm_t *kd" "off_t dump_off"
+.Ft int
+.Fn kvm_dump_wrtheader "kvm_t *kd" "FILE *fp" "int dumpsize"
+.Ft int
+.Fn kvm_dump_inval "kvm_t *kd"
+.Sh DESCRIPTION
+First note that the functions described here were designed to be used by
+.Xr savecore 8 .
+.Pp
+The function
+.Fn kvm_dump_mkheader
+checks if the physical memory file associated with
+.Fa kd
+contains a valid crash dump header as generated by a dumping kernel.
+When a valid header is found,
+.Fn kvm_dump_mkheader
+initializes the internal kvm data structures as if a crash dump generated by
+the
+.Xr savecore 8
+program was opened.
+This has the intentional side effect of enabling the
+address translation machinery.
+.Pp
+A call to
+.Fn kvm_dump_mkheader
+will most likely be followed by a call to
+.Fn kvm_dump_wrtheader .
+This function takes care of generating the generic header, the CORE_CPU
+section and the section header of the CORE_DATA section.
+The data is written to the file pointed at by
+.Fa fp .
+The
+.Fa dumpsize
+argument is only used to properly set the segment size of the CORE_DATA
+section.
+Note that this function assumes that
+.Fa fp
+is positioned at file location 0.
+This function will not seek and therefore allows
+.Fa fp
+to be a file pointer obtained by
+.Fn zopen .
+.Pp
+The
+.Fn kvm_dump_inval
+function clears the magic number in the physical memory file associated with
+.Fa kd .
+The address translations must be enabled for this to work (thus assuming
+that
+.Fn kvm_dump_mkheader
+was called earlier in the sequence).
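+.Pp
+A rough sketch of this calling sequence, as it might appear in a
+program such as
+.Xr savecore 8 ,
+is shown below.
+The variables
+.Va kd ,
+.Va fp ,
+and
+.Va dumpsize
+are assumed to have been set up by the caller, and the dump is
+assumed to start at offset 0 in the memory file:
+.Bd -literal -offset indent
+int hdrsize;
+
+hdrsize = kvm_dump_mkheader(kd, (off_t)0);
+if (hdrsize == -1)
+	errx(1, "kvm_dump_mkheader: %s", kvm_geterr(kd));
+if (hdrsize == 0)
+	errx(1, "no valid dump header found");
+if (kvm_dump_wrtheader(kd, fp, dumpsize) == -1)
+	errx(1, "kvm_dump_wrtheader: %s", kvm_geterr(kd));
+
+/* ... copy dumpsize bytes of dump data from kd to fp here ... */
+
+if (kvm_dump_inval(kd) == -1)
+	errx(1, "kvm_dump_inval: %s", kvm_geterr(kd));
+.Ed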
+.Sh RETURN VALUES
+All functions except
+.Fn kvm_dump_mkheader
+return 0 on success, -1 on failure.
+The function
+.Fn kvm_dump_mkheader
+returns the size of the headers present before the actual dumpdata starts.
+If no valid headers were found but no fatal errors occurred, 0 is returned.
+On fatal errors the return value is -1.
+.Pp
+In the case of failure,
+.Xr kvm_geterr 3
+can be used to retrieve the cause of the error.
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_open 3
+.Sh HISTORY
+These functions first appeared in
+.Nx 1.2 .
--- /dev/null
+/* $NetBSD: kvm_file.c,v 1.29 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_file.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_file.c,v 1.29 2014/02/19 20:21:22 dsl Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * File list interface for kvm. pstat, fstat and netstat are
+ * users of this code, so we've factored it out into a separate module.
+ * Thus, we keep this grunge out of the other kvm applications (i.e.,
+ * most other applications are interested only in open/close/read/nlist).
+ */
+
+#define _KERNEL
+#include <sys/types.h>
+#undef _KERNEL
+#include <sys/param.h>
+#include <sys/lwp.h>
+#include <sys/proc.h>
+#include <sys/exec.h>
+#define _KERNEL
+#include <sys/file.h>
+#undef _KERNEL
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <sys/sysctl.h>
+
+#include <limits.h>
+#include <ndbm.h>
+#include <paths.h>
+#include <string.h>
+
+#include "kvm_private.h"
+
+static int
+kvm_deadfiles(kvm_t *, int, int, long, int);
+
+/*
+ * Get file structures.
+ */
+/*ARGSUSED*/
+static int
+kvm_deadfiles(kvm_t *kd, int op, int arg, long ofhead, int numfiles)
+{
+ size_t buflen = kd->argspc_len, n = 0;
+ struct file *fp;
+ struct filelist fhead;
+ char *where = kd->argspc;
+
+ /*
+ * first copyout filehead
+ */
+ if (buflen < sizeof(fhead) ||
+ KREAD(kd, (u_long)ofhead, &fhead)) {
+ _kvm_err(kd, kd->program, "can't read filehead");
+ return (0);
+ }
+ buflen -= sizeof(fhead);
+ where += sizeof(fhead);
+ (void)memcpy(kd->argspc, &fhead, sizeof(fhead));
+
+ /*
+ * followed by an array of file structures
+ */
+ for (fp = fhead.lh_first; fp != 0; fp = fp->f_list.le_next) {
+ if (buflen > sizeof(struct file)) {
+ if (KREAD(kd, (u_long)fp,
+ ((struct file *)(void *)where))) {
+ _kvm_err(kd, kd->program, "can't read kfp");
+ return (0);
+ }
+ buflen -= sizeof(struct file);
+ fp = (struct file *)(void *)where;
+ where += sizeof(struct file);
+ n++;
+ }
+ }
+ if (n != numfiles) {
+ _kvm_err(kd, kd->program, "inconsistent nfiles");
+ return (0);
+ }
+ return (numfiles);
+}
+
+char *
+kvm_getfiles(kvm_t *kd, int op, int arg, int *cnt)
+{
+ size_t size;
+ int mib[2], st;
+ int numfiles;
+ struct file *fp, *fplim;
+ struct filelist fhead;
+
+ if (ISSYSCTL(kd)) {
+ size = 0;
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_FILE;
+ st = sysctl(mib, 2, NULL, &size, NULL, 0);
+ if (st == -1) {
+ _kvm_syserr(kd, kd->program, "kvm_getprocs");
+ return (0);
+ }
+ KVM_ALLOC(kd, argspc, size);
+ st = sysctl(mib, 2, kd->argspc, &size, NULL, 0);
+ if (st == -1 || size < sizeof(fhead)) {
+ _kvm_syserr(kd, kd->program, "kvm_getfiles");
+ return (0);
+ }
+ (void)memcpy(&fhead, kd->argspc, sizeof(fhead));
+ fp = (struct file *)(void *)(kd->argspc + sizeof(fhead));
+ fplim = (struct file *)(void *)(kd->argspc + size);
+ for (numfiles = 0; fhead.lh_first && (fp < fplim);
+ numfiles++, fp++)
+ fhead.lh_first = fp->f_list.le_next;
+ } else {
+ struct nlist nl[3], *p;
+
+ nl[0].n_name = "_nfiles";
+ nl[1].n_name = "_filehead";
+ nl[2].n_name = 0;
+
+ if (kvm_nlist(kd, nl) != 0) {
+ for (p = nl; p->n_type != 0; ++p)
+ ;
+ _kvm_err(kd, kd->program,
+ "%s: no such symbol", p->n_name);
+ return (0);
+ }
+ if (KREAD(kd, nl[0].n_value, &numfiles)) {
+ _kvm_err(kd, kd->program, "can't read numfiles");
+ return (0);
+ }
+ size = sizeof(fhead) + (numfiles + 10) * sizeof(struct file);
+ KVM_ALLOC(kd, argspc, size);
+ numfiles = kvm_deadfiles(kd, op, arg, (long)nl[1].n_value,
+ numfiles);
+ if (numfiles == 0)
+ return (0);
+ }
+ *cnt = numfiles;
+ return (kd->argspc);
+}
--- /dev/null
+.\" $NetBSD: kvm_geterr.3,v 1.9 2009/03/10 23:49:07 joerg Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm_geterr.3 8.1 (Berkeley) 6/4/93
+.\"
+.Dd June 4, 1993
+.Dt KVM_GETERR 3
+.Os
+.Sh NAME
+.Nm kvm_geterr
+.Nd get error message on kvm descriptor
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft char *
+.Fn kvm_geterr "kvm_t *kd"
+.Sh DESCRIPTION
+This function returns a string describing the most recent error condition
+on the descriptor
+.Fa kd .
+The results are undefined if the most recent
+.Xr kvm 3
+library call did not produce an error.
+The string returned is stored in memory owned by
+.Xr kvm 3 ,
+so the message should be copied out and saved elsewhere if necessary.
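+.Sh EXAMPLES
+The following fragment is an illustrative sketch only; it assumes that
+.Va kd
+was obtained from a successful
+.Xr kvm_openfiles 3
+call and that
+.Va addr
+holds a valid kernel virtual address:
+.Bd -literal -offset indent
+char buf[64];
+
+if (kvm_read(kd, addr, buf, sizeof(buf)) != sizeof(buf))
+	fprintf(stderr, "kvm_read: %s\en", kvm_geterr(kd));
+.Ed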
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh BUGS
+This routine cannot be used to access error conditions due to a failed
+.Fn kvm_openfiles
+call, since failure is indicated by returning a
+.Dv NULL
+descriptor.
+Therefore, errors on open are output to the special error buffer
+passed to
+.Fn kvm_openfiles .
+This option is not available to
+.Fn kvm_open .
--- /dev/null
+.\" $NetBSD: kvm_getfiles.3,v 1.11 2009/03/10 23:49:07 joerg Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm_getfiles.3 8.2 (Berkeley) 4/19/94
+.\"
+.Dd April 19, 1994
+.Dt KVM_GETFILES 3
+.Os
+.Sh NAME
+.Nm kvm_getfiles
+.Nd survey open files
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.In sys/kinfo.h
+.Fd #define _KERNEL
+.In sys/file.h
+.Fd #undef _KERNEL
+.\" .Fa kvm_t *kd
+.Ft char *
+.Fn kvm_getfiles "kvm_t *kd" "int op" "int arg" "int *cnt"
+.Sh DESCRIPTION
+.Fn kvm_getfiles
+returns a (sub-)set of the open files in the kernel indicated by
+.Fa kd .
+The
+.Fa op
+and
+.Fa arg
+arguments constitute a predicate which limits the set of files
+returned.
+No predicates are currently defined.
+.Pp
+The number of file structures found is returned in the reference parameter
+.Fa cnt .
+The files are returned as a contiguous array of file structures,
+preceded by the address of the first file entry in the kernel.
+This memory is owned by kvm and is not guaranteed to be persistent across
+subsequent kvm library calls.
+Data should be copied out if it needs to be saved.
+.Sh RETURN VALUES
+.Fn kvm_getfiles
+will return
+.Dv NULL
+on failure.
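+.Sh EXAMPLES
+The following fragment is an illustrative sketch only; since no predicates
+are currently defined, both
+.Fa op
+and
+.Fa arg
+are passed as zero, and error handling is kept minimal:
+.Bd -literal -offset indent
+char errbuf[_POSIX2_LINE_MAX];
+kvm_t *kd;
+int cnt;
+
+kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
+if (kd == NULL)
+	errx(1, "kvm_openfiles: %s", errbuf);
+if (kvm_getfiles(kd, 0, 0, &cnt) == NULL)
+	errx(1, "kvm_getfiles: %s", kvm_geterr(kd));
+printf("%d open files\en", cnt);
+kvm_close(kd);
+.Ed
+.Pp
+Interpreting the returned file structures themselves additionally requires
+the kernel definition of
+.Sy struct file ,
+which is why the synopsis above wraps the
+.In sys/file.h
+include in
+.Dv _KERNEL .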
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh BUGS
+This routine does not belong in the kvm interface.
--- /dev/null
+.\" $NetBSD: kvm_getkernelname.3,v 1.2 2011/09/13 08:53:10 wiz Exp $
+.\"
+.\"
+.\" Copyright (c) 2011 The NetBSD Foundation, Inc.
+.\" All rights reserved.
+.\"
+.\" This code is derived from software contributed to The NetBSD Foundation
+.\" by Christos Zoulas.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. All advertising materials mentioning features or use of this software
+.\" must display the following acknowledgement:
+.\" This product includes software developed by the NetBSD
+.\" Foundation, Inc. and its contributors.
+.\" 4. Neither the name of The NetBSD Foundation nor the names of its
+.\" contributors may be used to endorse or promote products derived
+.\" from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+.\" BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd September 14, 2011
+.Dt KVM_GETKERNELNAME 3
+.Os
+.Sh NAME
+.Nm kvm_getkernelname
+.Nd get kernel name of opened kvm descriptor
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft const char *
+.Fn kvm_getkernelname "kvm_t *kd"
+.Sh DESCRIPTION
+This function returns the name of the kernel image associated with the kvm
+descriptor obtained by a previous
+.Xr kvm_open 3
+or
+.Xr kvm_openfiles 3
+call.
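+.Sh EXAMPLES
+Assuming
+.Va kd
+refers to a descriptor returned by
+.Xr kvm_openfiles 3 ,
+the following fragment reports which kernel image is being examined:
+.Bd -literal -offset indent
+printf("kernel: %s\en", kvm_getkernelname(kd));
+.Ed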
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
--- /dev/null
+.\" $NetBSD: kvm_getloadavg.3,v 1.11 2009/03/10 23:49:07 joerg Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm_getloadavg.3 8.1 (Berkeley) 6/4/93
+.\"
+.Dd August 18, 2002
+.Dt KVM_GETLOADAVG 3
+.Os
+.Sh NAME
+.Nm kvm_getloadavg
+.Nd get system load averages
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In sys/resource.h
+.In kvm.h
+.Ft int
+.Fn kvm_getloadavg "kvm_t *kd" "double loadavg[]" "int nelem"
+.Sh DESCRIPTION
+The
+.Fn kvm_getloadavg
+function returns the number of processes in the system run queue
+of the kernel indicated by
+.Fa kd ,
+averaged over various periods of time.
+Up to
+.Fa nelem
+samples are retrieved and assigned to successive elements of
+.Fa loadavg Ns Bq .
+The system imposes a maximum of 3 samples, representing averages
+over the last 1, 5, and 15 minutes, respectively.
+.Sh RETURN VALUES
+If the load average was unobtainable, \-1 is returned; otherwise,
+the number of samples actually retrieved is returned.
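+.Sh EXAMPLES
+The following fragment is an illustrative sketch only; error handling is
+minimal and the required headers are omitted for brevity.
+It prints the 1, 5, and 15 minute load averages of the running system:
+.Bd -literal -offset indent
+char errbuf[_POSIX2_LINE_MAX];
+double lavg[3];
+kvm_t *kd;
+int i, n;
+
+kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
+if (kd == NULL)
+	errx(1, "kvm_openfiles: %s", errbuf);
+n = kvm_getloadavg(kd, lavg, 3);
+if (n == -1)
+	errx(1, "kvm_getloadavg failed");
+for (i = 0; i < n; i++)
+	printf("%.2f ", lavg[i]);
+printf("\en");
+kvm_close(kd);
+.Ed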
+.Sh SEE ALSO
+.Xr uptime 1 ,
+.Xr getloadavg 3 ,
+.Xr kvm 3 ,
+.Xr kvm_open 3 ,
+.Xr sysctl 3
--- /dev/null
+/* $NetBSD: kvm_getloadavg.c,v 1.11 2012/03/21 10:10:36 matt Exp $ */
+
+/*-
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_getloadavg.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_getloadavg.c,v 1.11 2012/03/21 10:10:36 matt Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/lwp.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <uvm/uvm_param.h>
+
+#include <db.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <nlist.h>
+#include <kvm.h>
+#include <stdlib.h>
+
+#include "kvm_private.h"
+
+static struct nlist nl[] = {
+ { .n_name = "_averunnable" },
+#define X_AVERUNNABLE 0
+ { .n_name = "_fscale" },
+#define X_FSCALE 1
+ { .n_name = "" },
+};
+
+/*
+ * kvm_getloadavg() -- Get system load averages, from live or dead kernels.
+ *
+ * Put `nelem' samples into `loadavg' array.
+ * Return number of samples retrieved, or -1 on error.
+ */
+int
+kvm_getloadavg(kvm_t *kd, double loadavg[], int nelem)
+{
+ struct loadavg loadinfo;
+ struct nlist *p;
+ int fscale, i;
+
+ if (ISALIVE(kd))
+ return (getloadavg(loadavg, nelem));
+
+ if (kvm_nlist(kd, nl) != 0) {
+ for (p = nl; p->n_type != 0; ++p);
+ _kvm_err(kd, kd->program,
+ "%s: no such symbol", p->n_name);
+ return (-1);
+ }
+
+ if (KREAD(kd, nl[X_AVERUNNABLE].n_value, &loadinfo)) {
+ _kvm_err(kd, kd->program, "can't read averunnable");
+ return (-1);
+ }
+
+ /*
+ * Old kernels have fscale separately; if not found assume
+ * running new format.
+ */
+ if (!KREAD(kd, nl[X_FSCALE].n_value, &fscale))
+ loadinfo.fscale = fscale;
+
+ nelem = MIN(nelem, sizeof(loadinfo.ldavg) / sizeof(fixpt_t));
+ for (i = 0; i < nelem; i++)
+ loadavg[i] = (double) loadinfo.ldavg[i] / loadinfo.fscale;
+ return (nelem);
+}
--- /dev/null
+.\" $NetBSD: kvm_getlwps.3,v 1.6 2009/03/10 23:49:07 joerg Exp $
+.\"
+.\"Copyright (c) 2002 The NetBSD Foundation, Inc.
+.\"All rights reserved.
+.\"
+.\"This code is derived from software contributed to The NetBSD Foundation
+.\"by Nathan J. Williams.
+.\"
+.\"Redistribution and use in source and binary forms, with or without
+.\"modification, are permitted provided that the following conditions
+.\"are met:
+.\"1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\"2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\"THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+.\"``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+.\"TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+.\"PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+.\"BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\"CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\"SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\"INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\"CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\"ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\"POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd February 10, 2004
+.Dt KVM_GETLWPS 3
+.Os
+.Sh NAME
+.Nm kvm_getlwps
+.Nd access state of LWPs belonging to a user process
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.In sys/param.h
+.In sys/sysctl.h
+.\" .Fa kvm_t *kd
+.Ft struct kinfo_lwp *
+.Fn kvm_getlwps "kvm_t *kd" "int pid" "u_long procaddr" "size_t elemsize" "int *cnt"
+.Sh DESCRIPTION
+.Fn kvm_getlwps
+returns the set of LWPs belonging to the process specified by
+.Fa pid
+or
+.Fa procaddr
+in the kernel indicated by
+.Fa kd .
+The number of LWPs found is returned in the reference parameter
+.Fa cnt .
+The LWPs are returned as a contiguous array of
+.Sy kinfo_lwp
+structures.
+This memory is locally allocated, and subsequent calls to
+.Fn kvm_getlwps
+and
+.Fn kvm_close
+will overwrite this storage.
+.Pp
+Only the first
+.Fa elemsize
+bytes of each array entry are returned.
+If the
+.Sy kinfo_lwp
+structure grows in a future release of
+.Nx ,
+the kernel will only return the requested amount of data for
+each array entry, and programs that use
+.Fn kvm_getlwps
+will continue to function without the need for recompilation.
+.Pp
+If called against an active kernel, the
+.Fn kvm_getlwps
+function will use the
+.Xr sysctl 3
+interface and return information about the process identified by
+.Fa pid ;
+otherwise the kernel memory device file or swap device will be
+accessed and the process is identified by the location passed in
+.Fa procaddr .
+.Sh RETURN VALUES
+.Fn kvm_getlwps
+returns
+.Dv NULL
+on failure.
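+.Sh EXAMPLES
+The following fragment is an illustrative sketch only; it assumes that
+.Va kd
+was obtained from
+.Xr kvm_openfiles 3
+and that
+.Va pid
+holds the process ID of interest, and error handling is abbreviated:
+.Bd -literal -offset indent
+struct kinfo_proc2 *p;
+struct kinfo_lwp *l;
+int i, nlwps, nproc;
+
+p = kvm_getproc2(kd, KERN_PROC_PID, pid, sizeof(*p), &nproc);
+if (p == NULL || nproc == 0)
+	errx(1, "cannot find the target process");
+l = kvm_getlwps(kd, p->p_pid, (u_long)p->p_paddr,
+    sizeof(struct kinfo_lwp), &nlwps);
+if (l == NULL)
+	errx(1, "kvm_getlwps: %s", kvm_geterr(kd));
+for (i = 0; i < nlwps; i++)
+	printf("lwp %d\en", l[i].l_lid);
+.Ed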
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getproc2 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh BUGS
+These routines do not belong in the kvm interface.
--- /dev/null
+.\" $NetBSD: kvm_getprocs.3,v 1.15 2009/03/10 23:49:07 joerg Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm_getprocs.3 8.1 (Berkeley) 6/4/93
+.\"
+.Dd February 10, 2004
+.Dt KVM_GETPROCS 3
+.Os
+.Sh NAME
+.Nm kvm_getprocs ,
+.Nm kvm_getproc2 ,
+.Nm kvm_getargv ,
+.Nm kvm_getargv2 ,
+.Nm kvm_getenvv ,
+.Nm kvm_getenvv2
+.Nd access user process state
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.In sys/param.h
+.In sys/sysctl.h
+.\" .Fa kvm_t *kd
+.Ft struct kinfo_proc *
+.Fn kvm_getprocs "kvm_t *kd" "int op" "int arg" "int *cnt"
+.Ft char **
+.Fn kvm_getargv "kvm_t *kd" "const struct kinfo_proc *p" "int nchr"
+.Ft char **
+.Fn kvm_getenvv "kvm_t *kd" "const struct kinfo_proc *p" "int nchr"
+.Ft struct kinfo_proc2 *
+.Fn kvm_getproc2 "kvm_t *kd" "int op" "int arg" "size_t elemsize" "int *cnt"
+.Ft char **
+.Fn kvm_getargv2 "kvm_t *kd" "const struct kinfo_proc2 *p" "int nchr"
+.Ft char **
+.Fn kvm_getenvv2 "kvm_t *kd" "const struct kinfo_proc2 *p" "int nchr"
+.Sh DESCRIPTION
+.Fn kvm_getprocs
+returns a (sub-)set of active processes in the kernel indicated by
+.Fa kd .
+The
+.Fa op
+and
+.Fa arg
+arguments constitute a predicate
+which limits the set of processes returned.
+The value of
+.Fa op
+describes the filtering predicate as follows:
+.Pp
+.Bl -tag -width 20n -offset indent -compact
+.It Sy KERN_PROC_ALL
+all processes
+.It Sy KERN_PROC_PID
+processes with process id
+.Fa arg
+.It Sy KERN_PROC_PGRP
+processes with process group
+.Fa arg
+.It Sy KERN_PROC_SESSION
+processes with session id
+.Fa arg
+.It Sy KERN_PROC_TTY
+processes with tty device
+.Fa arg
+.It Sy KERN_PROC_UID
+processes with effective user id
+.Fa arg
+.It Sy KERN_PROC_RUID
+processes with real user id
+.Fa arg
+.It Sy KERN_PROC_GID
+processes with effective group id
+.Fa arg
+.It Sy KERN_PROC_RGID
+processes with real group id
+.Fa arg
+.El
+.Pp
+The number of processes found is returned in the reference parameter
+.Fa cnt .
+The processes are returned as a contiguous array of
+.Sy kinfo_proc
+structures.
+This memory is locally allocated, and subsequent calls to
+.Fn kvm_getprocs
+and
+.Fn kvm_close
+will overwrite this storage.
+.Pp
+If the
+.Fa op
+argument for
+.Fn kvm_getprocs
+is
+.Sy KERN_PROC_TTY ,
+.Fa arg
+can also be
+.Sy KERN_PROC_TTY_NODEV
+to select processes with no controlling tty and
+.Sy KERN_PROC_TTY_REVOKE
+to select processes which have had their controlling tty
+revoked.
+.Pp
+.Fn kvm_getargv
+returns a null-terminated argument vector that corresponds to the
+command line arguments passed to the process indicated by
+.Fa p .
+Most likely, these arguments correspond to the values passed to
+.Xr exec 3
+on process creation.
+This information is, however,
+deliberately under control of the process itself.
+Note that the original command name can be found, unaltered,
+in the p_comm field of the process structure returned by
+.Fn kvm_getprocs .
+.Pp
+The
+.Fa nchr
+argument indicates the maximum number of characters, including null bytes,
+to use in building the strings.
+If this amount is exceeded, the string
+causing the overflow is truncated and the partial result is returned.
+This is handy for programs like
+.Xr ps 1
+and
+.Xr w 1
+that print only a one line summary of a command and should not copy
+out large amounts of text only to ignore it.
+If
+.Fa nchr
+is zero, no limit is imposed and all argument strings are returned in
+their entirety.
+.Pp
+The memory allocated to the argv pointers and string storage
+is owned by the kvm library.
+Subsequent
+.Fn kvm_getprocs
+and
+.Xr kvm_close 3
+calls will clobber this storage.
+.Pp
+The
+.Fn kvm_getenvv
+function is similar to
+.Fn kvm_getargv
+but returns the vector of environment strings.
+This data is also alterable by the process.
+.Pp
+.Fn kvm_getproc2
+is similar to
+.Fn kvm_getprocs
+but returns an array of
+.Sy kinfo_proc2
+structures.
+Additionally, only the first
+.Fa elemsize
+bytes of each array entry are returned.
+If the
+.Sy kinfo_proc2
+structure grows in a future release of
+.Nx ,
+the kernel will only return the requested amount of data for
+each array entry, and programs that use
+.Fn kvm_getproc2
+will continue to function without the need for recompilation.
+.Pp
+The
+.Fn kvm_getargv2
+and
+.Fn kvm_getenvv2
+functions are equivalent to
+.Fn kvm_getargv
+and
+.Fn kvm_getenvv ,
+but use a
+.Sy kinfo_proc2
+structure to specify the process.
+.Pp
+If called against an active kernel, the
+.Fn kvm_getproc2 ,
+.Fn kvm_getargv2 ,
+and
+.Fn kvm_getenvv2
+functions will use the
+.Xr sysctl 3
+interface and do not require access to the kernel memory device
+file or swap device.
+.Sh RETURN VALUES
+.Fn kvm_getprocs ,
+.Fn kvm_getargv ,
+.Fn kvm_getenvv ,
+.Fn kvm_getproc2 ,
+.Fn kvm_getargv2 ,
+and
+.Fn kvm_getenvv2
+all return
+.Dv NULL
+on failure.
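+.Sh EXAMPLES
+The following sketch is a minimal process listing in the style of
+.Xr ps 1 ;
+error handling is abbreviated and
+.Va kd
+is assumed to come from
+.Xr kvm_openfiles 3 :
+.Bd -literal -offset indent
+struct kinfo_proc2 *kp;
+int i, nproc;
+
+kp = kvm_getproc2(kd, KERN_PROC_ALL, 0, sizeof(*kp), &nproc);
+if (kp == NULL)
+	errx(1, "kvm_getproc2: %s", kvm_geterr(kd));
+for (i = 0; i < nproc; i++)
+	printf("%d\et%s\en", kp[i].p_pid, kp[i].p_comm);
+.Ed
+.Pp
+Each returned entry can then be passed to
+.Fn kvm_getargv2
+to retrieve the corresponding argument vector.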
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh BUGS
+These routines do not belong in the kvm interface.
--- /dev/null
+/* $NetBSD: kvm_hppa.c,v 1.7 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_hppa.c,v 1.7 2014/02/19 20:21:22 dsl Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * hppa machine dependent routines for kvm.
+ * XXX fredette - largely unimplemented so far. what is here
+ * is lifted and disabled.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+#include <machine/kcore.h>
+#include <machine/pmap.h>
+#include <machine/pte.h>
+#include <machine/vmparam.h>
+
+#ifndef btop
+#define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
+#define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
+#endif
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+
+ /* Not actually used for anything right now, but safe. */
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return 0;
+}
+
+/*
+ * Translate a kernel virtual address to a physical address.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+#if 0
+ cpu_kcore_hdr_t *cpu_kh;
+ u_long page_off;
+ pd_entry_t pde;
+ pt_entry_t pte;
+ u_long pde_pa, pte_pa;
+#endif
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return 0;
+ }
+
+ _kvm_syserr(kd, 0, "could not read PTE");
+
+#if 0
+ cpu_kh = kd->cpu_data;
+ page_off = va & PGOFSET;
+
+ /*
+ * Find and read the page directory entry.
+ */
+ pde_pa = cpu_kh->ptdpaddr + (pdei(va) * sizeof(pd_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
+ _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
+ _kvm_syserr(kd, 0, "could not read PDE");
+ goto lose;
+ }
+
+ /*
+ * Find and read the page table entry.
+ */
+ if ((pde & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PDE)");
+ goto lose;
+ }
+ pte_pa = (pde & PG_FRAME) + (ptei(va) * sizeof(pt_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pte),
+ _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read PTE");
+ goto lose;
+ }
+
+ /*
+ * Validate the PTE and return the physical address.
+ */
+ if ((pte & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PTE)");
+ goto lose;
+ }
+ *pa = (pte & PG_FRAME) + page_off;
+ return (int)(NBPG - page_off);
+
+ lose:
+#endif
+ *pa = (u_long)~0L;
+ return 0;
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+#if 0
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ramsegs;
+ off_t off;
+ int i;
+
+ cpu_kh = kd->cpu_data;
+ ramsegs = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));
+
+ off = 0;
+ for (i = 0; i < cpu_kh->nmemsegs; i++) {
+ if (pa >= ramsegs[i].start &&
+ (pa - ramsegs[i].start) < ramsegs[i].size) {
+ off += (pa - ramsegs[i].start);
+ break;
+ }
+ off += ramsegs[i].size;
+ }
+
+ return kd->dump_off + off;
+#endif
+ return 0;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return 0;
+}
--- /dev/null
+/* $NetBSD: kvm_i386.c,v 1.30 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_i386.c,v 1.30 2014/02/19 20:21:22 dsl Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * i386 machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+#include <i386/kcore.h>
+#include <i386/pmap.h>
+#include <i386/pte.h>
+#include <i386/vmparam.h>
+
+#ifndef btop
+#define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
+#define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
+#endif
+
+/*
+ * Indicates whether PAE is in use for the kernel image
+ * 0: native i386 memory mappings
+ * 1: i386 PAE mappings
+ */
+static int i386_use_pae;
+
+int _kvm_kvatop_i386(kvm_t *, vaddr_t, paddr_t *);
+int _kvm_kvatop_i386pae(kvm_t *, vaddr_t, paddr_t *);
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+
+ /* Not actually used for anything right now, but safe. */
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+ cpu_kcore_hdr_t *cpu_kh = kd->cpu_data;
+
+ i386_use_pae = 0; /* default: non PAE mode */
+ if ((cpu_kh->pdppaddr & I386_KCORE_PAE) == I386_KCORE_PAE)
+ i386_use_pae = 1;
+
+ return 0;
+}
+
+/*
+ * Translate a kernel virtual address to a physical address.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return 0;
+ }
+
+ switch (i386_use_pae) {
+ default:
+ case 0:
+ return _kvm_kvatop_i386(kd, va, pa);
+ case 1:
+ return _kvm_kvatop_i386pae(kd, va, pa);
+ }
+
+}
+
+/*
+ * Used to translate a virtual address to a physical address for systems
+ * with PAE mode disabled. Only two levels of virtual memory pages are
+ * dereferenced (L2 PDEs, then L1 PTEs).
+ */
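+/*
+ * Illustrative example only (classic non-PAE 4 KB paging, 10/10/12 split):
+ * for va = 0xc0a01234, pl2_pi(va) = 0x302 selects the PDE within the page
+ * directory, pl1_pi(va) = 0x201 selects the PTE within that page table,
+ * and the low 12 bits (0x234) are the offset added to the frame extracted
+ * from the PTE below.
+ */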
+int
+_kvm_kvatop_i386(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ u_long page_off;
+ pd_entry_t pde;
+ pt_entry_t pte;
+ paddr_t pde_pa, pte_pa;
+
+ cpu_kh = kd->cpu_data;
+ page_off = va & PGOFSET;
+
+ /*
+ * Find and read the page directory entry.
+ * pdppaddr being PAGE_SIZE aligned, we mask the option bits.
+ */
+ pde_pa = (cpu_kh->pdppaddr & PG_FRAME) + (pl2_pi(va) * sizeof(pde));
+ if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
+ _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
+ _kvm_syserr(kd, 0, "could not read PDE");
+ goto lose;
+ }
+
+ /*
+ * Find and read the page table entry.
+ */
+ if ((pde & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PDE)");
+ goto lose;
+ }
+ if ((pde & PG_PS) != 0) {
+ /*
+ * This is a 4MB page.
+ */
+ page_off = va & ~PG_LGFRAME;
+ *pa = (pde & PG_LGFRAME) + page_off;
+ return (int)(NBPD_L2 - page_off);
+ }
+ pte_pa = (pde & PG_FRAME) + (pl1_pi(va) * sizeof(pt_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pte),
+ _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read PTE");
+ goto lose;
+ }
+
+ /*
+ * Validate the PTE and return the physical address.
+ */
+ if ((pte & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PTE)");
+ goto lose;
+ }
+ *pa = (pte & PG_FRAME) + page_off;
+ return (int)(NBPG - page_off);
+
+ lose:
+ *pa = (paddr_t)~0L;
+ return 0;
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ramsegs;
+ off_t off;
+ int i;
+
+ cpu_kh = kd->cpu_data;
+ ramsegs = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));
+
+ off = 0;
+ for (i = 0; i < cpu_kh->nmemsegs; i++) {
+ if (pa >= ramsegs[i].start &&
+ (pa - ramsegs[i].start) < ramsegs[i].size) {
+ off += (pa - ramsegs[i].start);
+ break;
+ }
+ off += ramsegs[i].size;
+ }
+
+ return (kd->dump_off + off);
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return 0;
+}
--- /dev/null
+/* $NetBSD: kvm_i386pae.c,v 1.2 2014/02/19 20:21:22 dsl Exp $ */
+
+/*
+ * Copyright (c) 2010 Jean-Yves Migeon.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__RCSID("$NetBSD: kvm_i386pae.c,v 1.2 2014/02/19 20:21:22 dsl Exp $");
+
+/*
+ * This will expose PAE functions, macros, definitions and constants.
+ * Note: this affects all virtual memory related functions. Only their
+ * PAE versions can be used below.
+ */
+#define PAE
+
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+#include <i386/kcore.h>
+#include <i386/pmap.h>
+#include <i386/pte.h>
+#include <i386/vmparam.h>
+
+int _kvm_kvatop_i386pae(kvm_t *, vaddr_t, paddr_t *);
+
+/*
+ * Used to translate a virtual address to a physical address for systems
+ * running under PAE mode. Three levels of virtual memory pages are handled
+ * here: the per-CPU L3 page, the 4 L2 PDs and the PTs.
+ */
+int
+_kvm_kvatop_i386pae(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ u_long page_off;
+ pd_entry_t pde;
+ pt_entry_t pte;
+ paddr_t pde_pa, pte_pa;
+
+ cpu_kh = kd->cpu_data;
+ page_off = va & PGOFSET;
+
+ /*
+ * Find and read the PDE. Ignore the L3, as it is only a per-CPU
+ * page, not needed for kernel VA => PA translations.
+ * Remember that the 4 L2 pages are contiguous, so it is safe
+ * to increment pdppaddr to compute the address of the PDE.
+ * pdppaddr being PAGE_SIZE aligned, we mask the option bits.
+ */
+ pde_pa = (cpu_kh->pdppaddr & PG_FRAME) + (pl2_pi(va) * sizeof(pde));
+ if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
+ _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
+ _kvm_syserr(kd, 0, "could not read PDE");
+ goto lose;
+ }
+
+ /*
+ * Find and read the page table entry.
+ */
+ if ((pde & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PDE)");
+ goto lose;
+ }
+ if ((pde & PG_PS) != 0) {
+ /*
+ * This is a 2MB page.
+ */
+ page_off = va & ((vaddr_t)~PG_LGFRAME);
+ *pa = (pde & PG_LGFRAME) + page_off;
+ return (int)(NBPD_L2 - page_off);
+ }
+
+ pte_pa = (pde & PG_FRAME) + (pl1_pi(va) * sizeof(pt_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pte),
+ _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read PTE");
+ goto lose;
+ }
+
+ /*
+ * Validate the PTE and return the physical address.
+ */
+ if ((pte & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PTE)");
+ goto lose;
+ }
+ *pa = (pte & PG_FRAME) + page_off;
+ return (int)(NBPG - page_off);
+
+lose:
+ *pa = (paddr_t)~0L;
+ return 0;
+
+}
--- /dev/null
+/* $NetBSD: kvm_m68k.c,v 1.19 2014/01/27 21:00:01 matt Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Gordon W. Ross and Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Run-time kvm dispatcher for m68k machines.
+ * The actual MD code is in the files:
+ * kvm_m68k_cmn.c kvm_sun3.c ...
+ *
+ * Note: This file has to build on ALL m68k machines,
+ * so do NOT include any <machine/[*].h> files here.
+ */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/kcore.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <limits.h>
+#include <nlist.h>
+#include <kvm.h>
+#include <db.h>
+
+#include <m68k/kcore.h>
+
+#include "kvm_private.h"
+#include "kvm_m68k.h"
+
+__RCSID("$NetBSD: kvm_m68k.c,v 1.19 2014/01/27 21:00:01 matt Exp $");
+
+struct name_ops {
+ const char *name;
+ struct kvm_ops *ops;
+};
+
+/*
+ * Match specific kcore types first, falling into a default.
+ */
+static struct name_ops optbl[] = {
+ { "sun2", &_kvm_ops_sun2 },
+ { "sun3", &_kvm_ops_sun3 },
+ { "sun3x", &_kvm_ops_sun3x },
+ { NULL, &_kvm_ops_cmn },
+};
+
+/*
+ * Prepare for translation of kernel virtual addresses into offsets
+ * into crash dump files. This is where we do the dispatch work.
+ */
+int
+_kvm_initvtop(kvm_t *kd)
+{
+ cpu_kcore_hdr_t *h;
+ struct name_ops *nop;
+ struct vmstate *vm;
+
+ vm = (struct vmstate *)_kvm_malloc(kd, sizeof (*vm));
+ if (vm == 0)
+ return (-1);
+
+ kd->vmst = vm;
+
+ /*
+ * Use the machine name in the kcore header to determine
+ * our ops vector. When we reach an ops vector with
+ * no name, we've found a default.
+ */
+ h = kd->cpu_data;
+ h->name[sizeof(h->name) - 1] = '\0'; /* sanity */
+ for (nop = optbl; nop->name != NULL; nop++)
+ if (strcmp(nop->name, h->name) == 0)
+ break;
+
+ vm->ops = nop->ops;
+
+ /*
+ * Compute pgshift and pgofset.
+ */
+ for (vm->pgshift = 0; (1 << vm->pgshift) < h->page_size; vm->pgshift++)
+ /* nothing */ ;
+ if ((1 << vm->pgshift) != h->page_size)
+ goto bad;
+ vm->pgofset = h->page_size - 1;
+
+ if ((vm->ops->initvtop)(kd) < 0)
+ goto bad;
+
+ return (0);
+
+ bad:
+ kd->vmst = NULL;
+ free(vm);
+ return (-1);
+}
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ (kd->vmst->ops->freevtop)(kd);
+ free(kd->vmst);
+}
+
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pap)
+{
+ return ((kd->vmst->ops->kvatop)(kd, va, pap));
+}
+
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ return ((kd->vmst->ops->pa2off)(kd, pa));
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+ u_long max_uva;
+ extern struct ps_strings *__ps_strings;
+
+#if 0 /* XXX - These vary across m68k machines... */
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+#endif
+ /* This is somewhat hack-ish, but it works. */
+ max_uva = (u_long) (__ps_strings + 1);
+ kd->usrstack = max_uva;
+ kd->max_uva = max_uva;
+ kd->min_uva = 0;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_m68k.h,v 1.6 2010/09/20 23:23:16 jym Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Gordon W. Ross and Jason R. Thorpe.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+struct kvm_ops {
+ int (*initvtop)(kvm_t *);
+ void (*freevtop)(kvm_t *);
+ int (*kvatop) (kvm_t *, vaddr_t, paddr_t *);
+ off_t (*pa2off) (kvm_t *, paddr_t);
+};
+
+struct vmstate {
+ struct kvm_ops *ops; /* ops vector */
+ u_int32_t pgshift; /* log2(page_size) */
+ u_int32_t pgofset; /* mask to find offset into page */
+ void *private; /* private to the bottom layer */
+};
+
+extern struct kvm_ops _kvm_ops_cmn;
+extern struct kvm_ops _kvm_ops_sun2;
+extern struct kvm_ops _kvm_ops_sun3;
+extern struct kvm_ops _kvm_ops_sun3x;
--- /dev/null
+/* $NetBSD: kvm_m68k_cmn.c,v 1.18 2014/03/04 06:38:08 matt Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1997 Jason R. Thorpe. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_m68k_cmn.c,v 1.18 2014/03/04 06:38:08 matt Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * Common m68k machine dependent routines for kvm.
+ *
+ * Note: This file has to build on ALL m68k machines,
+ * so do NOT include any <machine / *.h> files here.
+ */
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kcore.h>
+
+#include <unistd.h>
+#include <limits.h>
+#include <nlist.h>
+#include <kvm.h>
+#include <db.h>
+
+#include <m68k/cpu.h>
+#include <m68k/kcore.h>
+#include <m68k/m68k.h>
+
+#include "kvm_private.h"
+#include "kvm_m68k.h"
+
+int _kvm_cmn_initvtop(kvm_t *);
+void _kvm_cmn_freevtop(kvm_t *);
+int _kvm_cmn_kvatop(kvm_t *, vaddr_t, paddr_t *);
+off_t _kvm_cmn_pa2off(kvm_t *, paddr_t);
+
+struct kvm_ops _kvm_ops_cmn = {
+ _kvm_cmn_initvtop,
+ _kvm_cmn_freevtop,
+ _kvm_cmn_kvatop,
+ _kvm_cmn_pa2off };
+
+static int vatop_030(kvm_t *, uint32_t, vaddr_t, paddr_t *);
+static int vatop_040(kvm_t *, uint32_t, vaddr_t, paddr_t *);
+
+#define _kvm_btop(v, a) (((unsigned)(a)) >> (v)->pgshift)
+
+void
+_kvm_cmn_freevtop(kvm_t *kd)
+{
+ /* No private state information to keep. */
+}
+
+int
+_kvm_cmn_initvtop(kvm_t *kd)
+{
+ /* No private state information to keep. */
+ return (0);
+}
+
+int
+_kvm_cmn_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct m68k_kcore_hdr *m = &h->un._m68k;
+ int (*vtopf)(kvm_t *, uint32_t, vaddr_t, paddr_t *);
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return (0);
+ }
+
+ /*
+ * 68040 and 68060 use same translation functions,
+ * as do 68030, 68851, HP MMU.
+ */
+ if (m->mmutype == MMU_68040 || m->mmutype == MMU_68060)
+ vtopf = vatop_040;
+ else
+ vtopf = vatop_030;
+
+ return ((*vtopf)(kd, m->sysseg_pa, va, pa));
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_cmn_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct m68k_kcore_hdr *m = &h->un._m68k;
+ phys_ram_seg_t *rsp;
+ off_t off;
+ int i;
+
+ off = 0;
+ rsp = m->ram_segs;
+ for (i = 0; i < M68K_NPHYS_RAM_SEGS && rsp[i].size != 0; i++) {
+ if (pa >= rsp[i].start &&
+ pa < (rsp[i].start + rsp[i].size)) {
+ pa -= rsp[i].start;
+ break;
+ }
+ off += rsp[i].size;
+ }
+ return (kd->dump_off + off + pa);
+}
+
+/*****************************************************************
+ * Local stuff...
+ */
+
+static int
+vatop_030(kvm_t *kd, uint32_t stpa, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct m68k_kcore_hdr *m = &h->un._m68k;
+ struct vmstate *vm = kd->vmst;
+ paddr_t addr;
+ uint32_t ste, pte;
+ u_int p, offset;
+
+ offset = va & vm->pgofset;
+
+ /*
+ * We may be called before address translation is initialized.
+ * This is typically used to find the dump magic number. This
+ * means we do not yet have the kernel page tables available,
+ * so we must do a simple relocation.
+ */
+ if (va < m->relocend) {
+ *pa = (va - h->kernbase) + m->reloc;
+ return (h->page_size - offset);
+ }
+
+ addr = stpa + ((va >> m->sg_ishift) * sizeof(u_int32_t));
+
+ /*
+ * Can't use KREAD to read kernel segment table entries.
+ * Fortunately it is 1-to-1 mapped so we don't have to.
+ */
+ if (stpa == m->sysseg_pa) {
+ if (_kvm_pread(kd, kd->pmfd, &ste, sizeof(ste),
+ _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
+ goto invalid;
+ } else if (KREAD(kd, addr, &ste))
+ goto invalid;
+ if ((ste & m->sg_v) == 0) {
+ _kvm_err(kd, 0, "invalid segment (%x)", ste);
+ return(0);
+ }
+ p = _kvm_btop(vm, va & m->sg_pmask);
+ addr = (ste & m->sg_frame) + (p * sizeof(u_int32_t));
+
+ /*
+ * Address from STE is a physical address so don't use kvm_read.
+ */
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
+ _kvm_cmn_pa2off(kd, addr)) != sizeof(pte))
+ goto invalid;
+ addr = pte & m->pg_frame;
+ if ((pte & m->pg_v) == 0) {
+ _kvm_err(kd, 0, "page not valid");
+ return (0);
+ }
+ *pa = addr + offset;
+
+ return (h->page_size - offset);
+invalid:
+ _kvm_err(kd, 0, "invalid address (%lx)", va);
+ return (0);
+}
+
+static int
+vatop_040(kvm_t *kd, uint32_t stpa, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct m68k_kcore_hdr *m = &h->un._m68k;
+ struct vmstate *vm = kd->vmst;
+ paddr_t addr;
+ uint32_t stpa2;
+ uint32_t ste, pte;
+ u_int offset;
+
+ offset = va & vm->pgofset;
+
+ /*
+ * We may be called before address translation is initialized.
+ * This is typically used to find the dump magic number. This
+ * means we do not yet have the kernel page tables available,
+ * so we must do a simple relocation.
+ */
+ if (va < m->relocend) {
+ *pa = (va - h->kernbase) + m->reloc;
+ return (h->page_size - offset);
+ }
+
+ addr = stpa + ((va >> m->sg40_shift1) * sizeof(u_int32_t));
+
+ /*
+ * Can't use KREAD to read kernel segment table entries.
+ * Fortunately it is 1-to-1 mapped so we don't have to.
+ */
+ if (stpa == m->sysseg_pa) {
+ if (_kvm_pread(kd, kd->pmfd, &ste, sizeof(ste),
+ _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
+ goto invalid;
+ } else if (KREAD(kd, addr, &ste))
+ goto invalid;
+ if ((ste & m->sg_v) == 0) {
+ _kvm_err(kd, 0, "invalid level 1 descriptor (%x)",
+ ste);
+ return((off_t)0);
+ }
+ stpa2 = (ste & m->sg40_addr1);
+ addr = stpa2 + (((va & m->sg40_mask2) >> m->sg40_shift2) *
+ sizeof(u_int32_t));
+
+ /*
+ * Address from level 1 STE is a physical address,
+ * so don't use kvm_read.
+ */
+ if (_kvm_pread(kd, kd->pmfd, &ste, sizeof(ste),
+ _kvm_cmn_pa2off(kd, addr)) != sizeof(ste))
+ goto invalid;
+ if ((ste & m->sg_v) == 0) {
+ _kvm_err(kd, 0, "invalid level 2 descriptor (%x)",
+ ste);
+ return((off_t)0);
+ }
+ stpa2 = (ste & m->sg40_addr2);
+ addr = stpa2 + (((va & m->sg40_mask3) >> m->sg40_shift3) *
+ sizeof(u_int32_t));
+
+ /*
+ * Address from STE is a physical address so don't use kvm_read.
+ */
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
+ _kvm_cmn_pa2off(kd, addr)) != sizeof(pte))
+ goto invalid;
+ addr = pte & m->pg_frame;
+ if ((pte & m->pg_v) == 0) {
+ _kvm_err(kd, 0, "page not valid");
+ return (0);
+ }
+ *pa = addr + offset;
+
+ return (h->page_size - offset);
+
+invalid:
+ _kvm_err(kd, 0, "invalid address (%lx)", va);
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_mips.c,v 1.22 2014/02/19 20:21:22 dsl Exp $ */
+
+/*
+ * Copyright (c) 1994, 1995 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Author: Chris G. Demetriou
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Modified for NetBSD/mips by Jason R. Thorpe, Numerical Aerospace
+ * Simulation Facility, NASA Ames Research Center.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+__RCSID("$NetBSD: kvm_mips.c,v 1.22 2014/02/19 20:21:22 dsl Exp $");
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * MIPS machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <machine/kcore.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+#include <mips/cpuregs.h>
+#include <mips/vmparam.h>
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+
+ /* Not actually used for anything right now, but safe. */
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a physical address.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ int page_off;
+ u_int pte;
+ paddr_t pte_pa;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return((off_t)0);
+ }
+
+ cpu_kh = kd->cpu_data;
+ page_off = va & PGOFSET;
+
+#ifdef _LP64
+ if (MIPS_XKPHYS_P(va)) {
+ /*
+ * Direct-mapped cached address: just convert it.
+ */
+ *pa = MIPS_XKPHYS_TO_PHYS(va);
+ return (NBPG - page_off);
+ }
+
+ if (va < MIPS_XKPHYS_START) {
+ /*
+ * XUSEG (user virtual address space) - invalid.
+ */
+ _kvm_err(kd, 0, "invalid kernel virtual address");
+ goto lose;
+ }
+#else
+ if (va < MIPS_KSEG0_START) {
+ /*
+ * KUSEG (user virtual address space) - invalid.
+ */
+ _kvm_err(kd, 0, "invalid kernel virtual address");
+ goto lose;
+ }
+#endif
+
+ if (MIPS_KSEG0_P(va)) {
+ /*
+ * Direct-mapped cached address: just convert it.
+ */
+ *pa = MIPS_KSEG0_TO_PHYS(va);
+ return (NBPG - page_off);
+ }
+
+ if (MIPS_KSEG1_P(va)) {
+ /*
+ * Direct-mapped uncached address: just convert it.
+ */
+ *pa = MIPS_KSEG1_TO_PHYS(va);
+ return (NBPG - page_off);
+ }
+
+#ifdef _LP64
+ if (va >= MIPS_KSEG2_START) {
+ /*
+ * KUSEG (user virtual address space) - invalid.
+ */
+ _kvm_err(kd, 0, "invalid kernel virtual address");
+ goto lose;
+ }
+#endif
+
+ /*
+ * We now know that we're a KSEG2 (kernel virtually mapped)
+ * address. Translate the address using the pmap's kernel
+ * page table.
+ */
+
+ /*
+ * Step 1: Make sure the kernel page table has a translation
+ * for the address.
+ */
+#ifdef _LP64
+ if (va >= (MIPS_XKSEG_START + (cpu_kh->sysmapsize * NBPG))) {
+ _kvm_err(kd, 0, "invalid XKSEG address");
+ goto lose;
+ }
+#else
+ if (va >= (MIPS_KSEG2_START + (cpu_kh->sysmapsize * NBPG))) {
+ _kvm_err(kd, 0, "invalid KSEG2 address");
+ goto lose;
+ }
+#endif
+
+ /*
+ * Step 2: Locate and read the PTE.
+ */
+ pte_pa = cpu_kh->sysmappa +
+ (((va - MIPS_KSEG2_START) >> PGSHIFT) * sizeof(u_int));
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte),
+ _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read PTE");
+ goto lose;
+ }
+
+ /*
+ * Step 3: Validate the PTE and return the physical address.
+ */
+ if ((pte & cpu_kh->pg_v) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PTE)");
+ goto lose;
+ }
+ *pa = (((pte & cpu_kh->pg_frame) >> cpu_kh->pg_shift) << PGSHIFT) +
+ page_off;
+ return (NBPG - page_off);
+
+ lose:
+ *pa = -1;
+ return (0);
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ramsegs;
+ off_t off;
+ int i;
+
+ cpu_kh = kd->cpu_data;
+ ramsegs = (phys_ram_seg_t *)((char *)cpu_kh + ALIGN(sizeof *cpu_kh));
+
+ off = 0;
+ for (i = 0; i < cpu_kh->nmemsegs; i++) {
+ if (pa >= ramsegs[i].start &&
+ (pa - ramsegs[i].start) < ramsegs[i].size) {
+ off += (pa - ramsegs[i].start);
+ break;
+ }
+ off += ramsegs[i].size;
+ }
+
+ return (kd->dump_off + off);
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
--- /dev/null
+.\" $NetBSD: kvm_nlist.3,v 1.11 2003/08/07 16:44:37 agc Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm_nlist.3 8.1 (Berkeley) 6/4/93
+.\"
+.Dd May 11, 2003
+.Dt KVM_NLIST 3
+.Os
+.Sh NAME
+.Nm kvm_nlist
+.Nd retrieve symbol table names from a kernel image
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.In nlist.h
+.Ft int
+.Fn kvm_nlist "kvm_t *kd" "struct nlist *nl"
+.Sh DESCRIPTION
+.Fn kvm_nlist
+retrieves the symbol table entries indicated by the name list argument
+.Fa \&nl .
+This argument points to an array of nlist structures, terminated by
+an entry whose n_name field is
+.Dv NULL
+(see
+.Xr nlist 3 ) .
+Each symbol is looked up using the n_name field, and if found, the
+corresponding n_type and n_value fields are filled in.
+These fields are set to 0 if the symbol is not found.
+.Pp
+If
+.Fa \&kd
+was created by a call to
+.Fn kvm_open
+with a
+.Dv NULL
+executable image name,
+.Fn kvm_nlist
+will use
+.Pa /dev/ksyms
+to retrieve the kernel symbol table.
+.Sh RETURN VALUES
+The
+.Fn kvm_nlist
+function returns the number of invalid entries found.
+If the kernel symbol table was unreadable, -1 is returned.
+.Sh FILES
+.Bl -tag -width /dev/ksyms -compact
+.It Pa /dev/ksyms
+.El
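+.Sh EXAMPLES
+A minimal sketch that looks up a hypothetical kernel symbol,
+.Dq Li _example_counter ,
+in the running kernel and prints its value (error handling abbreviated):
+.Bd -literal -offset indent
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <nlist.h>
+#include <stdio.h>
+
+int
+main(void)
+{
+	char errbuf[_POSIX2_LINE_MAX];
+	struct nlist nl[] = {
+		{ "_example_counter" },	/* hypothetical symbol name */
+		{ NULL }		/* NULL n_name terminates the list */
+	};
+	kvm_t *kd;
+	long value;	/* assume the symbol is a long-sized counter */
+
+	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
+	if (kd == NULL)
+		return 1;
+	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0)
+		return 1;
+	/* n_value now holds the kernel virtual address of the symbol. */
+	if (kvm_read(kd, nl[0].n_value, &value, sizeof(value)) !=
+	    sizeof(value))
+		return 1;
+	printf("%ld\en", value);
+	(void)kvm_close(kd);
+	return 0;
+}
+.Ed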
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3 ,
+.Xr ksyms 4
--- /dev/null
+.\" $NetBSD: kvm_open.3,v 1.18 2011/09/12 21:11:32 christos Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm_open.3 8.3 (Berkeley) 4/19/94
+.\"
+.Dd September 14, 2011
+.Dt KVM_OPEN 3
+.Os
+.Sh NAME
+.Nm kvm_open ,
+.Nm kvm_openfiles ,
+.Nm kvm_close
+.Nd initialize kernel virtual memory access
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In fcntl.h
+.In kvm.h
+.Ft kvm_t *
+.Fn kvm_open "const char *execfile" "const char *corefile" "char *swapfile" "int flags" "const char *errstr"
+.Ft kvm_t *
+.Fn kvm_openfiles "const char *execfile" "const char *corefile" "char *swapfile" "int flags" "char *errbuf"
+.Ft int
+.Fn kvm_close "kvm_t *kd"
+.Sh DESCRIPTION
+The functions
+.Fn kvm_open
+and
+.Fn kvm_openfiles
+return a descriptor used to access kernel virtual memory
+via the
+.Xr kvm 3
+library routines.
+Both active kernels and crash dumps are accessible
+through this interface.
+.Pp
+.Fa execfile
+is the executable image of the kernel being examined.
+This file must contain a symbol table.
+If this argument is
+.Dv NULL ,
+the currently running system is assumed; in this case, the functions will
+attempt to use the
+.Xr ksyms 4
+device indicated by
+.Dv _PATH_KSYMS
+in
+.In paths.h ;
+if that fails, then they will use the file indicated by the
+.Xr sysctl 3
+variable
+.Va machdep.booted_kernel ,
+or (if the sysctl information is not available)
+the default kernel path indicated by
+.Dv _PATH_UNIX
+in
+.In paths.h .
+.Pp
+.Fa corefile
+is the kernel memory device file.
+It can be either
+.Pa /dev/mem
+or a crash dump core generated by
+.Xr savecore 8 .
+If
+.Fa corefile
+is
+.Dv NULL ,
+the default indicated by
+.Dv _PATH_MEM
+from
+.In paths.h
+is used.
+.Pp
+.Fa swapfile
+should indicate the swap device.
+If
+.Dv NULL ,
+.Dv _PATH_DRUM
+from
+.In paths.h
+is used.
+.Pp
+The
+.Fa flags
+argument indicates read/write access as in
+.Xr open 2
+and applies only to the core file.
+The only permitted flags from
+.Xr open 2
+are
+.Dv O_RDONLY ,
+.Dv O_WRONLY ,
+and
+.Dv O_RDWR .
+.Pp
+As a special case, a
+.Fa flags
+argument of
+.Dv KVM_NO_FILES
+will initialize the
+.Xr kvm 3
+library for use on active kernels only using
+.Xr sysctl 3
+for retrieving kernel data and ignores the
+.Fa execfile ,
+.Fa corefile
+and
+.Fa swapfile
+arguments.
+Only a small subset of the
+.Xr kvm 3
+library functions are available using this method.
+These are currently
+.Xr kvm_getproc2 3 ,
+.Xr kvm_getargv2 3
+and
+.Xr kvm_getenvv2 3 .
+.Pp
+There are two open routines which differ only with respect to
+the error mechanism.
+One provides backward compatibility with the SunOS kvm library, while the
+other provides an improved error reporting framework.
+.Pp
+The
+.Fn kvm_open
+function is the Sun kvm compatible open call.
+Here, the
+.Fa errstr
+argument indicates how errors should be handled.
+If it is
+.Dv NULL ,
+no errors are reported and the application cannot know the
+specific nature of the failed kvm call.
+If it is not
+.Dv NULL ,
+errors are printed to stderr with
+.Fa errstr
+prepended to the message, as in
+.Xr perror 3 .
+Normally, the name of the program is used here.
+The string is assumed to persist at least until the corresponding
+.Fn kvm_close
+call.
+.Pp
+The
+.Fn kvm_openfiles
+function provides
+.Bx
+style error reporting.
+Here, error messages are not printed out by the library.
+Instead, the application obtains the error message
+corresponding to the most recent kvm library call using
+.Fn kvm_geterr
+(see
+.Xr kvm_geterr 3 ) .
+The results are undefined if the most recent kvm call did not produce
+an error.
+Since
+.Fn kvm_geterr
+requires a kvm descriptor, but the open routines return
+.Dv NULL
+on failure,
+.Fn kvm_geterr
+cannot be used to get the error message if open fails.
+Thus,
+.Fn kvm_openfiles
+will place any error message in the
+.Fa errbuf
+argument.
+This buffer should be _POSIX2_LINE_MAX characters large (from
+.In limits.h ) .
+.Sh RETURN VALUES
+The
+.Fn kvm_open
+and
+.Fn kvm_openfiles
+functions both return a descriptor to be used
+in all subsequent kvm library calls.
+The library is fully re-entrant.
+On failure,
+.Dv NULL
+is returned, in which case
+.Fn kvm_openfiles
+writes the error message into
+.Fa errbuf .
+.Pp
+The
+.Fn kvm_close
+function returns 0 on success and -1 on failure.
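+.Sh EXAMPLES
+A minimal sketch that opens the running kernel read-only with
+.Fn kvm_openfiles
+and reports a failure through the caller-supplied error buffer:
+.Bd -literal -offset indent
+#include <fcntl.h>
+#include <kvm.h>
+#include <limits.h>
+#include <stdio.h>
+
+int
+main(void)
+{
+	char errbuf[_POSIX2_LINE_MAX];
+	kvm_t *kd;
+
+	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
+	if (kd == NULL) {
+		fprintf(stderr, "kvm_openfiles: %s\en", errbuf);
+		return 1;
+	}
+
+	/* ... examine the kernel using the kvm(3) routines ... */
+
+	return kvm_close(kd) == -1 ? 1 : 0;
+}
+.Ed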
+.Sh SEE ALSO
+.Xr open 2 ,
+.Xr kvm 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getkernelname 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_read 3 ,
+.Xr kvm_write 3
+.Sh BUGS
+There should not be two open calls.
+The ill-defined error semantics of the Sun library
+and the desire to have a backward-compatible library for
+.Bx
+left little choice.
--- /dev/null
+/* $NetBSD: kvm_or1k.c,v 1.1 2014/09/03 19:34:26 matt Exp $ */
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * OR1K machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/types.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <db.h>
+#include <limits.h>
+#include <kvm.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "kvm_private.h"
+
+#include <sys/kcore.h>
+#include <machine/kcore.h>
+#include <machine/vmparam.h>
+
+__RCSID("$NetBSD: kvm_or1k.c,v 1.1 2014/09/03 19:34:26 matt Exp $");
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return 0;
+}
+
+/*
+ * Translate a KVA to a PA
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+// cpu_kcore_hdr_t *cpu_kh;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return 0;
+ }
+
+ /* No hit -- no translation */
+ *pa = (u_long)~0UL;
+ return 0;
+}
+
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ram;
+ off_t off;
+ void *e;
+
+ cpu_kh = kd->cpu_data;
+ e = (char *) kd->cpu_data + kd->cpu_dsize;
+ ram = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));
+ off = kd->dump_off;
+ do {
+ if (pa >= ram->start && (pa - ram->start) < ram->size) {
+ return off + (pa - ram->start);
+ }
+		off += ram->size;
+		ram++;
+ } while ((void *) ram < e && ram->size);
+
+ _kvm_err(kd, 0, "pa2off failed for pa %#" PRIxPADDR "\n", pa);
+ return (off_t) -1;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_powerpc.c,v 1.13 2014/01/27 21:00:01 matt Exp $ */
+
+/*
+ * Copyright (c) 2005 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Allen Briggs for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*-
+ * Copyright (C) 1996 Wolfgang Solfrank.
+ * Copyright (C) 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PowerPC machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/types.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <db.h>
+#include <limits.h>
+#include <kvm.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "kvm_private.h"
+
+#include <sys/kcore.h>
+#include <machine/kcore.h>
+
+#include <powerpc/spr.h>
+#include <powerpc/oea/spr.h>
+#include <powerpc/oea/bat.h>
+#include <powerpc/oea/pte.h>
+
+__RCSID("$NetBSD: kvm_powerpc.c,v 1.13 2014/01/27 21:00:01 matt Exp $");
+
+static int _kvm_match_601bat(kvm_t *, vaddr_t, paddr_t *, int *);
+static int _kvm_match_bat(kvm_t *, vaddr_t, paddr_t *, int *);
+static int _kvm_match_sr(kvm_t *, vaddr_t, paddr_t *, int *);
+static struct pte *_kvm_scan_pteg(struct pteg *, uint32_t, uint32_t, int);
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return 0;
+}
+
+#define BAT601_SIZE(b) ((((b) << 17) | ~BAT601_BLPI) + 1)
+
+static int
+_kvm_match_601bat(kvm_t *kd, vaddr_t va, paddr_t *pa, int *off)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ u_long pgoff;
+ size_t size;
+ int i, nbat;
+
+ cpu_kh = kd->cpu_data;
+ nbat = 4;
+ for (i=0 ; i<nbat ; i++) {
+ if (!BAT601_VALID_P(cpu_kh->dbatu[i]))
+ continue;
+ if (BAT601_VA_MATCH_P(cpu_kh->dbatu[i], cpu_kh->dbatl[i], va)) {
+ size = BAT601_SIZE(cpu_kh->dbatu[i] & BAT601_BSM);
+ pgoff = va & (size-1);
+ *pa = (cpu_kh->dbatl[i] & BAT601_PBN) + pgoff;
+ *off = size - pgoff;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+#undef BAT601_SIZE
+
+#define BAT_SIZE(b) ((((b) << 15) | ~BAT_EPI) + 1)
+
+static int
+_kvm_match_bat(kvm_t *kd, vaddr_t va, paddr_t *pa, int *off)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ u_long pgoff;
+ size_t size;
+ int i, nbat;
+
+ cpu_kh = kd->cpu_data;
+ /*
+ * Assume that we're looking for data and check only the dbats.
+ */
+ nbat = 8;
+ for (i=0 ; i<nbat ; i++) {
+ if ( ((cpu_kh->dbatu[i] & BAT_Vs) != 0)
+ && (BAT_VA_MATCH_P(cpu_kh->dbatu[i], va))) {
+ size = BAT_SIZE(cpu_kh->dbatu[i] & BAT_BL);
+ pgoff = va & (size-1);
+ *pa = (cpu_kh->dbatl[i] & BAT_RPN) + pgoff;
+ *off = size - pgoff;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+#undef BAT_SIZE
+
+#define SR_VSID_HASH_MASK 0x0007ffff
+
+static struct pte *
+_kvm_scan_pteg(struct pteg *pteg, uint32_t vsid, uint32_t api, int secondary)
+{
+ struct pte *pte;
+ u_long ptehi;
+ int i;
+
+ for (i=0 ; i<8 ; i++) {
+ pte = &pteg->pt[i];
+ ptehi = (u_long) pte->pte_hi;
+ if ((ptehi & PTE_VALID) == 0)
+ continue;
+ if ((ptehi & PTE_HID) != secondary)
+ continue;
+ if (((ptehi & PTE_VSID) >> PTE_VSID_SHFT) != vsid)
+ continue;
+ if (((ptehi & PTE_API) >> PTE_API_SHFT) != api)
+ continue;
+ return pte;
+ }
+ return NULL;
+}
+
+#define HASH_MASK 0x0007ffff
+
+static int
+_kvm_match_sr(kvm_t *kd, vaddr_t va, paddr_t *pa, int *off)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ struct pteg pteg;
+ struct pte *pte;
+ uint32_t sr, pgoff, vsid, pgidx, api, hash;
+ uint32_t htaborg, htabmask, mhash;
+ paddr_t pteg_vaddr;
+
+ cpu_kh = kd->cpu_data;
+
+ sr = cpu_kh->sr[(va >> 28) & 0xf];
+ if ((sr & SR_TYPE) != 0) {
+ /* Direct-store segment (shouldn't be) */
+ return 0;
+ }
+
+ pgoff = va & ADDR_POFF;
+ vsid = sr & SR_VSID;
+ pgidx = (va & ADDR_PIDX) >> ADDR_PIDX_SHFT;
+ api = pgidx >> 10;
+ hash = (vsid & HASH_MASK) ^ pgidx;
+
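+	/*
+	 * SDR1 holds the hashed page table base (HTABORG) in its upper
+	 * bits and the hash table mask (HTABMASK) in its low-order bits;
+	 * combine them with the primary hash to form the physical
+	 * address of the PTEG to search.
+	 */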
+ htaborg = cpu_kh->sdr1 & 0xffff0000;
+ htabmask = cpu_kh->sdr1 & 0x1ff;
+
+ mhash = (hash >> 10) & htabmask;
+
+ pteg_vaddr = ( htaborg & 0xfe000000) | ((hash & 0x3ff) << 6)
+ | ((htaborg & 0x01ff0000) | (mhash << 16));
+
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pteg, sizeof(pteg),
+ _kvm_pa2off(kd, pteg_vaddr)) != sizeof(pteg)) {
+ _kvm_syserr(kd, 0, "could not read primary PTEG");
+ return 0;
+ }
+
+ if ((pte = _kvm_scan_pteg(&pteg, vsid, api, 0)) != NULL) {
+ *pa = (pte->pte_lo & PTE_RPGN) | pgoff;
+ *off = NBPG - pgoff;
+ return 1;
+ }
+
+ hash = (~hash) & HASH_MASK;
+ mhash = (hash >> 10) & htabmask;
+
+ pteg_vaddr = ( htaborg & 0xfe000000) | ((hash & 0x3ff) << 6)
+ | ((htaborg & 0x01ff0000) | (mhash << 16));
+
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pteg, sizeof(pteg),
+ _kvm_pa2off(kd, pteg_vaddr)) != sizeof(pteg)) {
+ _kvm_syserr(kd, 0, "could not read secondary PTEG");
+ return 0;
+ }
+
+ if ((pte = _kvm_scan_pteg(&pteg, vsid, api, 0)) != NULL) {
+ *pa = (pte->pte_lo & PTE_RPGN) | pgoff;
+ *off = NBPG - pgoff;
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Translate a KVA to a PA
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ int offs;
+ uint32_t pvr;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return 0;
+ }
+
+ cpu_kh = kd->cpu_data;
+
+ pvr = (cpu_kh->pvr >> 16);
+ if (MPC745X_P(pvr))
+ pvr = MPC7450;
+
+ switch (pvr) {
+ case MPC601:
+ /* Check for a BAT hit first */
+ if (_kvm_match_601bat(kd, va, pa, &offs)) {
+ return offs;
+ }
+
+ /* No BAT hit; check page tables */
+ if (_kvm_match_sr(kd, va, pa, &offs)) {
+ return offs;
+ }
+ break;
+
+ case MPC603:
+ case MPC603e:
+ case MPC603ev:
+ case MPC604:
+ case MPC604ev:
+ case MPC750:
+ case IBM750FX:
+ case MPC7400:
+ case MPC7450:
+ case MPC7410:
+ case MPC8240:
+ case MPC8245:
+ /* Check for a BAT hit first */
+ if (_kvm_match_bat(kd, va, pa, &offs)) {
+ return offs;
+ }
+
+ /* No BAT hit; check page tables */
+ if (_kvm_match_sr(kd, va, pa, &offs)) {
+ return offs;
+ }
+ break;
+
+ default:
+ _kvm_err(kd, 0, "Unsupported CPU type (pvr 0x%08lx)!",
+ (unsigned long) cpu_kh->pvr);
+ break;
+ }
+
+ /* No hit -- no translation */
+ *pa = (paddr_t)~0UL;
+ return 0;
+}
+
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ram;
+ off_t off;
+ void *e;
+
+ cpu_kh = kd->cpu_data;
+ e = (char *) kd->cpu_data + kd->cpu_dsize;
+ ram = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));
+ off = kd->dump_off;
+ do {
+ if (pa >= ram->start && (pa - ram->start) < ram->size) {
+ return off + (pa - ram->start);
+ }
+		off += ram->size;
+		ram++;
+ } while ((void *) ram < e && ram->size);
+
+ _kvm_err(kd, 0, "pa2off failed for pa %#" PRIxPADDR "\n", pa);
+ return (off_t) -1;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+ uintptr_t max_uva;
+ extern struct ps_strings *__ps_strings;
+
+#if 0 /* XXX - These vary across powerpc machines... */
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+#endif
+ /* This is somewhat hack-ish, but it works. */
+ max_uva = (uintptr_t) (__ps_strings + 1);
+ kd->usrstack = max_uva;
+ kd->max_uva = max_uva;
+ kd->min_uva = 0;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_powerpc64.c,v 1.6 2014/08/23 02:25:23 matt Exp $ */
+
+/*
+ * Copyright (c) 2005 Wasabi Systems, Inc.
+ * All rights reserved.
+ *
+ * Written by Allen Briggs for Wasabi Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed for the NetBSD Project by
+ * Wasabi Systems, Inc.
+ * 4. The name of Wasabi Systems, Inc. may not be used to endorse
+ * or promote products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/*-
+ * Copyright (C) 1996 Wolfgang Solfrank.
+ * Copyright (C) 1996 TooLs GmbH.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by TooLs GmbH.
+ * 4. The name of TooLs GmbH may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PowerPC machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/types.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <db.h>
+#include <limits.h>
+#include <kvm.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "kvm_private.h"
+
+#include <sys/kcore.h>
+#include <machine/kcore.h>
+
+#include <powerpc/spr.h>
+#include <powerpc/oea/bat.h>
+#include <powerpc/oea/pte.h>
+
+__RCSID("$NetBSD: kvm_powerpc64.c,v 1.6 2014/08/23 02:25:23 matt Exp $");
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return 0;
+}
+
+#define SR_VSID_HASH_MASK 0x0007ffff
+
+#define HASH_MASK 0x0007ffff
+
+/*
+ * Translate a KVA to a PA
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return 0;
+ }
+
+#if 0
+ cpu_kcore_hdr_t *cpu_kh = kd->cpu_data;
+ uint32_t pvr = (cpu_kh->pvr >> 16);
+#endif
+
+
+ /* No hit -- no translation */
+ *pa = (u_long)~0UL;
+ return 0;
+}
+
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ram;
+ off_t off;
+ void *e;
+
+ cpu_kh = kd->cpu_data;
+ e = (char *) kd->cpu_data + kd->cpu_dsize;
+ ram = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));
+ off = kd->dump_off;
+ do {
+ if (pa >= ram->start && (pa - ram->start) < ram->size) {
+ return off + (pa - ram->start);
+ }
+		off += ram->size;
+		ram++;
+ } while ((void *) ram < e && ram->size);
+
+ _kvm_err(kd, 0, "pa2off failed for pa 0x%08lx\n", pa);
+ return (off_t) -1;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+ uintptr_t max_uva;
+ extern struct ps_strings *__ps_strings;
+
+#if 0 /* XXX - These vary across powerpc machines... */
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+#endif
+ /* This is somewhat hack-ish, but it works. */
+ max_uva = (uintptr_t) (__ps_strings + 1);
+ kd->usrstack = max_uva;
+ kd->max_uva = max_uva;
+ kd->min_uva = 0;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_private.h,v 1.20 2011/09/12 21:11:32 christos Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kvm_private.h 8.1 (Berkeley) 6/4/93
+ */
+
+struct __kvm {
+ /*
+ * a string to be prepended to error messages
+ * provided for compatibility with sun's interface
+ * if this value is null, errors are saved in errbuf[]
+ */
+ const char *program;
+ char *errp; /* XXX this can probably go away */
+ char errbuf[_POSIX2_LINE_MAX];
+ int pmfd; /* physical memory file (or crash dump) */
+ int vmfd; /* virtual memory file (-1 if crash dump) */
+ int swfd; /* swap file (e.g., /dev/drum) */
+ int nlfd; /* namelist file (e.g., /vmunix) */
+ char alive; /* live kernel? */
+ struct kinfo_proc *procbase;
+ struct kinfo_proc2 *procbase2;
+ struct kinfo_lwp *lwpbase;
+ size_t procbase_len;
+ size_t procbase2_len;
+ size_t lwpbase_len;
+ u_long usrstack; /* address of end of user stack */
+ u_long min_uva, max_uva; /* min/max user virtual address */
+ int nbpg; /* page size */
+ char *swapspc; /* (dynamic) storage for swapped pages */
+ char *argspc, *argbuf; /* (dynamic) storage for argv strings */
+ size_t argspc_len; /* length of the above */
+ char **argv; /* (dynamic) storage for argv pointers */
+ int argc; /* length of above (not actual # present) */
+
+ /*
+ * Header structures for kernel dumps. Only gets filled in for
+ * dead kernels.
+ */
+ struct kcore_hdr *kcore_hdr;
+ size_t cpu_dsize;
+ void *cpu_data;
+ off_t dump_off; /* Where the actual dump starts */
+
+ /*
+ * Kernel virtual address translation state. This only gets filled
+ * in for dead kernels; otherwise, the running kernel (i.e. kmem)
+ * will do the translations for us. It could be big, so we
+ * only allocate it if necessary.
+ */
+ struct vmstate *vmst; /* XXX: should become obsoleted */
+ /*
+ * These kernel variables are used for looking up user addresses,
+ * and are cached for efficiency.
+ */
+ struct pglist *vm_page_buckets;
+ int vm_page_hash_mask;
+ /* Buffer for raw disk I/O. */
+ size_t fdalign;
+ uint8_t *iobuf;
+ size_t iobufsz;
+ char kernelname[MAXPATHLEN];
+};
+
+/* Levels of aliveness */
+#define KVM_ALIVE_DEAD 0 /* dead, working from core file */
+#define KVM_ALIVE_FILES 1 /* alive, working from open kmem/drum */
+#define KVM_ALIVE_SYSCTL 2 /* alive, sysctl-type calls only */
+
+#define ISALIVE(kd) ((kd)->alive != KVM_ALIVE_DEAD)
+#define ISKMEM(kd) ((kd)->alive == KVM_ALIVE_FILES)
+#define ISSYSCTL(kd) ((kd)->alive == KVM_ALIVE_SYSCTL || ISKMEM(kd))
+
+/*
+ * Functions used internally by kvm, but across kvm modules.
+ */
+void _kvm_err(kvm_t *, const char *, const char *, ...)
+ __attribute__((__format__(__printf__, 3, 4)));
+int _kvm_dump_mkheader(kvm_t *, kvm_t *);
+void _kvm_freeprocs(kvm_t *);
+void _kvm_freevtop(kvm_t *);
+int _kvm_mdopen(kvm_t *);
+int _kvm_initvtop(kvm_t *);
+int _kvm_kvatop(kvm_t *, vaddr_t, paddr_t *);
+void *_kvm_malloc(kvm_t *, size_t);
+off_t _kvm_pa2off(kvm_t *, paddr_t);
+void *_kvm_realloc(kvm_t *, void *, size_t);
+void _kvm_syserr(kvm_t *, const char *, const char *, ...)
+ __attribute__((__format__(__printf__, 3, 4)));
+ssize_t _kvm_pread(kvm_t *, int, void *, size_t, off_t);
+
+#define KREAD(kd, addr, obj) \
+ (kvm_read(kd, addr, (obj), sizeof(*obj)) != sizeof(*obj))
+
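+/*
+ * Ensure kd->member can hold at least "size" bytes, allocating or
+ * growing the buffer and updating kd->member_len as a side effect.
+ * On allocation failure the length is reset to 0 and the *enclosing*
+ * function returns NULL, so the macro may only be used in functions
+ * that return a pointer.
+ */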
+#define KVM_ALLOC(kd, member, size) \
+ do { \
+ if (kd->member == NULL) \
+ kd->member = _kvm_malloc(kd, kd->member ## _len = size); \
+ else if (kd->member ## _len < size) \
+ kd->member = _kvm_realloc(kd, kd->member, \
+ kd->member ## _len = size); \
+ if (kd->member == NULL) { \
+ kd->member ## _len = 0; \
+ return (NULL); \
+ } \
+ } while (/*CONSTCOND*/0)
--- /dev/null
+/* $NetBSD: kvm_proc.c,v 1.90 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1998 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Charles M. Hannum.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_proc.c 8.3 (Berkeley) 9/23/93";
+#else
+__RCSID("$NetBSD: kvm_proc.c,v 1.90 2014/02/19 20:21:22 dsl Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * Proc traversal interface for kvm. ps and w are (probably) the exclusive
+ * users of this code, so we've factored it out into a separate module.
+ * Thus, we keep this grunge out of the other kvm applications (i.e.,
+ * most other applications are interested only in open/close/read/nlist).
+ */
+
+#include <sys/param.h>
+#include <sys/lwp.h>
+#include <sys/proc.h>
+#include <sys/exec.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/tty.h>
+#include <sys/resourcevar.h>
+#include <sys/mutex.h>
+#include <sys/specificdata.h>
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+#include <uvm/uvm_param.h>
+#include <uvm/uvm_amap.h>
+#include <uvm/uvm_page.h>
+
+#include <sys/sysctl.h>
+
+#include <limits.h>
+#include <db.h>
+#include <paths.h>
+
+#include "kvm_private.h"
+
+/*
+ * Common info from kinfo_proc and kinfo_proc2 used by helper routines.
+ */
+struct miniproc {
+ struct vmspace *p_vmspace;
+ char p_stat;
+ struct proc *p_paddr;
+ pid_t p_pid;
+};
+
+/*
+ * Convert from struct proc and kinfo_proc{,2} to miniproc.
+ */
+#define PTOMINI(kp, p) \
+ do { \
+ (p)->p_stat = (kp)->p_stat; \
+ (p)->p_pid = (kp)->p_pid; \
+ (p)->p_paddr = NULL; \
+ (p)->p_vmspace = (kp)->p_vmspace; \
+ } while (/*CONSTCOND*/0);
+
+#define KPTOMINI(kp, p) \
+ do { \
+ (p)->p_stat = (kp)->kp_proc.p_stat; \
+ (p)->p_pid = (kp)->kp_proc.p_pid; \
+ (p)->p_paddr = (kp)->kp_eproc.e_paddr; \
+ (p)->p_vmspace = (kp)->kp_proc.p_vmspace; \
+ } while (/*CONSTCOND*/0);
+
+#define KP2TOMINI(kp, p) \
+ do { \
+ (p)->p_stat = (kp)->p_stat; \
+ (p)->p_pid = (kp)->p_pid; \
+ (p)->p_paddr = (void *)(long)(kp)->p_paddr; \
+ (p)->p_vmspace = (void *)(long)(kp)->p_vmspace; \
+ } while (/*CONSTCOND*/0);
+
+/*
+ * NetBSD uses kauth(9) to manage credentials, which are stored in kauth_cred_t,
+ * a kernel-only opaque type. This is an embedded version which is *INTERNAL* to
+ * kvm(3) so dumps can be read properly.
+ *
+ * Whenever NetBSD starts exporting credentials to userland consistently (using
+ * 'struct uucred', or something) this will have to be updated again.
+ */
+struct kvm_kauth_cred {
+ u_int cr_refcnt; /* reference count */
+ uint8_t cr_pad[CACHE_LINE_SIZE - sizeof(u_int)];
+ uid_t cr_uid; /* user id */
+ uid_t cr_euid; /* effective user id */
+ uid_t cr_svuid; /* saved effective user id */
+ gid_t cr_gid; /* group id */
+ gid_t cr_egid; /* effective group id */
+ gid_t cr_svgid; /* saved effective group id */
+ u_int cr_ngroups; /* number of groups */
+ gid_t cr_groups[NGROUPS]; /* group memberships */
+ specificdata_reference cr_sd; /* specific data */
+};
+
+/* XXX: What uses these two functions? */
+char *_kvm_uread(kvm_t *, const struct proc *, u_long, u_long *);
+ssize_t kvm_uread(kvm_t *, const struct proc *, u_long, char *,
+ size_t);
+
+static char *_kvm_ureadm(kvm_t *, const struct miniproc *, u_long,
+ u_long *);
+static ssize_t kvm_ureadm(kvm_t *, const struct miniproc *, u_long,
+ char *, size_t);
+
+static char **kvm_argv(kvm_t *, const struct miniproc *, u_long, int, int);
+static int kvm_deadprocs(kvm_t *, int, int, u_long, u_long, int);
+static char **kvm_doargv(kvm_t *, const struct miniproc *, int,
+ void (*)(struct ps_strings *, u_long *, int *));
+static char **kvm_doargv2(kvm_t *, pid_t, int, int);
+static int kvm_proclist(kvm_t *, int, int, struct proc *,
+ struct kinfo_proc *, int);
+static int proc_verify(kvm_t *, u_long, const struct miniproc *);
+static void ps_str_a(struct ps_strings *, u_long *, int *);
+static void ps_str_e(struct ps_strings *, u_long *, int *);
+
+
+static char *
+_kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long va, u_long *cnt)
+{
+ u_long addr, head;
+ u_long offset;
+ struct vm_map_entry vme;
+ struct vm_amap amap;
+ struct vm_anon *anonp, anon;
+ struct vm_page pg;
+ u_long slot;
+
+ if (kd->swapspc == NULL) {
+ kd->swapspc = _kvm_malloc(kd, (size_t)kd->nbpg);
+ if (kd->swapspc == NULL)
+ return (NULL);
+ }
+
+ /*
+ * Look through the address map for the memory object
+ * that corresponds to the given virtual address.
+ * The header just has the entire valid range.
+ */
+ head = (u_long)&p->p_vmspace->vm_map.header;
+ addr = head;
+ for (;;) {
+ if (KREAD(kd, addr, &vme))
+ return (NULL);
+
+ if (va >= vme.start && va < vme.end &&
+ vme.aref.ar_amap != NULL)
+ break;
+
+ addr = (u_long)vme.next;
+ if (addr == head)
+ return (NULL);
+ }
+
+ /*
+ * we found the map entry, now to find the object...
+ */
+ if (vme.aref.ar_amap == NULL)
+ return (NULL);
+
+ addr = (u_long)vme.aref.ar_amap;
+ if (KREAD(kd, addr, &amap))
+ return (NULL);
+
+ offset = va - vme.start;
+ slot = offset / kd->nbpg + vme.aref.ar_pageoff;
+ /* sanity-check slot number */
+ if (slot > amap.am_nslot)
+ return (NULL);
+
+ addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
+ if (KREAD(kd, addr, &anonp))
+ return (NULL);
+
+ addr = (u_long)anonp;
+ if (KREAD(kd, addr, &anon))
+ return (NULL);
+
+ addr = (u_long)anon.an_page;
+ if (addr) {
+ if (KREAD(kd, addr, &pg))
+ return (NULL);
+
+ if (_kvm_pread(kd, kd->pmfd, kd->swapspc, (size_t)kd->nbpg,
+ (off_t)pg.phys_addr) != kd->nbpg)
+ return (NULL);
+ } else {
+ if (kd->swfd < 0 ||
+ _kvm_pread(kd, kd->swfd, kd->swapspc, (size_t)kd->nbpg,
+ (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
+ return (NULL);
+ }
+
+ /* Found the page. */
+ offset %= kd->nbpg;
+ *cnt = kd->nbpg - offset;
+ return (&kd->swapspc[(size_t)offset]);
+}
+
+char *
+_kvm_uread(kvm_t *kd, const struct proc *p, u_long va, u_long *cnt)
+{
+ struct miniproc mp;
+
+ PTOMINI(p, &mp);
+ return (_kvm_ureadm(kd, &mp, va, cnt));
+}
+
+/*
+ * Convert credentials located in kernel space address 'cred' and store
+ * them in the appropriate members of 'eproc'.
+ */
+static int
+_kvm_convertcred(kvm_t *kd, u_long cred, struct eproc *eproc)
+{
+ struct kvm_kauth_cred kauthcred;
+ struct ki_pcred *pc = &eproc->e_pcred;
+ struct ki_ucred *uc = &eproc->e_ucred;
+
+ if (KREAD(kd, cred, &kauthcred) != 0)
+ return (-1);
+
+ /* inlined version of kauth_cred_to_pcred, see kauth(9). */
+ pc->p_ruid = kauthcred.cr_uid;
+ pc->p_svuid = kauthcred.cr_svuid;
+ pc->p_rgid = kauthcred.cr_gid;
+ pc->p_svgid = kauthcred.cr_svgid;
+ pc->p_refcnt = kauthcred.cr_refcnt;
+ pc->p_pad = NULL;
+
+ /* inlined version of kauth_cred_to_ucred(), see kauth(9). */
+ uc->cr_ref = kauthcred.cr_refcnt;
+ uc->cr_uid = kauthcred.cr_euid;
+ uc->cr_gid = kauthcred.cr_egid;
+ uc->cr_ngroups = (uint32_t)MIN(kauthcred.cr_ngroups,
+ sizeof(uc->cr_groups) / sizeof(uc->cr_groups[0]));
+ memcpy(uc->cr_groups, kauthcred.cr_groups,
+ uc->cr_ngroups * sizeof(uc->cr_groups[0]));
+
+ return (0);
+}
+
+/*
+ * Read procs from memory file into buffer bp, which has space to hold
+ * at most maxcnt procs.
+ */
+static int
+kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
+ struct kinfo_proc *bp, int maxcnt)
+{
+ int cnt = 0;
+ int nlwps;
+ struct kinfo_lwp *kl;
+ struct eproc eproc;
+ struct pgrp pgrp;
+ struct session sess;
+ struct tty tty;
+ struct proc proc;
+
+ for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
+ if (KREAD(kd, (u_long)p, &proc)) {
+ _kvm_err(kd, kd->program, "can't read proc at %p", p);
+ return (-1);
+ }
+ if (_kvm_convertcred(kd, (u_long)proc.p_cred, &eproc) != 0) {
+ _kvm_err(kd, kd->program,
+ "can't read proc credentials at %p", p);
+ return (-1);
+ }
+
+ switch (what) {
+
+ case KERN_PROC_PID:
+ if (proc.p_pid != (pid_t)arg)
+ continue;
+ break;
+
+ case KERN_PROC_UID:
+ if (eproc.e_ucred.cr_uid != (uid_t)arg)
+ continue;
+ break;
+
+ case KERN_PROC_RUID:
+ if (eproc.e_pcred.p_ruid != (uid_t)arg)
+ continue;
+ break;
+ }
+ /*
+ * We're going to add another proc to the set. If this
+ * will overflow the buffer, assume the reason is because
+ * nprocs (or the proc list) is corrupt and declare an error.
+ */
+ if (cnt >= maxcnt) {
+ _kvm_err(kd, kd->program, "nprocs corrupt");
+ return (-1);
+ }
+ /*
+ * gather eproc
+ */
+ eproc.e_paddr = p;
+ if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
+ _kvm_err(kd, kd->program, "can't read pgrp at %p",
+ proc.p_pgrp);
+ return (-1);
+ }
+ eproc.e_sess = pgrp.pg_session;
+ eproc.e_pgid = pgrp.pg_id;
+ eproc.e_jobc = pgrp.pg_jobc;
+ if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
+ _kvm_err(kd, kd->program, "can't read session at %p",
+ pgrp.pg_session);
+ return (-1);
+ }
+ if ((proc.p_lflag & PL_CONTROLT) && sess.s_ttyp != NULL) {
+ if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
+ _kvm_err(kd, kd->program,
+ "can't read tty at %p", sess.s_ttyp);
+ return (-1);
+ }
+ eproc.e_tdev = (uint32_t)tty.t_dev;
+ eproc.e_tsess = tty.t_session;
+ if (tty.t_pgrp != NULL) {
+ if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
+ _kvm_err(kd, kd->program,
+ "can't read tpgrp at %p",
+ tty.t_pgrp);
+ return (-1);
+ }
+ eproc.e_tpgid = pgrp.pg_id;
+ } else
+ eproc.e_tpgid = -1;
+ } else
+ eproc.e_tdev = (uint32_t)NODEV;
+ eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
+ eproc.e_sid = sess.s_sid;
+ if (sess.s_leader == p)
+ eproc.e_flag |= EPROC_SLEADER;
+ /*
+ * Fill in the old-style proc.p_wmesg by copying the wmesg
+ * from the first available LWP.
+ */
+ kl = kvm_getlwps(kd, proc.p_pid,
+ (u_long)PTRTOUINT64(eproc.e_paddr),
+ sizeof(struct kinfo_lwp), &nlwps);
+ if (kl) {
+ if (nlwps > 0) {
+ strcpy(eproc.e_wmesg, kl[0].l_wmesg);
+ }
+ }
+ (void)kvm_read(kd, (u_long)proc.p_vmspace, &eproc.e_vm,
+ sizeof(eproc.e_vm));
+
+ eproc.e_xsize = eproc.e_xrssize = 0;
+ eproc.e_xccount = eproc.e_xswrss = 0;
+
+ switch (what) {
+
+ case KERN_PROC_PGRP:
+ if (eproc.e_pgid != (pid_t)arg)
+ continue;
+ break;
+
+ case KERN_PROC_TTY:
+ if ((proc.p_lflag & PL_CONTROLT) == 0 ||
+ eproc.e_tdev != (dev_t)arg)
+ continue;
+ break;
+ }
+ memcpy(&bp->kp_proc, &proc, sizeof(proc));
+ memcpy(&bp->kp_eproc, &eproc, sizeof(eproc));
+ ++bp;
+ ++cnt;
+ }
+ return (cnt);
+}
+
+/*
+ * Build proc info array by reading in proc list from a crash dump.
+ * Return number of procs read. maxcnt is the max we will read.
+ */
+static int
+kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
+ u_long a_zombproc, int maxcnt)
+{
+ struct kinfo_proc *bp = kd->procbase;
+ int acnt, zcnt;
+ struct proc *p;
+
+ if (KREAD(kd, a_allproc, &p)) {
+ _kvm_err(kd, kd->program, "cannot read allproc");
+ return (-1);
+ }
+ acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
+ if (acnt < 0)
+ return (acnt);
+
+ if (KREAD(kd, a_zombproc, &p)) {
+ _kvm_err(kd, kd->program, "cannot read zombproc");
+ return (-1);
+ }
+ zcnt = kvm_proclist(kd, what, arg, p, bp + acnt,
+ maxcnt - acnt);
+ if (zcnt < 0)
+ zcnt = 0;
+
+ return (acnt + zcnt);
+}
+
+struct kinfo_proc2 *
+kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
+{
+ size_t size;
+ int mib[6], st, nprocs;
+ struct pstats pstats;
+
+ if (ISSYSCTL(kd)) {
+ size = 0;
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC2;
+ mib[2] = op;
+ mib[3] = arg;
+ mib[4] = (int)esize;
+again:
+ mib[5] = 0;
+ st = sysctl(mib, 6, NULL, &size, NULL, (size_t)0);
+ if (st == -1) {
+ _kvm_syserr(kd, kd->program, "kvm_getproc2");
+ return (NULL);
+ }
+
+ mib[5] = (int) (size / esize);
+ KVM_ALLOC(kd, procbase2, size);
+ st = sysctl(mib, 6, kd->procbase2, &size, NULL, (size_t)0);
+ if (st == -1) {
+ if (errno == ENOMEM) {
+ goto again;
+ }
+ _kvm_syserr(kd, kd->program, "kvm_getproc2");
+ return (NULL);
+ }
+ nprocs = (int) (size / esize);
+ } else {
+ char *kp2c;
+ struct kinfo_proc *kp;
+ struct kinfo_proc2 kp2, *kp2p;
+ struct kinfo_lwp *kl;
+ int i, nlwps;
+
+ kp = kvm_getprocs(kd, op, arg, &nprocs);
+ if (kp == NULL)
+ return (NULL);
+
+ size = nprocs * esize;
+ KVM_ALLOC(kd, procbase2, size);
+ kp2c = (char *)(void *)kd->procbase2;
+ kp2p = &kp2;
+ for (i = 0; i < nprocs; i++, kp++) {
+ struct timeval tv;
+
+ kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
+ (u_long)PTRTOUINT64(kp->kp_eproc.e_paddr),
+ sizeof(struct kinfo_lwp), &nlwps);
+
+ if (kl == NULL) {
+ _kvm_syserr(kd, NULL,
+ "kvm_getlwps() failed on process %u\n",
+ kp->kp_proc.p_pid);
+ if (nlwps == 0)
+ return NULL;
+ else
+ continue;
+ }
+
+ /* We use kl[0] as the "representative" LWP */
+ memset(kp2p, 0, sizeof(kp2));
+ kp2p->p_forw = kl[0].l_forw;
+ kp2p->p_back = kl[0].l_back;
+ kp2p->p_paddr = PTRTOUINT64(kp->kp_eproc.e_paddr);
+ kp2p->p_addr = kl[0].l_addr;
+ kp2p->p_fd = PTRTOUINT64(kp->kp_proc.p_fd);
+ kp2p->p_cwdi = PTRTOUINT64(kp->kp_proc.p_cwdi);
+ kp2p->p_stats = PTRTOUINT64(kp->kp_proc.p_stats);
+ kp2p->p_limit = PTRTOUINT64(kp->kp_proc.p_limit);
+ kp2p->p_vmspace = PTRTOUINT64(kp->kp_proc.p_vmspace);
+ kp2p->p_sigacts = PTRTOUINT64(kp->kp_proc.p_sigacts);
+ kp2p->p_sess = PTRTOUINT64(kp->kp_eproc.e_sess);
+ kp2p->p_tsess = 0;
+#if 1 /* XXX: dsl - p_ru was only ever non-zero for zombies */
+ kp2p->p_ru = 0;
+#else
+ kp2p->p_ru = PTRTOUINT64(pstats.p_ru);
+#endif
+
+ kp2p->p_eflag = 0;
+ kp2p->p_exitsig = kp->kp_proc.p_exitsig;
+ kp2p->p_flag = kp->kp_proc.p_flag;
+
+ kp2p->p_pid = kp->kp_proc.p_pid;
+
+ kp2p->p_ppid = kp->kp_eproc.e_ppid;
+ kp2p->p_sid = kp->kp_eproc.e_sid;
+ kp2p->p__pgid = kp->kp_eproc.e_pgid;
+
+ kp2p->p_tpgid = -1 /* XXX NO_PGID! */;
+
+ kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
+ kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
+ kp2p->p_svuid = kp->kp_eproc.e_pcred.p_svuid;
+ kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
+ kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;
+ kp2p->p_svgid = kp->kp_eproc.e_pcred.p_svgid;
+
+ /*CONSTCOND*/
+ memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
+ MIN(sizeof(kp2p->p_groups),
+ sizeof(kp->kp_eproc.e_ucred.cr_groups)));
+ kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;
+
+ kp2p->p_jobc = kp->kp_eproc.e_jobc;
+ kp2p->p_tdev = kp->kp_eproc.e_tdev;
+ kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
+ kp2p->p_tsess = PTRTOUINT64(kp->kp_eproc.e_tsess);
+
+ kp2p->p_estcpu = 0;
+ bintime2timeval(&kp->kp_proc.p_rtime, &tv);
+ kp2p->p_rtime_sec = (uint32_t)tv.tv_sec;
+ kp2p->p_rtime_usec = (uint32_t)tv.tv_usec;
+ kp2p->p_cpticks = kl[0].l_cpticks;
+ kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
+ kp2p->p_swtime = kl[0].l_swtime;
+ kp2p->p_slptime = kl[0].l_slptime;
+#if 0 /* XXX thorpej */
+ kp2p->p_schedflags = kp->kp_proc.p_schedflags;
+#else
+ kp2p->p_schedflags = 0;
+#endif
+
+ kp2p->p_uticks = kp->kp_proc.p_uticks;
+ kp2p->p_sticks = kp->kp_proc.p_sticks;
+ kp2p->p_iticks = kp->kp_proc.p_iticks;
+
+ kp2p->p_tracep = PTRTOUINT64(kp->kp_proc.p_tracep);
+ kp2p->p_traceflag = kp->kp_proc.p_traceflag;
+
+ kp2p->p_holdcnt = kl[0].l_holdcnt;
+
+ memcpy(&kp2p->p_siglist,
+ &kp->kp_proc.p_sigpend.sp_set,
+ sizeof(ki_sigset_t));
+ memset(&kp2p->p_sigmask, 0,
+ sizeof(ki_sigset_t));
+ memcpy(&kp2p->p_sigignore,
+ &kp->kp_proc.p_sigctx.ps_sigignore,
+ sizeof(ki_sigset_t));
+ memcpy(&kp2p->p_sigcatch,
+ &kp->kp_proc.p_sigctx.ps_sigcatch,
+ sizeof(ki_sigset_t));
+
+ kp2p->p_stat = kl[0].l_stat;
+ kp2p->p_priority = kl[0].l_priority;
+ kp2p->p_usrpri = kl[0].l_priority;
+ kp2p->p_nice = kp->kp_proc.p_nice;
+
+ kp2p->p_xstat = kp->kp_proc.p_xstat;
+ kp2p->p_acflag = kp->kp_proc.p_acflag;
+
+ /*CONSTCOND*/
+ strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
+ MIN(sizeof(kp2p->p_comm),
+ sizeof(kp->kp_proc.p_comm)));
+
+ strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
+ sizeof(kp2p->p_wmesg));
+ kp2p->p_wchan = kl[0].l_wchan;
+ strncpy(kp2p->p_login, kp->kp_eproc.e_login,
+ sizeof(kp2p->p_login));
+
+ kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
+ kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
+ kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
+ kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;
+ kp2p->p_vm_vsize = kp->kp_eproc.e_vm.vm_map.size
+ / kd->nbpg;
+ /* Adjust mapped size */
+ kp2p->p_vm_msize =
+ (kp->kp_eproc.e_vm.vm_map.size / kd->nbpg) -
+ kp->kp_eproc.e_vm.vm_issize +
+ kp->kp_eproc.e_vm.vm_ssize;
+
+ kp2p->p_eflag = (int32_t)kp->kp_eproc.e_flag;
+
+ kp2p->p_realflag = kp->kp_proc.p_flag;
+ kp2p->p_nlwps = kp->kp_proc.p_nlwps;
+ kp2p->p_nrlwps = kp->kp_proc.p_nrlwps;
+ kp2p->p_realstat = kp->kp_proc.p_stat;
+
+ if (P_ZOMBIE(&kp->kp_proc) ||
+ kp->kp_proc.p_stats == NULL ||
+ KREAD(kd, (u_long)kp->kp_proc.p_stats, &pstats)) {
+ kp2p->p_uvalid = 0;
+ } else {
+ kp2p->p_uvalid = 1;
+
+ kp2p->p_ustart_sec = (u_int32_t)
+ pstats.p_start.tv_sec;
+ kp2p->p_ustart_usec = (u_int32_t)
+ pstats.p_start.tv_usec;
+
+ kp2p->p_uutime_sec = (u_int32_t)
+ pstats.p_ru.ru_utime.tv_sec;
+ kp2p->p_uutime_usec = (u_int32_t)
+ pstats.p_ru.ru_utime.tv_usec;
+ kp2p->p_ustime_sec = (u_int32_t)
+ pstats.p_ru.ru_stime.tv_sec;
+ kp2p->p_ustime_usec = (u_int32_t)
+ pstats.p_ru.ru_stime.tv_usec;
+
+ kp2p->p_uru_maxrss = pstats.p_ru.ru_maxrss;
+ kp2p->p_uru_ixrss = pstats.p_ru.ru_ixrss;
+ kp2p->p_uru_idrss = pstats.p_ru.ru_idrss;
+ kp2p->p_uru_isrss = pstats.p_ru.ru_isrss;
+ kp2p->p_uru_minflt = pstats.p_ru.ru_minflt;
+ kp2p->p_uru_majflt = pstats.p_ru.ru_majflt;
+ kp2p->p_uru_nswap = pstats.p_ru.ru_nswap;
+ kp2p->p_uru_inblock = pstats.p_ru.ru_inblock;
+ kp2p->p_uru_oublock = pstats.p_ru.ru_oublock;
+ kp2p->p_uru_msgsnd = pstats.p_ru.ru_msgsnd;
+ kp2p->p_uru_msgrcv = pstats.p_ru.ru_msgrcv;
+ kp2p->p_uru_nsignals = pstats.p_ru.ru_nsignals;
+ kp2p->p_uru_nvcsw = pstats.p_ru.ru_nvcsw;
+ kp2p->p_uru_nivcsw = pstats.p_ru.ru_nivcsw;
+
+ kp2p->p_uctime_sec = (u_int32_t)
+ (pstats.p_cru.ru_utime.tv_sec +
+ pstats.p_cru.ru_stime.tv_sec);
+ kp2p->p_uctime_usec = (u_int32_t)
+ (pstats.p_cru.ru_utime.tv_usec +
+ pstats.p_cru.ru_stime.tv_usec);
+ }
+
+ memcpy(kp2c, &kp2, esize);
+ kp2c += esize;
+ }
+ }
+ *cnt = nprocs;
+ return (kd->procbase2);
+}
+
+struct kinfo_lwp *
+kvm_getlwps(kvm_t *kd, int pid, u_long paddr, size_t esize, int *cnt)
+{
+ size_t size;
+ int mib[5], nlwps;
+ ssize_t st;
+ struct kinfo_lwp *kl;
+
+ if (ISSYSCTL(kd)) {
+ size = 0;
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_LWP;
+ mib[2] = pid;
+ mib[3] = (int)esize;
+ mib[4] = 0;
+again:
+ st = sysctl(mib, 5, NULL, &size, NULL, (size_t)0);
+ if (st == -1) {
+ switch (errno) {
+ case ESRCH: /* Treat this as a soft error; see kvm.c */
+ _kvm_syserr(kd, NULL, "kvm_getlwps");
+ return NULL;
+ default:
+ _kvm_syserr(kd, kd->program, "kvm_getlwps");
+ return NULL;
+ }
+ }
+ mib[4] = (int) (size / esize);
+ KVM_ALLOC(kd, lwpbase, size);
+ st = sysctl(mib, 5, kd->lwpbase, &size, NULL, (size_t)0);
+ if (st == -1) {
+ switch (errno) {
+ case ESRCH: /* Treat this as a soft error; see kvm.c */
+ _kvm_syserr(kd, NULL, "kvm_getlwps");
+ return NULL;
+ case ENOMEM:
+ goto again;
+ default:
+ _kvm_syserr(kd, kd->program, "kvm_getlwps");
+ return NULL;
+ }
+ }
+ nlwps = (int) (size / esize);
+ } else {
+ /* grovel through the memory image */
+ struct proc p;
+ struct lwp l;
+ u_long laddr;
+ void *back;
+ int i;
+
+ st = kvm_read(kd, paddr, &p, sizeof(p));
+ if (st == -1) {
+ _kvm_syserr(kd, kd->program, "kvm_getlwps");
+ return (NULL);
+ }
+
+ nlwps = p.p_nlwps;
+ size = nlwps * sizeof(*kd->lwpbase);
+ KVM_ALLOC(kd, lwpbase, size);
+ laddr = (u_long)PTRTOUINT64(p.p_lwps.lh_first);
+ for (i = 0; (i < nlwps) && (laddr != 0); i++) {
+ st = kvm_read(kd, laddr, &l, sizeof(l));
+ if (st == -1) {
+ _kvm_syserr(kd, kd->program, "kvm_getlwps");
+ return (NULL);
+ }
+ kl = &kd->lwpbase[i];
+ kl->l_laddr = laddr;
+ kl->l_forw = PTRTOUINT64(l.l_runq.tqe_next);
+ laddr = (u_long)PTRTOUINT64(l.l_runq.tqe_prev);
+ st = kvm_read(kd, laddr, &back, sizeof(back));
+ if (st == -1) {
+ _kvm_syserr(kd, kd->program, "kvm_getlwps");
+ return (NULL);
+ }
+ kl->l_back = PTRTOUINT64(back);
+ kl->l_addr = PTRTOUINT64(l.l_addr);
+ kl->l_lid = l.l_lid;
+ kl->l_flag = l.l_flag;
+ kl->l_swtime = l.l_swtime;
+ kl->l_slptime = l.l_slptime;
+ kl->l_schedflags = 0; /* XXX */
+ kl->l_holdcnt = 0;
+ kl->l_priority = l.l_priority;
+ kl->l_usrpri = l.l_priority;
+ kl->l_stat = l.l_stat;
+ kl->l_wchan = PTRTOUINT64(l.l_wchan);
+ if (l.l_wmesg)
+ (void)kvm_read(kd, (u_long)l.l_wmesg,
+ kl->l_wmesg, (size_t)WMESGLEN);
+ kl->l_cpuid = KI_NOCPU;
+ laddr = (u_long)PTRTOUINT64(l.l_sibling.le_next);
+ }
+ }
+
+ *cnt = nlwps;
+ return (kd->lwpbase);
+}
+
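For reference, a minimal consumer of kvm_getlwps() might look like the sketch below: it opens a sysctl-only descriptor and prints the LWP ids of the calling process. The KVM_NO_FILES open flag, the zero paddr argument (only consulted for dead kernels), and the printed fields are assumptions chosen for illustration; link with -lkvm.

/* Illustrative sketch only: list the LWPs of the calling process. */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_lwp *kl;
	kvm_t *kd;
	int i, nlwps;

	/* KVM_NO_FILES selects the sysctl-only backend (assumption). */
	kd = kvm_openfiles(NULL, NULL, NULL, KVM_NO_FILES, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return 1;
	}
	/* paddr is only consulted for dead kernels, so 0 suffices here. */
	kl = kvm_getlwps(kd, (int)getpid(), 0, sizeof(*kl), &nlwps);
	if (kl != NULL)
		for (i = 0; i < nlwps; i++)
			printf("lwp %d stat %d\n", (int)kl[i].l_lid,
			    (int)kl[i].l_stat);
	kvm_close(kd);
	return 0;
}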
+struct kinfo_proc *
+kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
+{
+ size_t size;
+ int mib[4], st, nprocs;
+
+ if (ISALIVE(kd)) {
+ size = 0;
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = op;
+ mib[3] = arg;
+ st = sysctl(mib, 4, NULL, &size, NULL, (size_t)0);
+ if (st == -1) {
+ _kvm_syserr(kd, kd->program, "kvm_getprocs");
+ return (NULL);
+ }
+ KVM_ALLOC(kd, procbase, size);
+ st = sysctl(mib, 4, kd->procbase, &size, NULL, (size_t)0);
+ if (st == -1) {
+ _kvm_syserr(kd, kd->program, "kvm_getprocs");
+ return (NULL);
+ }
+ if (size % sizeof(struct kinfo_proc) != 0) {
+ _kvm_err(kd, kd->program,
+ "proc size mismatch (%lu total, %lu chunks)",
+ (u_long)size, (u_long)sizeof(struct kinfo_proc));
+ return (NULL);
+ }
+ nprocs = (int) (size / sizeof(struct kinfo_proc));
+ } else {
+ struct nlist nl[4], *p;
+
+ (void)memset(nl, 0, sizeof(nl));
+ nl[0].n_name = "_nprocs";
+ nl[1].n_name = "_allproc";
+ nl[2].n_name = "_zombproc";
+ nl[3].n_name = NULL;
+
+ if (kvm_nlist(kd, nl) != 0) {
+ for (p = nl; p->n_type != 0; ++p)
+ continue;
+ _kvm_err(kd, kd->program,
+ "%s: no such symbol", p->n_name);
+ return (NULL);
+ }
+ if (KREAD(kd, nl[0].n_value, &nprocs)) {
+ _kvm_err(kd, kd->program, "can't read nprocs");
+ return (NULL);
+ }
+ size = nprocs * sizeof(*kd->procbase);
+ KVM_ALLOC(kd, procbase, size);
+ nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
+ nl[2].n_value, nprocs);
+ if (nprocs < 0)
+ return (NULL);
+#ifdef notdef
+ size = nprocs * sizeof(struct kinfo_proc);
+ (void)realloc(kd->procbase, size);
+#endif
+ }
+ *cnt = nprocs;
+ return (kd->procbase);
+}
+
+void *
+_kvm_realloc(kvm_t *kd, void *p, size_t n)
+{
+ void *np = realloc(p, n);
+
+ if (np == NULL)
+ _kvm_err(kd, kd->program, "out of memory");
+ return (np);
+}
+
+/*
+ * Read in an argument vector from the user address space of process p.
+ * addr is the user-space base address of narg null-terminated contiguous
+ * strings. This is used to read in both the command arguments and
+ * environment strings. Read at most maxcnt characters of strings.
+ */
+static char **
+kvm_argv(kvm_t *kd, const struct miniproc *p, u_long addr, int narg,
+ int maxcnt)
+{
+ char *np, *cp, *ep, *ap;
+ u_long oaddr = (u_long)~0L;
+ u_long len;
+ size_t cc;
+ char **argv;
+
+ /*
+ * Check that there aren't an unreasonable number of arguments,
+ * and that the address is in user space.
+ */
+ if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
+ return (NULL);
+
+ if (kd->argv == NULL) {
+ /*
+ * Try to avoid reallocs.
+ */
+ kd->argc = MAX(narg + 1, 32);
+ kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
+ if (kd->argv == NULL)
+ return (NULL);
+ } else if (narg + 1 > kd->argc) {
+ kd->argc = MAX(2 * kd->argc, narg + 1);
+ kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
+ sizeof(*kd->argv));
+ if (kd->argv == NULL)
+ return (NULL);
+ }
+ if (kd->argspc == NULL) {
+ kd->argspc = _kvm_malloc(kd, (size_t)kd->nbpg);
+ if (kd->argspc == NULL)
+ return (NULL);
+ kd->argspc_len = kd->nbpg;
+ }
+ if (kd->argbuf == NULL) {
+ kd->argbuf = _kvm_malloc(kd, (size_t)kd->nbpg);
+ if (kd->argbuf == NULL)
+ return (NULL);
+ }
+ cc = sizeof(char *) * narg;
+ if (kvm_ureadm(kd, p, addr, (void *)kd->argv, cc) != cc)
+ return (NULL);
+ ap = np = kd->argspc;
+ argv = kd->argv;
+ len = 0;
+ /*
+ * Loop over pages, filling in the argument vector.
+ */
+ while (argv < kd->argv + narg && *argv != NULL) {
+ addr = (u_long)*argv & ~(kd->nbpg - 1);
+ if (addr != oaddr) {
+ if (kvm_ureadm(kd, p, addr, kd->argbuf,
+ (size_t)kd->nbpg) != kd->nbpg)
+ return (NULL);
+ oaddr = addr;
+ }
+ addr = (u_long)*argv & (kd->nbpg - 1);
+ cp = kd->argbuf + (size_t)addr;
+ cc = kd->nbpg - (size_t)addr;
+ if (maxcnt > 0 && cc > (size_t)(maxcnt - len))
+ cc = (size_t)(maxcnt - len);
+ ep = memchr(cp, '\0', cc);
+ if (ep != NULL)
+ cc = ep - cp + 1;
+ if (len + cc > kd->argspc_len) {
+ ptrdiff_t off;
+ char **pp;
+ char *op = kd->argspc;
+
+ kd->argspc_len *= 2;
+ kd->argspc = _kvm_realloc(kd, kd->argspc,
+ kd->argspc_len);
+ if (kd->argspc == NULL)
+ return (NULL);
+ /*
+ * Adjust argv pointers in case realloc moved
+ * the string space.
+ */
+ off = kd->argspc - op;
+ for (pp = kd->argv; pp < argv; pp++)
+ *pp += off;
+ ap += off;
+ np += off;
+ }
+ memcpy(np, cp, cc);
+ np += cc;
+ len += cc;
+ if (ep != NULL) {
+ *argv++ = ap;
+ ap = np;
+ } else
+ *argv += cc;
+ if (maxcnt > 0 && len >= maxcnt) {
+ /*
+ * We're stopping prematurely. Terminate the
+ * current string.
+ */
+ if (ep == NULL) {
+ *np = '\0';
+ *argv++ = ap;
+ }
+ break;
+ }
+ }
+ /* Make sure argv is terminated. */
+ *argv = NULL;
+ return (kd->argv);
+}
+
+static void
+ps_str_a(struct ps_strings *p, u_long *addr, int *n)
+{
+
+ *addr = (u_long)p->ps_argvstr;
+ *n = p->ps_nargvstr;
+}
+
+static void
+ps_str_e(struct ps_strings *p, u_long *addr, int *n)
+{
+
+ *addr = (u_long)p->ps_envstr;
+ *n = p->ps_nenvstr;
+}
+
+/*
+ * Determine if the proc indicated by p is still active.
+ * This test is not 100% foolproof in theory, but chances of
+ * being wrong are very low.
+ */
+static int
+proc_verify(kvm_t *kd, u_long kernp, const struct miniproc *p)
+{
+ struct proc kernproc;
+
+ /*
+ * Just read in the whole proc. It's not that big relative
+ * to the cost of the read system call.
+ */
+ if (kvm_read(kd, kernp, &kernproc, sizeof(kernproc)) !=
+ sizeof(kernproc))
+ return (0);
+ return (p->p_pid == kernproc.p_pid &&
+ (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
+}
+
+static char **
+kvm_doargv(kvm_t *kd, const struct miniproc *p, int nchr,
+ void (*info)(struct ps_strings *, u_long *, int *))
+{
+ char **ap;
+ u_long addr;
+ int cnt;
+ struct ps_strings arginfo;
+
+ /*
+ * Pointers are stored at the top of the user stack.
+ */
+ if (p->p_stat == SZOMB)
+ return (NULL);
+ cnt = (int)kvm_ureadm(kd, p, kd->usrstack - sizeof(arginfo),
+ (void *)&arginfo, sizeof(arginfo));
+ if (cnt != sizeof(arginfo))
+ return (NULL);
+
+ (*info)(&arginfo, &addr, &cnt);
+ if (cnt == 0)
+ return (NULL);
+ ap = kvm_argv(kd, p, addr, cnt, nchr);
+ /*
+ * For live kernels, make sure this process didn't go away.
+ */
+ if (ap != NULL && ISALIVE(kd) &&
+ !proc_verify(kd, (u_long)p->p_paddr, p))
+ ap = NULL;
+ return (ap);
+}
+
+/*
+ * Get the command args. This code is now machine independent.
+ */
+char **
+kvm_getargv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
+{
+ struct miniproc p;
+
+ KPTOMINI(kp, &p);
+ return (kvm_doargv(kd, &p, nchr, ps_str_a));
+}
+
+char **
+kvm_getenvv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
+{
+ struct miniproc p;
+
+ KPTOMINI(kp, &p);
+ return (kvm_doargv(kd, &p, nchr, ps_str_e));
+}
+
+static char **
+kvm_doargv2(kvm_t *kd, pid_t pid, int type, int nchr)
+{
+ size_t bufs;
+ int narg, mib[4];
+ size_t newargspc_len;
+ char **ap, *bp, *endp;
+
+ /*
+ * Check that there aren't an unreasonable number of arguments.
+ */
+ if (nchr > ARG_MAX)
+ return (NULL);
+
+ if (nchr == 0)
+ nchr = ARG_MAX;
+
+ /* Get number of strings in argv */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC_ARGS;
+ mib[2] = pid;
+ mib[3] = type == KERN_PROC_ARGV ? KERN_PROC_NARGV : KERN_PROC_NENV;
+ bufs = sizeof(narg);
+ if (sysctl(mib, 4, &narg, &bufs, NULL, (size_t)0) == -1)
+ return (NULL);
+
+ if (kd->argv == NULL) {
+ /*
+ * Try to avoid reallocs.
+ */
+ kd->argc = MAX(narg + 1, 32);
+ kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
+ if (kd->argv == NULL)
+ return (NULL);
+ } else if (narg + 1 > kd->argc) {
+ kd->argc = MAX(2 * kd->argc, narg + 1);
+ kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
+ sizeof(*kd->argv));
+ if (kd->argv == NULL)
+ return (NULL);
+ }
+
+ newargspc_len = MIN(nchr, ARG_MAX);
+ KVM_ALLOC(kd, argspc, newargspc_len);
+ memset(kd->argspc, 0, (size_t)kd->argspc_len); /* XXX necessary? */
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC_ARGS;
+ mib[2] = pid;
+ mib[3] = type;
+ bufs = kd->argspc_len;
+ if (sysctl(mib, 4, kd->argspc, &bufs, NULL, (size_t)0) == -1)
+ return (NULL);
+
+ bp = kd->argspc;
+ bp[kd->argspc_len-1] = '\0'; /* make sure the string ends with nul */
+ ap = kd->argv;
+ endp = bp + MIN(nchr, bufs);
+
+ while (bp < endp) {
+ *ap++ = bp;
+ /*
+ * XXX: don't need following anymore, or stick check
+ * for max argc in above while loop?
+ */
+ if (ap >= kd->argv + kd->argc) {
+ kd->argc *= 2;
+ kd->argv = _kvm_realloc(kd, kd->argv,
+ kd->argc * sizeof(*kd->argv));
+ ap = kd->argv;
+ }
+ bp += strlen(bp) + 1;
+ }
+ *ap = NULL;
+
+ return (kd->argv);
+}
+
+char **
+kvm_getargv2(kvm_t *kd, const struct kinfo_proc2 *kp, int nchr)
+{
+
+ return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ARGV, nchr));
+}
+
+char **
+kvm_getenvv2(kvm_t *kd, const struct kinfo_proc2 *kp, int nchr)
+{
+
+ return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ENV, nchr));
+}
+
+/*
+ * Read from user space. The user context is given by p.
+ */
+static ssize_t
+kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long uva,
+ char *buf, size_t len)
+{
+ char *cp;
+
+ cp = buf;
+ while (len > 0) {
+ size_t cc;
+ char *dp;
+ u_long cnt;
+
+ dp = _kvm_ureadm(kd, p, uva, &cnt);
+ if (dp == NULL) {
+ _kvm_err(kd, 0, "invalid address (%lx)", uva);
+ return (0);
+ }
+ cc = (size_t)MIN(cnt, len);
+ memcpy(cp, dp, cc);
+ cp += cc;
+ uva += cc;
+ len -= cc;
+ }
+ return (ssize_t)(cp - buf);
+}
+
+ssize_t
+kvm_uread(kvm_t *kd, const struct proc *p, u_long uva, char *buf, size_t len)
+{
+ struct miniproc mp;
+
+ PTOMINI(p, &mp);
+ return (kvm_ureadm(kd, &mp, uva, buf, len));
+}
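Taken together, kvm_getproc2() and kvm_getargv2() above give a ps(1)-style view of the system. The following is a minimal sketch of a live-kernel consumer, assuming the sysctl-only KVM_NO_FILES open flag is available; the output format and abbreviated error handling are illustrative. Link with -lkvm.

/* Illustrative sketch only: enumerate processes and show argv[0]. */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc2 *kp;
	char **argv;
	kvm_t *kd;
	int i, nprocs;

	kd = kvm_openfiles(NULL, NULL, NULL, KVM_NO_FILES, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return 1;
	}
	kp = kvm_getproc2(kd, KERN_PROC_ALL, 0, sizeof(*kp), &nprocs);
	if (kp == NULL) {
		fprintf(stderr, "kvm_getproc2: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return 1;
	}
	for (i = 0; i < nprocs; i++) {
		printf("%5d %s", (int)kp[i].p_pid, kp[i].p_comm);
		/* nchr == 0 asks for up to ARG_MAX characters of argv */
		argv = kvm_getargv2(kd, &kp[i], 0);
		if (argv != NULL && argv[0] != NULL)
			printf(" (%s)", argv[0]);
		printf("\n");
	}
	kvm_close(kd);
	return 0;
}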
--- /dev/null
+.\" $NetBSD: kvm_read.3,v 1.8 2003/08/07 16:44:39 agc Exp $
+.\"
+.\" Copyright (c) 1992, 1993
+.\" The Regents of the University of California. All rights reserved.
+.\"
+.\" This code is derived from software developed by the Computer Systems
+.\" Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+.\" BG 91-66 and contributed to Berkeley.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. Neither the name of the University nor the names of its contributors
+.\" may be used to endorse or promote products derived from this software
+.\" without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" @(#)kvm_read.3 8.1 (Berkeley) 6/4/93
+.\"
+.Dd June 4, 1993
+.Dt KVM_READ 3
+.Os
+.Sh NAME
+.Nm kvm_read ,
+.Nm kvm_write
+.Nd read or write kernel virtual memory
+.Sh LIBRARY
+.Lb libkvm
+.Sh SYNOPSIS
+.In kvm.h
+.Ft ssize_t
+.Fn kvm_read "kvm_t *kd" "u_long addr" "void *buf" "size_t nbytes"
+.Ft ssize_t
+.Fn kvm_write "kvm_t *kd" "u_long addr" "const void *buf" "size_t nbytes"
+.Sh DESCRIPTION
+The
+.Fn kvm_read
+and
+.Fn kvm_write
+functions are used to read and write kernel virtual memory (or a crash
+dump file).
+See
+.Xr kvm_open 3
+or
+.Xr kvm_openfiles 3
+for information regarding opening kernel virtual memory and crash dumps.
+.Pp
+The
+.Fn kvm_read
+function transfers
+.Fa nbytes
+bytes of data from
+the kernel space address
+.Fa addr
+to
+.Fa buf .
+Conversely,
+.Fn kvm_write
+transfers data from
+.Fa buf
+to
+.Fa addr .
+Unlike their SunOS counterparts, these functions cannot be used to
+read or write process address spaces.
+.Sh RETURN VALUES
+Upon success, the number of bytes actually transferred is returned.
+Otherwise, -1 is returned.
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr kvm_close 3 ,
+.Xr kvm_getargv 3 ,
+.Xr kvm_getenvv 3 ,
+.Xr kvm_geterr 3 ,
+.Xr kvm_getprocs 3 ,
+.Xr kvm_nlist 3 ,
+.Xr kvm_open 3 ,
+.Xr kvm_openfiles 3
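As a rough usage sketch of the interface described above: open the running kernel read-only with kvm_openfiles(3), resolve one symbol with kvm_nlist(3), and read an int through kvm_read(). The symbol name _nprocs (the same one kvm_getprocs() uses earlier in this library) and the minimal error handling are illustrative assumptions. Link with -lkvm.

/* Illustrative sketch only: read the kernel's nprocs counter. */
#include <sys/param.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2];
	kvm_t *kd;
	int nprocs;

	/* NULL execfile/corefile default to the running kernel and memory. */
	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return 1;
	}
	memset(nl, 0, sizeof(nl));
	nl[0].n_name = "_nprocs";	/* kernel symbol; illustrative */
	nl[1].n_name = NULL;
	if (kvm_nlist(kd, nl) != 0) {
		fprintf(stderr, "kvm_nlist: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return 1;
	}
	if (kvm_read(kd, nl[0].n_value, &nprocs, sizeof(nprocs)) !=
	    (ssize_t)sizeof(nprocs)) {
		fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return 1;
	}
	printf("nprocs = %d\n", nprocs);
	kvm_close(kd);
	return 0;
}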
--- /dev/null
+/* $NetBSD: kvm_riscv.c,v 1.1 2014/09/19 17:36:25 matt Exp $ */
+
+/*-
+ * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * RISC-V machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/types.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <db.h>
+#include <limits.h>
+#include <kvm.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "kvm_private.h"
+
+#include <sys/kcore.h>
+#include <machine/kcore.h>
+#include <machine/vmparam.h>
+
+__RCSID("$NetBSD: kvm_riscv.c,v 1.1 2014/09/19 17:36:25 matt Exp $");
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return 0;
+}
+
+/*
+ * Translate a KVA to a PA
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+// cpu_kcore_hdr_t *cpu_kh;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return 0;
+ }
+
+ /* No hit -- no translation */
+ *pa = (u_long)~0UL;
+ return 0;
+}
+
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ram;
+ off_t off;
+ void *e;
+
+ cpu_kh = kd->cpu_data;
+ e = (char *) kd->cpu_data + kd->cpu_dsize;
+ ram = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));
+ off = kd->dump_off;
+ do {
+ if (pa >= ram->start && (pa - ram->start) < ram->size) {
+ return off + (pa - ram->start);
+ }
+		off += ram->size;
+		ram++;
+ } while ((void *) ram < e && ram->size);
+
+ _kvm_err(kd, 0, "pa2off failed for pa %#" PRIxPADDR "\n", pa);
+ return (off_t) -1;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_sh3.c,v 1.9 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_sh3.c,v 1.9 2014/02/19 20:21:22 dsl Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * SH3 machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+#include <machine/pte.h>
+#include <machine/vmparam.h>
+
+#ifndef btop
+#define btop(x) (((unsigned)(x)) >> PGSHIFT) /* XXX */
+#define ptob(x) ((caddr_t)((x) << PGSHIFT)) /* XXX */
+#endif
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+
+ /* Not actually used for anything right now, but safe. */
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a physical address.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ _kvm_err(kd, 0, "vatop not yet implemented!");
+ return 0;
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ _kvm_err(kd, 0, "pa2off not yet implemented!");
+ return 0;
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_sparc.c,v 1.34 2015/10/07 11:56:41 martin Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_sparc.c,v 1.34 2015/10/07 11:56:41 martin Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * Sparc machine dependent routines for kvm. Hopefully, the forthcoming
+ * vm code will one day obsolete this module.
+ */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/core.h>
+#include <sys/kcore.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <sparc/pmap.h>
+#include <sparc/kcore.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+
+static int cputyp = -1;
+static int pgshift;
+static int nptesg; /* [sun4/sun4c] only */
+
+#undef VA_VPG
+#define VA_VPG(va) ((cputyp == CPU_SUN4C || cputyp == CPU_SUN4M) \
+ ? VA_SUN4C_VPG(va) \
+ : VA_SUN4_VPG(va))
+
+#undef VA_OFF
+#define VA_OFF(va) (va & (kd->nbpg - 1))
+
+int _kvm_kvatop44c(kvm_t *, vaddr_t, paddr_t *);
+int _kvm_kvatop4m (kvm_t *, vaddr_t, paddr_t *);
+int _kvm_kvatop4u (kvm_t *, vaddr_t, paddr_t *);
+
+/*
+ * XXX
+ * taken from /sys/arch/sparc64/include/kcore.h.
+ * this is the same as the sparc one, except for the kphys addition,
+ * so luckily we can use this here...
+ */
+typedef struct sparc64_cpu_kcore_hdr {
+ int cputype; /* CPU type associated with this dump */
+ u_long kernbase; /* copy of KERNBASE goes here */
+ int nmemseg; /* # of physical memory segments */
+ u_long memsegoffset; /* start of memseg array (relative */
+ /* to the start of this header) */
+ int nsegmap; /* # of segmaps following */
+ u_long segmapoffset; /* start of segmap array (relative */
+ /* to the start of this header) */
+ int npmeg; /* # of PMEGs; [sun4/sun4c] only */
+ u_long pmegoffset; /* start of pmeg array (relative */
+ /* to the start of this header) */
+/* SPARC64 stuff */
+ paddr_t kphys; /* Physical address of 4MB locked TLB */
+} sparc64_cpu_kcore_hdr_t;
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ if (kd->vmst != 0) {
+ _kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
+ kd->vmst = 0;
+ }
+}
+
+/*
+ * Prepare for translation of kernel virtual addresses into offsets
+ * into crash dump files. We use the MMU specific goop written at the
+ * front of the crash dump by pmap_dumpmmu().
+ */
+int
+_kvm_initvtop(kvm_t *kd)
+{
+ sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
+
+ switch (cputyp = cpup->cputype) {
+ case CPU_SUN4:
+ case CPU_SUN4U:
+		kd->nbpg = 8192;
+ pgshift = 13;
+ break;
+ case CPU_SUN4C:
+ case CPU_SUN4M:
+ kd->nbpg = 4096;
+ pgshift = 12;
+ break;
+ default:
+ _kvm_err(kd, kd->program, "Unsupported CPU type");
+ return (-1);
+ }
+ nptesg = NBPSG / kd->nbpg;
+ return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a physical address using the
+ * mapping information in kd->vm. Returns the result in pa, and returns
+ * the number of bytes that are contiguously available from this
+ * physical address. This routine is used only for crash dumps.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ if (cputyp == -1)
+ if (_kvm_initvtop(kd) != 0)
+ return (-1);
+
+ switch (cputyp) {
+ case CPU_SUN4:
+ case CPU_SUN4C:
+ return _kvm_kvatop44c(kd, va, pa);
+ break;
+ case CPU_SUN4M:
+ return _kvm_kvatop4m(kd, va, pa);
+ break;
+ case CPU_SUN4U:
+ default:
+ return _kvm_kvatop4u(kd, va, pa);
+ }
+}
+
+/*
+ * (note: sun4 3-level MMU not yet supported)
+ */
+int
+_kvm_kvatop44c(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ int vr, vs, pte;
+ sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
+ struct segmap *sp, *segmaps;
+ int *ptes;
+ int nkreg, nureg;
+ u_long kernbase = cpup->kernbase;
+
+ if (va < kernbase)
+ goto err;
+
+ /*
+ * Layout of CPU segment:
+ * cpu_kcore_hdr_t;
+ * [alignment]
+ * phys_ram_seg_t[cpup->nmemseg];
+ * segmap[cpup->nsegmap];
+ * ptes[cpup->npmegs];
+ */
+ segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
+ ptes = (int *)((int)kd->cpu_data + cpup->pmegoffset);
+ nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
+ nureg = 256 - nkreg;
+
+ vr = VA_VREG(va);
+ vs = VA_VSEG(va);
+
+ sp = &segmaps[(vr-nureg)*NSEGRG + vs];
+ if (sp->sg_npte == 0)
+ goto err;
+ if (sp->sg_pmeg == cpup->npmeg - 1) /* =seginval */
+ goto err;
+ pte = ptes[sp->sg_pmeg * nptesg + VA_VPG(va)];
+ if ((pte & PG_V) != 0) {
+ paddr_t p, off = VA_OFF(va);
+
+ p = (pte & PG_PFNUM) << pgshift;
+ *pa = p + off;
+ return (kd->nbpg - off);
+ }
+err:
+ _kvm_err(kd, 0, "invalid address (%#"PRIxVADDR")", va);
+ return (0);
+}
+
+int
+_kvm_kvatop4m(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
+ int vr, vs;
+ int pte;
+ off_t foff;
+ struct segmap *sp, *segmaps;
+ int nkreg, nureg;
+ u_long kernbase = cpup->kernbase;
+
+ if (va < kernbase)
+ goto err;
+
+ /*
+ * Layout of CPU segment:
+ * cpu_kcore_hdr_t;
+ * [alignment]
+ * phys_ram_seg_t[cpup->nmemseg];
+ * segmap[cpup->nsegmap];
+ */
+ segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
+ nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
+ nureg = 256 - nkreg;
+
+ vr = VA_VREG(va);
+ vs = VA_VSEG(va);
+
+ sp = &segmaps[(vr-nureg)*NSEGRG + vs];
+ if (sp->sg_npte == 0)
+ goto err;
+
+ /* XXX - assume page tables in initial kernel DATA or BSS. */
+ foff = _kvm_pa2off(kd, (u_long)&sp->sg_pte[VA_VPG(va)] - kernbase);
+ if (foff == (off_t)-1)
+ return (0);
+
+ if (_kvm_pread(kd, kd->pmfd, &pte, sizeof(pte), foff) != sizeof(pte)) {
+ _kvm_syserr(kd, kd->program, "cannot read pte for "
+ "%#" PRIxVADDR, va);
+ return (0);
+ }
+
+ if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
+ long p, off = VA_OFF(va);
+
+ p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
+ *pa = p + off;
+ return (kd->nbpg - off);
+ }
+err:
+ _kvm_err(kd, 0, "invalid address (%#"PRIxVADDR")", va);
+ return (0);
+}
+
+/*
+ * sparc64 pmap's 32-bit page table format
+ */
+int
+_kvm_kvatop4u(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
+ int64_t **segmaps;
+ int64_t *ptes;
+ int64_t pte;
+ int64_t kphys = cpup->kphys;
+ u_long kernbase = cpup->kernbase;
+
+ if (va < kernbase)
+ goto err;
+
+ /*
+ * Kernel layout:
+ *
+ * kernbase:
+ * 4MB locked TLB (text+data+BSS)
+ * Random other stuff.
+ */
+ if (va >= kernbase && va < kernbase + 4*1024*1024)
+ return (va - kernbase) + kphys;
+
+/* XXX: from sparc64/include/pmap.h */
+#define SPARC64_PTSZ (kd->nbpg/8)
+#define SPARC64_STSZ (SPARC64_PTSZ)
+#define SPARC64_PTMASK (SPARC64_PTSZ-1)
+#define SPARC64_PTSHIFT (13)
+#define SPARC64_PDSHIFT (10+SPARC64_PTSHIFT)
+#define SPARC64_STSHIFT (10+SPARC64_PDSHIFT)
+#define SPARC64_STMASK (SPARC64_STSZ-1)
+#define sparc64_va_to_seg(v) (int)((((int64_t)(v))>>SPARC64_STSHIFT)&SPARC64_STMASK)
+#define sparc64_va_to_pte(v) (int)((((int64_t)(v))>>SPARC64_PTSHIFT)&SPARC64_PTMASK)
+
+/* XXX: from sparc64/include/pte.h */
+#define SPARC64_TLB_V 0x8000000000000000LL
+#define SPARC64_TLB_PA_MASK 0x000001ffffffe000LL
+
+ /*
+ * Layout of CPU segment:
+ * cpu_kcore_hdr_t;
+ * [alignment]
+ * phys_ram_seg_t[cpup->nmemseg];
+ * segmap[cpup->nsegmap];
+ */
+ segmaps = (int64_t **)((long)kd->cpu_data + cpup->segmapoffset);
+ ptes = (int64_t *)(intptr_t)_kvm_pa2off(kd,
+ (paddr_t)(intptr_t)segmaps[sparc64_va_to_seg(va)]);
+ pte = ptes[sparc64_va_to_pte(va)];
+ if ((pte & SPARC64_TLB_V) != 0)
+ return ((pte & SPARC64_TLB_PA_MASK) | (va & (kd->nbpg - 1)));
+err:
+ _kvm_err(kd, 0, "invalid address (%#"PRIxVADDR")", va);
+ return (0);
+}
+
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ sparc64_cpu_kcore_hdr_t *cpup = kd->cpu_data;
+ phys_ram_seg_t *mp;
+ off_t off;
+ int nmem;
+
+ /*
+ * Layout of CPU segment:
+ * cpu_kcore_hdr_t;
+ * [alignment]
+ * phys_ram_seg_t[cpup->nmemseg];
+ */
+ mp = (phys_ram_seg_t *)((int)kd->cpu_data + cpup->memsegoffset);
+ off = 0;
+
+ /* Translate (sparse) pfnum to (packed) dump offset */
+ for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
+ if (mp->start <= pa && pa < mp->start + mp->size)
+ break;
+ off += mp->size;
+ }
+ if (nmem < 0) {
+ _kvm_err(kd, 0, "invalid address (%lx)", (unsigned long)pa);
+ return (-1);
+ }
+
+ return (kd->dump_off + off + pa - mp->start);
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+ u_long max_uva;
+ extern struct ps_strings *__ps_strings;
+
+ max_uva = (u_long) (__ps_strings + 1);
+ kd->usrstack = max_uva;
+ kd->max_uva = max_uva;
+ kd->min_uva = 0;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_sparc64.c,v 1.17 2014/02/21 18:00:09 palle Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_sparc64.c,v 1.17 2014/02/21 18:00:09 palle Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * Sparc machine dependent routines for kvm. Hopefully, the forthcoming
+ * vm code will one day obsolete this module.
+ */
+
+#include <sys/param.h>
+#include <sys/exec.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/core.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/pmap.h>
+#include <machine/kcore.h>
+#include <machine/vmparam.h>
+#include <machine/param.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ if (kd->vmst != 0) {
+ _kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
+ kd->vmst = 0;
+ }
+}
+
+/*
+ * Prepare for translation of kernel virtual addresses into offsets
+ * into crash dump files. We use the MMU specific goop written at the
+ * front of the crash dump by pmap_dumpmmu().
+ *
+ * We should read in and cache the ksegs here to speed up operations...
+ */
+int
+_kvm_initvtop(kvm_t *kd)
+{
+ kd->nbpg = 0x2000;
+
+ return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a physical address using the
+ * mapping information in kd->vm. Returns the result in pa, and returns
+ * the number of bytes that are contiguously available from this
+ * physical address. This routine is used only for crash dumps.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpup = kd->cpu_data;
+ u_long kernbase = cpup->kernbase;
+ uint64_t *pseg, *pdir, *ptbl;
+ struct cpu_kcore_4mbseg *ktlb;
+ int64_t data;
+ int i;
+
+ if (va < kernbase)
+ goto lose;
+
+ /* Handle the wired 4MB TTEs and per-CPU mappings */
+ if (cpup->memsegoffset > sizeof(cpu_kcore_hdr_t) &&
+ cpup->newmagic == SPARC64_KCORE_NEWMAGIC) {
+ /*
+ * new format: we have a list of 4 MB mappings
+ */
+ ktlb = (struct cpu_kcore_4mbseg *)
+ ((uintptr_t)kd->cpu_data + cpup->off4mbsegs);
+ for (i = 0; i < cpup->num4mbsegs; i++) {
+ uint64_t start = ktlb[i].va;
+ if (va < start || va >= start+PAGE_SIZE_4M)
+ continue;
+ *pa = ktlb[i].pa + va - start;
+ return (int)(start+PAGE_SIZE_4M - va);
+ }
+
+ if (cpup->numcpuinfos > 0) {
+ /* we have per-CPU mapping info */
+ uint64_t start, base;
+
+ base = cpup->cpubase - 32*1024;
+ if (va >= base && va < (base + cpup->percpusz)) {
+ start = va - base;
+ *pa = cpup->cpusp
+ + cpup->thiscpu*cpup->percpusz
+ + start;
+ return cpup->percpusz - start;
+ }
+ }
+ } else {
+ /*
+ * old format: just a textbase/size and database/size
+ */
+ if (va > cpup->ktextbase && va <
+ (cpup->ktextbase + cpup->ktextsz)) {
+ u_long vaddr;
+
+ vaddr = va - cpup->ktextbase;
+ *pa = cpup->ktextp + vaddr;
+ return (int)(cpup->ktextsz - vaddr);
+ }
+ if (va > cpup->kdatabase && va <
+ (cpup->kdatabase + cpup->kdatasz)) {
+ u_long vaddr;
+
+ vaddr = va - cpup->kdatabase;
+ *pa = cpup->kdatap + vaddr;
+ return (int)(cpup->kdatasz - vaddr);
+ }
+ }
+
+ /*
+ * Parse kernel page table.
+ */
+ pseg = (uint64_t *)(u_long)cpup->segmapoffset;
+ if (_kvm_pread(kd, kd->pmfd, &pdir, sizeof(pdir),
+ _kvm_pa2off(kd, (paddr_t)&pseg[va_to_seg(va)]))
+ != sizeof(pdir)) {
+ _kvm_syserr(kd, 0, "could not read L1 PTE");
+ goto lose;
+ }
+
+ if (!pdir) {
+ _kvm_err(kd, 0, "invalid L1 PTE");
+ goto lose;
+ }
+
+ if (_kvm_pread(kd, kd->pmfd, &ptbl, sizeof(ptbl),
+ _kvm_pa2off(kd, (paddr_t)&pdir[va_to_dir(va)]))
+ != sizeof(ptbl)) {
+ _kvm_syserr(kd, 0, "could not read L2 PTE");
+ goto lose;
+ }
+
+ if (!ptbl) {
+ _kvm_err(kd, 0, "invalid L2 PTE");
+ goto lose;
+ }
+
+ if (_kvm_pread(kd, kd->pmfd, &data, sizeof(data),
+ _kvm_pa2off(kd, (paddr_t)&ptbl[va_to_pte(va)]))
+ != sizeof(data)) {
+ _kvm_syserr(kd, 0, "could not read TTE");
+ goto lose;
+ }
+
+ if (data >= 0) {
+ _kvm_err(kd, 0, "invalid L2 TTE");
+ goto lose;
+ }
+
+ /*
+ * Calculate page offsets and things.
+ *
+ * XXXX -- We could support multiple page sizes.
+ */
+ va = va & (kd->nbpg - 1);
+ data &= SUN4U_TLB_PA_MASK; /* XXX handle sun4u/sun4v */
+ *pa = data + va;
+
+ /*
+	 * Parse and translate our TTE.
+ */
+
+ return (int)(kd->nbpg - va);
+
+lose:
+ *pa = (u_long)-1;
+ _kvm_err(kd, 0, "invalid address (%#"PRIxVADDR")", va);
+ return (0);
+}
+
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpup = kd->cpu_data;
+ phys_ram_seg_t *mp;
+ off_t off;
+ int nmem;
+
+ /*
+ * Layout of CPU segment:
+ * cpu_kcore_hdr_t;
+ * [alignment]
+ * phys_ram_seg_t[cpup->nmemseg];
+ */
+ mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
+ off = 0;
+
+ /* Translate (sparse) pfnum to (packed) dump offset */
+ for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
+ if (mp->start <= pa && pa < mp->start + mp->size)
+ break;
+ off += mp->size;
+ }
+ if (nmem < 0) {
+ _kvm_err(kd, 0, "invalid address (%#"PRIxPADDR")", pa);
+ return (-1);
+ }
+
+ return (kd->dump_off + off + pa - mp->start);
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+ u_long max_uva;
+ extern struct ps_strings *__ps_strings;
+
+ max_uva = (u_long) (__ps_strings + 1);
+ kd->usrstack = max_uva;
+ kd->max_uva = max_uva;
+ kd->min_uva = 0;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_sun2.c,v 1.6 2011/09/14 12:37:55 christos Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_sun2.c,v 1.6 2011/09/14 12:37:55 christos Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * Sun2 machine dependent routines for kvm.
+ *
+ * Note: This file has to build on ALL m68000 machines,
+ * so do NOT include any <machine / *.h> files here.
+ */
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kcore.h>
+
+#include <unistd.h>
+#include <limits.h>
+#include <nlist.h>
+#include <kvm.h>
+#include <db.h>
+
+#include <m68k/kcore.h>
+
+#include "kvm_private.h"
+#include "kvm_m68k.h"
+
+int _kvm_sun2_initvtop(kvm_t *);
+void _kvm_sun2_freevtop(kvm_t *);
+int _kvm_sun2_kvatop (kvm_t *, vaddr_t, paddr_t *);
+off_t _kvm_sun2_pa2off (kvm_t *, paddr_t);
+
+struct kvm_ops _kvm_ops_sun2 = {
+ _kvm_sun2_initvtop,
+ _kvm_sun2_freevtop,
+ _kvm_sun2_kvatop,
+ _kvm_sun2_pa2off };
+
+#define _kvm_pg_pa(v, s, pte) \
+ (((pte) & (s)->pg_frame) << (v)->pgshift)
+
+#define _kvm_va_segnum(s, x) \
+ ((u_int)(x) >> (s)->segshift)
+#define _kvm_pte_num_mask(v) \
+ (0xf << (v)->pgshift)
+#define _kvm_va_pte_num(v, va) \
+ (((va) & _kvm_pte_num_mask((v))) >> (v)->pgshift)
+
+/*
+ * XXX Re-define these here, no other place for them.
+ */
+#define NKSEG 512 /* kernel segmap entries */
+#define NPAGSEG 16 /* pages per segment */
+
+/* Finally, our local stuff... */
+struct private_vmstate {
+ /* Page Map Entry Group (PMEG) */
+ int pmeg[NKSEG][NPAGSEG];
+};
+
+/*
+ * Prepare for translation of kernel virtual addresses into offsets
+ * into crash dump files. We use the MMU specific goop written at the
+ * beginning of a crash dump by dumpsys()
+ * Note: sun2 MMU specific!
+ */
+int
+_kvm_sun2_initvtop(kvm_t *kd)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ char *p;
+
+ p = kd->cpu_data;
+ p += (h->page_size - sizeof(kcore_seg_t));
+ kd->vmst->private = p;
+
+ return (0);
+}
+
+void
+_kvm_sun2_freevtop(kvm_t *kd)
+{
+ /* This was set by pointer arithmetic, not allocation. */
+ kd->vmst->private = (void*)0;
+}
+
+/*
+ * Translate a kernel virtual address to a physical address using the
+ * mapping information in kd->vm. Returns the result in pa, and returns
+ * the number of bytes that are contiguously available from this
+ * physical address. This routine is used only for crash dumps.
+ */
+int
+_kvm_sun2_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pap)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct sun2_kcore_hdr *s = &h->un._sun2;
+ struct vmstate *v = kd->vmst;
+ struct private_vmstate *pv = v->private;
+ int pte, offset;
+ u_int segnum, sme, ptenum;
+ paddr_t pa;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return(0);
+ }
+
+ if (va < h->kernbase) {
+ _kvm_err(kd, 0, "not a kernel address");
+ return(0);
+ }
+
+ /*
+ * Get the segmap entry (sme) from the kernel segmap.
+ * Note: only have segmap entries from KERNBASE to end.
+ */
+ segnum = _kvm_va_segnum(s, va - h->kernbase);
+ ptenum = _kvm_va_pte_num(v, va);
+ offset = va & v->pgofset;
+
+ /* The segmap entry selects a PMEG. */
+ sme = s->ksegmap[segnum];
+ pte = pv->pmeg[sme][ptenum];
+
+ if ((pte & (s)->pg_valid) == 0) {
+ _kvm_err(kd, 0, "page not valid (VA=0x%lx)", va);
+ return (0);
+ }
+ pa = _kvm_pg_pa(v, s, pte) + offset;
+
+ *pap = pa;
+ return (h->page_size - offset);
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_sun2_pa2off(kvm_t *kd, paddr_t pa)
+{
+ return(kd->dump_off + pa);
+}
--- /dev/null
+/* $NetBSD: kvm_sun3.c,v 1.15 2011/09/14 12:37:55 christos Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_sun3.c,v 1.15 2011/09/14 12:37:55 christos Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * Sun3 machine dependent routines for kvm.
+ *
+ * Note: This file has to build on ALL m68k machines,
+ * so do NOT include any <machine / *.h> files here.
+ */
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kcore.h>
+
+#include <unistd.h>
+#include <limits.h>
+#include <nlist.h>
+#include <kvm.h>
+#include <db.h>
+
+#include <m68k/kcore.h>
+
+#include "kvm_private.h"
+#include "kvm_m68k.h"
+
+int _kvm_sun3_initvtop(kvm_t *);
+void _kvm_sun3_freevtop(kvm_t *);
+int _kvm_sun3_kvatop (kvm_t *, vaddr_t, paddr_t *);
+off_t _kvm_sun3_pa2off (kvm_t *, paddr_t);
+
+struct kvm_ops _kvm_ops_sun3 = {
+ _kvm_sun3_initvtop,
+ _kvm_sun3_freevtop,
+ _kvm_sun3_kvatop,
+ _kvm_sun3_pa2off };
+
+#define _kvm_pg_pa(v, s, pte) \
+ (((pte) & (s)->pg_frame) << (v)->pgshift)
+
+#define _kvm_va_segnum(s, x) \
+ ((u_int)(x) >> (s)->segshift)
+#define _kvm_pte_num_mask(v) \
+ (0xf << (v)->pgshift)
+#define _kvm_va_pte_num(v, va) \
+ (((va) & _kvm_pte_num_mask((v))) >> (v)->pgshift)
+
+/*
+ * XXX Re-define these here, no other place for them.
+ */
+#define NKSEG 256 /* kernel segmap entries */
+#define NPAGSEG 16 /* pages per segment */
+
+/* Finally, our local stuff... */
+struct private_vmstate {
+ /* Page Map Entry Group (PMEG) */
+ int pmeg[NKSEG][NPAGSEG];
+};
+
+/*
+ * Prepare for translation of kernel virtual addresses into offsets
+ * into crash dump files. We use the MMU specific goop written at the
+ * beginning of a crash dump by dumpsys()
+ * Note: sun3 MMU specific!
+ */
+int
+_kvm_sun3_initvtop(kvm_t *kd)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ char *p;
+
+ p = kd->cpu_data;
+ p += (h->page_size - sizeof(kcore_seg_t));
+ kd->vmst->private = p;
+
+ return (0);
+}
+
+void
+_kvm_sun3_freevtop(kvm_t *kd)
+{
+ /* This was set by pointer arithmetic, not allocation. */
+ kd->vmst->private = (void*)0;
+}
+
+/*
+ * Translate a kernel virtual address to a physical address using the
+ * mapping information in kd->vm. Returns the result in pa, and returns
+ * the number of bytes that are contiguously available from this
+ * physical address. This routine is used only for crash dumps.
+ */
+int
+_kvm_sun3_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pap)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct sun3_kcore_hdr *s = &h->un._sun3;
+ struct vmstate *v = kd->vmst;
+ struct private_vmstate *pv = v->private;
+ int pte, offset;
+ u_int segnum, sme, ptenum;
+ paddr_t pa;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return(0);
+ }
+
+ if (va < h->kernbase) {
+ _kvm_err(kd, 0, "not a kernel address");
+ return(0);
+ }
+
+ /*
+ * Get the segmap entry (sme) from the kernel segmap.
+ * Note: only have segmap entries from KERNBASE to end.
+ */
+ segnum = _kvm_va_segnum(s, va - h->kernbase);
+ ptenum = _kvm_va_pte_num(v, va);
+ offset = va & v->pgofset;
+
+ /* The segmap entry selects a PMEG. */
+ sme = s->ksegmap[segnum];
+ pte = pv->pmeg[sme][ptenum];
+
+ if ((pte & (s)->pg_valid) == 0) {
+ _kvm_err(kd, 0, "page not valid (VA=%#"PRIxVADDR")", va);
+ return (0);
+ }
+ pa = _kvm_pg_pa(v, s, pte) + offset;
+
+ *pap = pa;
+ return (h->page_size - offset);
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_sun3_pa2off(kvm_t *kd, paddr_t pa)
+{
+ return(kd->dump_off + pa);
+}
--- /dev/null
+/* $NetBSD: kvm_sun3x.c,v 1.12 2011/09/14 12:37:55 christos Exp $ */
+
+/*-
+ * Copyright (c) 1997 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Gordon W. Ross.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_sparc.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_sun3x.c,v 1.12 2011/09/14 12:37:55 christos Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * Sun3x machine dependent routines for kvm.
+ *
+ * Note: This file has to build on ALL m68k machines,
+ * so do NOT include any <machine / *.h> files here.
+ */
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/kcore.h>
+
+#include <unistd.h>
+#include <limits.h>
+#include <nlist.h>
+#include <kvm.h>
+#include <db.h>
+
+#include <m68k/kcore.h>
+
+#include "kvm_private.h"
+#include "kvm_m68k.h"
+
+int _kvm_sun3x_initvtop(kvm_t *);
+void _kvm_sun3x_freevtop(kvm_t *);
+int _kvm_sun3x_kvatop(kvm_t *, vaddr_t, paddr_t *);
+off_t _kvm_sun3x_pa2off(kvm_t *, paddr_t);
+
+struct kvm_ops _kvm_ops_sun3x = {
+ _kvm_sun3x_initvtop,
+ _kvm_sun3x_freevtop,
+ _kvm_sun3x_kvatop,
+ _kvm_sun3x_pa2off };
+
+#define _kvm_kvas_size(h) \
+ (-((h)->kernbase))
+#define _kvm_nkptes(h, v) \
+ (_kvm_kvas_size((h)) >> (v)->pgshift)
+#define _kvm_pg_pa(pte, h) \
+ ((pte) & (h)->pg_frame)
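+
+/*
+ * Illustrative note (not from the original source): the kernel VA space
+ * runs from kernbase to the top of the address space, so its size is the
+ * two's complement -kernbase; dividing by the page size gives the number
+ * of kernel PTEs, and pg_frame masks the physical page address out of a
+ * PTE.
+ */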
+
+/*
+ * Prepare for translation of kernel virtual addresses into offsets
+ * into crash dump files. Nothing to do here.
+ */
+int
+_kvm_sun3x_initvtop(kvm_t *kd)
+{
+ return 0;
+}
+
+void
+_kvm_sun3x_freevtop(kvm_t *kd)
+{
+}
+
+/*
+ * Translate a kernel virtual address to a physical address using the
+ * mapping information in kd->vm. Returns the result in pa, and returns
+ * the number of bytes that are contiguously available from this
+ * physical address. This routine is used only for crash dumps.
+ */
+int
+_kvm_sun3x_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pap)
+{
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct sun3x_kcore_hdr *s = &h->un._sun3x;
+ struct vmstate *v = kd->vmst;
+ int idx, len, offset, pte;
+ u_long pteva, pa;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return(0);
+ }
+
+ if (va < h->kernbase) {
+ _kvm_err(kd, 0, "not a kernel address");
+ return(0);
+ }
+
+ /*
+ * If this VA is in the contiguous range, short-cut.
+ * Note that this ends our recursion when we call
+ * kvm_read to access the kernel page table, which
+ * is guaranteed to be in the contiguous range.
+ */
+ if (va < s->contig_end) {
+ len = s->contig_end - va;
+ pa = va - h->kernbase;
+ goto done;
+ }
+
+ /*
+ * The KVA is beyond the contiguous range, so we must
+ * read the PTE for this KVA from the page table.
+ */
+ idx = ((va - h->kernbase) >> v->pgshift);
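+	/*
+	 * The kernel PTEs form a linear array starting at kernCbase; each
+	 * entry is read as 4 bytes (matching the int-sized pte variable),
+	 * hence the 4-byte stride.
+	 */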
+ pteva = s->kernCbase + (idx * 4);
+ if (kvm_read(kd, pteva, &pte, 4) != 4) {
+ _kvm_err(kd, 0, "can not read PTE!");
+ return (0);
+ }
+ if ((pte & s->pg_valid) == 0) {
+		_kvm_err(kd, 0, "page not valid (VA=%#"PRIxVADDR")", va);
+ return (0);
+ }
+ offset = va & v->pgofset;
+ len = (h->page_size - offset);
+ pa = _kvm_pg_pa(pte, s) + offset;
+
+done:
+ *pap = pa;
+ return (len);
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_sun3x_pa2off(kvm_t *kd, paddr_t pa)
+{
+ off_t off;
+ phys_ram_seg_t *rsp;
+ cpu_kcore_hdr_t *h = kd->cpu_data;
+ struct sun3x_kcore_hdr *s = &h->un._sun3x;
+
+ off = 0;
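+	/*
+	 * The dump stores the physical RAM segments back to back, so the
+	 * file offset of pa is the sum of all preceding segment sizes plus
+	 * pa's offset within its own segment.
+	 */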
+ for (rsp = s->ram_segs; rsp->size; rsp++) {
+ if (pa >= rsp->start && pa < rsp->start + rsp->size) {
+ pa -= rsp->start;
+ break;
+ }
+ off += rsp->size;
+ }
+ return (kd->dump_off + off + pa);
+}
+
--- /dev/null
+/* $NetBSD: kvm_vax.c,v 1.20 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * VAX machine dependent routines for kvm. Hopefully, the forthcoming
+ * vm code will one day obsolete this module, and the sooner the better,
+ * because this is basically just an error stub (sorry).
+ * This code may not work anyway.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <unistd.h>
+#include <nlist.h>
+#include <stdlib.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <machine/vmparam.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+__RCSID("$NetBSD: kvm_vax.c,v 1.20 2014/02/19 20:21:22 dsl Exp $");
+
+struct vmstate {
+ u_long end;
+};
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+int
+_kvm_initvtop(kvm_t *kd)
+{
+ struct vmstate *vm;
+ struct stat st;
+ struct nlist nl[2];
+
+ vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
+ if (vm == 0)
+ return (-1);
+
+ kd->vmst = vm;
+
+ if (fstat(kd->pmfd, &st) < 0)
+ return (-1);
+
+ /* Get end of kernel address */
+ nl[0].n_name = "_end";
+ nl[1].n_name = 0;
+ if (kvm_nlist(kd, nl) != 0) {
+		_kvm_err(kd, kd->program, "cannot find _end: no such symbol");
+ return (-1);
+ }
+ vm->end = (u_long)nl[0].n_value;
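+	/*
+	 * The kernel is treated as mapped contiguously from KERNBASE, so
+	 * the _end symbol is all _kvm_kvatop() needs to bound its simple
+	 * va - KERNBASE translation.
+	 */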
+
+ return (0);
+}
+
+#define VA_OFF(va) ((va) & (NBPG - 1))
+
+/*
+ * Translate a kernel virtual address to a physical address using the
+ * mapping information in kd->vm. Returns the result in pa, and returns
+ * the number of bytes that are contiguously available from this
+ * physical address. This routine is used only for crash dumps.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ u_long end;
+
+ if (va < (u_long) KERNBASE) {
+ _kvm_err(kd, 0, "invalid address (%#"PRIxVADDR"<%lx)", va,
+ (u_long)KERNBASE);
+ return (0);
+ }
+
+ end = kd->vmst->end;
+ if (va >= end) {
+		_kvm_err(kd, 0, "invalid address (%#"PRIxVADDR">=%lx)", va,
+ end);
+ return (0);
+ }
+
+ *pa = (va - (u_long) KERNBASE);
+ return (end - va);
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ * XXX - crash dump doesn't work anyway.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ return(kd->dump_off + pa);
+}
+
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
--- /dev/null
+/* $NetBSD: kvm_x86_64.c,v 1.10 2014/02/19 20:21:22 dsl Exp $ */
+
+/*-
+ * Copyright (c) 1989, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software developed by the Computer Systems
+ * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
+ * BG 91-66 and contributed to Berkeley.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#if defined(LIBC_SCCS) && !defined(lint)
+#if 0
+static char sccsid[] = "@(#)kvm_hp300.c 8.1 (Berkeley) 6/4/93";
+#else
+__RCSID("$NetBSD: kvm_x86_64.c,v 1.10 2014/02/19 20:21:22 dsl Exp $");
+#endif
+#endif /* LIBC_SCCS and not lint */
+
+/*
+ * x86-64 machine dependent routines for kvm.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/stat.h>
+#include <sys/kcore.h>
+#include <sys/types.h>
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <nlist.h>
+#include <kvm.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <limits.h>
+#include <db.h>
+
+#include "kvm_private.h"
+
+#include <machine/kcore.h>
+#include <machine/pmap.h>
+#include <machine/pte.h>
+#include <machine/vmparam.h>
+
+void
+_kvm_freevtop(kvm_t *kd)
+{
+
+ /* Not actually used for anything right now, but safe. */
+ if (kd->vmst != 0)
+ free(kd->vmst);
+}
+
+/*ARGSUSED*/
+int
+_kvm_initvtop(kvm_t *kd)
+{
+
+ return (0);
+}
+
+/*
+ * Translate a kernel virtual address to a physical address.
+ */
+int
+_kvm_kvatop(kvm_t *kd, vaddr_t va, paddr_t *pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ u_long page_off;
+ pd_entry_t pde;
+ pt_entry_t pte;
+ paddr_t pde_pa, pte_pa;
+
+ if (ISALIVE(kd)) {
+ _kvm_err(kd, 0, "vatop called in live kernel!");
+ return (0);
+ }
+
+ cpu_kh = kd->cpu_data;
+
+ /*
+ * Find and read all entries to get to the pa.
+ */
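+	/*
+	 * Illustrative summary (not from the original source): this is the
+	 * usual 4-level x86-64 walk.  The level 4 (PML4) and level 3 (PDP)
+	 * entries are located via ptdpaddr and PG_FRAME; a level 3 entry
+	 * with PG_PS set maps a 1 GB page, a level 2 entry with PG_PS set
+	 * maps a 2 MB page, and otherwise the level 1 PTE maps a normal
+	 * 4 KB page.  pl4_pi() .. pl1_pi() extract va's index bits for
+	 * each level.
+	 */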
+
+ /*
+ * Level 4.
+ */
+ pde_pa = cpu_kh->ptdpaddr + (pl4_pi(va) * sizeof(pd_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
+ _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
+ _kvm_syserr(kd, 0, "could not read PT level 4 entry");
+ goto lose;
+ }
+ if ((pde & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid level 4 PDE)");
+ goto lose;
+ }
+
+ /*
+ * Level 3.
+ */
+ pde_pa = (pde & PG_FRAME) + (pl3_pi(va) * sizeof(pd_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
+ _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
+ _kvm_syserr(kd, 0, "could not read PT level 3 entry");
+ goto lose;
+ }
+ if ((pde & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid level 3 PDE)");
+ goto lose;
+ }
+ if (pde & PG_PS) {
+ page_off = va & (NBPD_L3 - 1);
+ *pa = (pde & PG_1GFRAME) + page_off;
+ return (int)(NBPD_L3 - page_off);
+ }
+
+ /*
+ * Level 2.
+ */
+ pde_pa = (pde & PG_FRAME) + (pl2_pi(va) * sizeof(pd_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *)&pde, sizeof(pde),
+ _kvm_pa2off(kd, pde_pa)) != sizeof(pde)) {
+ _kvm_syserr(kd, 0, "could not read PT level 2 entry");
+ goto lose;
+ }
+ if ((pde & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid level 2 PDE)");
+ goto lose;
+ }
+ if (pde & PG_PS) {
+ page_off = va & (NBPD_L2 - 1);
+ *pa = (pde & PG_2MFRAME) + page_off;
+ return (int)(NBPD_L2 - page_off);
+ }
+
+ /*
+ * Level 1.
+ */
+ pte_pa = (pde & PG_FRAME) + (pl1_pi(va) * sizeof(pt_entry_t));
+ if (_kvm_pread(kd, kd->pmfd, (void *) &pte, sizeof(pte),
+ _kvm_pa2off(kd, pte_pa)) != sizeof(pte)) {
+ _kvm_syserr(kd, 0, "could not read PTE");
+ goto lose;
+ }
+ /*
+ * Validate the PTE and return the physical address.
+ */
+ if ((pte & PG_V) == 0) {
+ _kvm_err(kd, 0, "invalid translation (invalid PTE)");
+ goto lose;
+ }
+ page_off = va & PGOFSET;
+ *pa = (pte & PG_FRAME) + page_off;
+ return (int)(NBPG - page_off);
+
+ lose:
+ *pa = (u_long)~0L;
+ return (0);
+}
+
+/*
+ * Translate a physical address to a file-offset in the crash dump.
+ */
+off_t
+_kvm_pa2off(kvm_t *kd, paddr_t pa)
+{
+ cpu_kcore_hdr_t *cpu_kh;
+ phys_ram_seg_t *ramsegs;
+ off_t off;
+ int i;
+
+ cpu_kh = kd->cpu_data;
+ ramsegs = (void *)((char *)(void *)cpu_kh + ALIGN(sizeof *cpu_kh));
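+	/*
+	 * The physical RAM segment array sits directly after the (aligned)
+	 * cpu_kcore_hdr in the dump header; the loop below accumulates the
+	 * sizes of the segments preceding the one that contains pa.
+	 */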
+
+ off = 0;
+ for (i = 0; i < cpu_kh->nmemsegs; i++) {
+ if (pa >= ramsegs[i].start &&
+ (pa - ramsegs[i].start) < ramsegs[i].size) {
+ off += (pa - ramsegs[i].start);
+ break;
+ }
+ off += ramsegs[i].size;
+ }
+
+ return (kd->dump_off + off);
+}
+
+/*
+ * Machine-dependent initialization for ALL open kvm descriptors,
+ * not just those for a kernel crash dump. Some architectures
+ * have to deal with these NOT being constants! (i.e. m68k)
+ */
+int
+_kvm_mdopen(kvm_t *kd)
+{
+
+ kd->usrstack = USRSTACK;
+ kd->min_uva = VM_MIN_ADDRESS;
+ kd->max_uva = VM_MAXUSER_ADDRESS;
+
+ return (0);
+}
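+
+/*
+ * Illustrative only (not part of the upstream file): the MD hooks above
+ * are not called by applications directly; they are reached through the
+ * public kvm(3) interface.  A hypothetical sketch of reading a kernel
+ * variable out of a crash dump looks roughly like this (symbol name and
+ * paths are made up):
+ *
+ *	char errbuf[_POSIX2_LINE_MAX];
+ *	struct nlist nl[2];
+ *	long value;
+ *	kvm_t *kd;
+ *
+ *	kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core", NULL,
+ *	    O_RDONLY, errbuf);
+ *	nl[0].n_name = "_some_kernel_symbol";
+ *	nl[1].n_name = NULL;
+ *	if (kd != NULL && kvm_nlist(kd, nl) == 0 &&
+ *	    kvm_read(kd, nl[0].n_value, &value, sizeof(value)) ==
+ *	    sizeof(value)) {
+ *		... use value ...
+ *	}
+ *	if (kd != NULL)
+ *		kvm_close(kd);
+ *
+ * kvm_read() on a dead kernel is what eventually calls _kvm_kvatop() and
+ * _kvm_pa2off() above to turn a kernel VA into a file offset.
+ */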
--- /dev/null
+# $NetBSD: shlib_version,v 1.12 2009/01/11 03:07:48 christos Exp $
+# Remember to update distrib/sets/lists/base/shl.* when changing
+#
+major=6
+minor=0