#define MAX_MAP_LEN 1048576
-#define munmap minix_munmap
-#define mmap minix_mmap
-
mmf_t *
mmopen(char *fn, char *mode)
{
size -= l;
}
size = rounddown(size, PAGE_SIZE);
- r = minix_munmap((void *) a, size);
+ r = munmap((void *) a, size);
if(r != OK) {
printf("memory: WARNING: munmap failed: %d\n", r);
}
/* Try to allocate a piece of memory for the RAM disk. */
if(ramdev_size > 0 &&
- (mem = minix_mmap(NULL, ramdev_size, PROT_READ|PROT_WRITE,
+ (mem = mmap(NULL, ramdev_size, PROT_READ|PROT_WRITE,
MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
printf("MEM: failed to get memory for ramdisk\n");
return(ENOMEM);
* in use. We use mmap instead of malloc to allow the memory to be
* actually freed later.
*/
- if ((buf = minix_mmap(NULL, UDS_BUF, PROT_READ | PROT_WRITE,
+ if ((buf = mmap(NULL, UDS_BUF, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0)) == MAP_FAILED)
return ENOMEM;
uds_clear_fds(minor, &uds_fd_table[minor].ancillary_data);
/* Release the memory for the ring buffer. */
- minix_munmap(uds_fd_table[minor].buf, UDS_BUF);
+ munmap(uds_fd_table[minor].buf, UDS_BUF);
/* Set the socket back to its original UDS_FREE state. */
memset(&uds_fd_table[minor], '\0', sizeof(uds_fd_t));
* of malloc to allow the memory to be actually freed later.
*/
if (r == OK) {
- state.buf = minix_mmap(NULL, VND_BUF_SIZE, PROT_READ |
+ state.buf = mmap(NULL, VND_BUF_SIZE, PROT_READ |
PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
if (state.buf == MAP_FAILED)
r = ENOMEM;
}
if (r != OK) {
- minix_munmap(state.buf, VND_BUF_SIZE);
+ munmap(state.buf, VND_BUF_SIZE);
close(state.fd);
state.fd = -1;
}
* allow reuse until the device has been closed by the other
* users.
*/
- minix_munmap(state.buf, VND_BUF_SIZE);
+ munmap(state.buf, VND_BUF_SIZE);
close(state.fd);
state.fd = -1;
#define truncate _truncate
#define write _write
#define writev _writev
-#define minix_mmap _minix_mmap
-#define minix_munmap _minix_munmap
+#define mmap _mmap
+#define munmap _munmap
#define vfork __vfork14
#endif /* __minix */
#include <sys/mman.h>
#include <unistd.h>
-#define mmap minix_mmap
-#define munmap minix_munmap
-
#include "malloc-debug.h"
#if 0
*/
#ifdef __minix
-#include <machine/vmparam.h>
-#define mmap minix_mmap
-#define munmap minix_munmap
#ifdef _LIBSYS
#include <minix/sysutil.h>
+#include <machine/vmparam.h>
#define MALLOC_NO_SYSCALLS
#define wrtwarning(w) printf("libminc malloc warning: %s\n", w)
#define wrterror(w) panic("libminc malloc error: %s\n", w)
* This is necessary for VM to be able to define its own versions, and
* use this malloc.
*/
-#undef minix_mmap
-#undef minix_munmap
+#undef mmap
+#undef munmap
#include <sys/types.h>
#if defined(__NetBSD__)
__weak_alias(vm_unmap, _vm_unmap)
__weak_alias(vm_getphys, _vm_getphys)
__weak_alias(vm_getrefcount, _vm_getrefcount)
-__weak_alias(minix_mmap, _minix_mmap)
-__weak_alias(minix_munmap, _minix_munmap)
+__weak_alias(mmap, _mmap)
+__weak_alias(munmap, _munmap)
#endif
return _syscall(VM_PROC_NR, VM_VFS_MMAP, &m);
}
-void *minix_mmap(void *addr, size_t len, int prot, int flags,
+void *mmap(void *addr, size_t len, int prot, int flags,
int fd, off_t offset)
{
return minix_mmap_for(SELF, addr, len, prot, flags, fd, offset);
}
-int minix_munmap(void *addr, size_t len)
+int munmap(void *addr, size_t len)
{
message m;
len = roundup(fs_block_size, PAGE_SIZE);
- if((bp->data = minix_mmap(0, fs_block_size,
+ if((bp->data = mmap(0, fs_block_size,
PROT_READ|PROT_WRITE, MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
free_unused_blocks();
- if((bp->data = minix_mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
+ if((bp->data = mmap(0, fs_block_size, PROT_READ|PROT_WRITE,
MAP_PREALLOC|MAP_ANON, -1, 0)) == MAP_FAILED) {
panic("libminixfs: could not allocate block");
}
return lmfs_get_block_ino(dev, block, only_search, VMC_NO_INODE, 0);
}
-void minix_munmap_t(void *a, int len)
+void munmap_t(void *a, int len)
{
vir_bytes av = (vir_bytes) a;
assert(a);
assert(!(len % PAGE_SIZE));
- if(minix_munmap(a, len) < 0)
+ if(munmap(a, len) < 0)
panic("libminixfs cache: munmap failed");
}
MARKCLEAN(bp); /* NO_DEV blocks may be marked dirty */
if(bp->lmfs_bytes > 0) {
assert(bp->data);
- minix_munmap_t(bp->data, bp->lmfs_bytes);
+ munmap_t(bp->data, bp->lmfs_bytes);
bp->lmfs_bytes = 0;
bp->data = NULL;
} else assert(!bp->data);
if (bp->lmfs_dev == device) {
assert(bp->data);
assert(bp->lmfs_bytes > 0);
- minix_munmap_t(bp->data, bp->lmfs_bytes);
+ munmap_t(bp->data, bp->lmfs_bytes);
bp->lmfs_dev = NO_DEV;
bp->lmfs_bytes = 0;
bp->data = NULL;
for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
if(bp->data) {
assert(bp->lmfs_bytes > 0);
- minix_munmap_t(bp->data, bp->lmfs_bytes);
+ munmap_t(bp->data, bp->lmfs_bytes);
}
}
}
char *guard_start, *guard_end;
stacksize = round_page(stacksize + MTHREAD_GUARDSIZE);
- stackaddr = minix_mmap(NULL, stacksize,
+ stackaddr = mmap(NULL, stacksize,
PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE,
-1, 0);
if (stackaddr == MAP_FAILED)
# error "Unsupported platform"
#endif
stacksize = guarded_stacksize;
- if (minix_munmap(guard_start, MTHREAD_GUARDSIZE) != 0)
+ if (munmap(guard_start, MTHREAD_GUARDSIZE) != 0)
mthread_panic("unable to unmap stack space for guard");
tcb->m_context.uc_stack.ss_sp = guard_end;
} else
rt->m_cond = NULL;
if (rt->m_attr.ma_stackaddr == NULL) { /* We allocated stack space */
if (rt->m_context.uc_stack.ss_sp) {
- if (minix_munmap(rt->m_context.uc_stack.ss_sp,
+ if (munmap(rt->m_context.uc_stack.ss_sp,
rt->m_context.uc_stack.ss_size) != 0) {
mthread_panic("unable to unmap memory");
}
if (puffs_fakecc)
return &fakecc;
- sp = minix_mmap(NULL, stacksize, PROT_READ|PROT_WRITE,
+ sp = mmap(NULL, stacksize, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_PRIVATE, -1, 0);
if (sp == MAP_FAILED)
return NULL;
/* initialize both ucontext's */
if (getcontext(&pcc->pcc_uc) == -1) {
- minix_munmap(pcc, stacksize);
+ munmap(pcc, stacksize);
return NULL;
}
if (getcontext(&pcc->pcc_uc_ret) == -1) {
- minix_munmap(pcc, stacksize);
+ munmap(pcc, stacksize);
return NULL;
}
DPRINTF(("invalidating pcc %p\n", pcc));
assert(!puffs_fakecc);
- minix_munmap(pcc, stacksize);
+ munmap(pcc, stacksize);
}
void
if(flags & AC_LOWER1M)
mmapflags |= MAP_LOWER1M;
if(flags & AC_ALIGN64K)
- mmapflags |= MAP_ALIGN64K;
+ mmapflags |= MAP_ALIGNMENT_64KB;
- /* First try to get memory with minix_mmap. This is guaranteed
+ /* First try to get memory with mmap. This is guaranteed
* to be page-aligned, and we can tell VM it has to be
* pre-allocated and contiguous.
*/
errno = 0;
- buf = (vir_bytes) minix_mmap(0, len, PROT_READ|PROT_WRITE, mmapflags, -1, 0);
+ buf = (vir_bytes) mmap(0, len, PROT_READ|PROT_WRITE, mmapflags, -1, 0);
- /* If that failed, maybe we're not running in paged mode.
- * If that's the case, ENXIO will be returned.
- * Memory returned with malloc() will be preallocated and
- * contiguous, so fallback on that, and ask for a little extra
- * so we can page align it ourselves.
- */
if(buf == (vir_bytes) MAP_FAILED) {
- u32_t align = 0;
- if(errno != (_SIGN ENXIO)) {
- return NULL;
- }
- if(flags & AC_ALIGN4K)
- align = 4*1024;
- if(flags & AC_ALIGN64K)
- align = 64*1024;
- if(len + align < len)
- return NULL;
- len += align;
- if(!(buf = (vir_bytes) malloc(len))) {
- return NULL;
- }
- if(align)
- buf += align - (buf % align);
+ return NULL;
}
/* Get physical address, if requested. */
int free_contig(void *addr, size_t len)
{
- return minix_munmap(addr, len);
+ return munmap(addr, len);
}
#include <sys/sysctl.h>
#include <dirent.h>
-#ifdef __minix
-#define munmap minix_munmap
-#endif
-
#include "debug.h"
#include "rtld.h"
#include "rtld.h"
#ifdef __minix
-#define munmap minix_munmap
-#define mmap minix_mmap
#ifndef MAP_SHARED
#define MAP_SHARED MAP_PRIVATE /* minix: MAP_SHARED should be MAP_PRIVATE */
#endif
#include "debug.h"
#include "rtld.h"
-#ifdef __minix
-#define munmap minix_munmap
-#endif
-
#define MINIXVERBOSE 0
#if MINIXVERBOSE
}
#ifdef __minix
- ehdr = minix_mmap(NULL, _rtld_pagesz, PROT_READ|PROT_WRITE,
+ ehdr = mmap(NULL, _rtld_pagesz, PROT_READ|PROT_WRITE,
MAP_PREALLOC|MAP_ANON, -1, (off_t)0);
Pread(ehdr, _rtld_pagesz, fd, 0);
#if MINIXVERBOSE
mapbase = mmap(base_addr, mapsize, text_flags,
mapflags | MAP_FILE | MAP_PRIVATE, fd, base_offset);
#else
- mapbase = minix_mmap(base_addr, mapsize, PROT_READ|PROT_WRITE,
+ mapbase = mmap(base_addr, mapsize, PROT_READ|PROT_WRITE,
MAP_ANON | MAP_PREALLOC, -1, 0);
#if MINIXVERBOSE
fprintf(stderr, "minix mmap for whole block: 0x%lx-0x%lx\n", mapbase, mapbase+mapsize);
* John Polstra <jdp@polstra.com>.
*/
-#ifdef __minix
-#define munmap minix_munmap
-#endif
-
#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: rtld.c,v 1.159 2012/10/01 03:03:46 riastradh Exp $");
* SUCH DAMAGE.
*/
-#ifdef __minix
-/* Minix mmap can do this. */
-#define mmap minix_mmap
-#define munmap minix_munmap
-#endif
-
#if defined(LIBC_SCCS) && !defined(lint)
/*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/
#endif /* LIBC_SCCS and not lint */
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* mmap/munmap are used in this file just to allocate/free memory
- * so these functions are ok.
- */
-#define mmap minix_mmap
-#define munmap minix_munmap
-
/*
* mke2fs.c: "re-invent (dumb but non-GPLed) wheel as a fun project"
*
return ENOSPC;
shm = &shm_list[shm_list_nr];
memset(shm, 0, sizeof(struct shm_struct));
- shm->page = (vir_bytes) minix_mmap(0, size,
+ shm->page = (vir_bytes) mmap(0, size,
PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
if (shm->page == (vir_bytes) MAP_FAILED)
return ENOMEM;
int size = shm_list[i].shmid_ds.shm_segsz;
if (size % PAGE_SIZE)
size += PAGE_SIZE - size % PAGE_SIZE;
- minix_munmap((void *)shm_list[i].page, size);
+ munmap((void *)shm_list[i].page, size);
}
}
shm_list_nr = j;
#include "inc.h"
-#define minix_munmap _minix_munmap
+#define munmap _munmap
#include <sys/mman.h>
-#undef minix_munmap
+#undef munmap
int unmap_ok = 0;
/*===========================================================================*
- * minix_munmap *
+ * munmap *
*===========================================================================*/
-int minix_munmap(void *addrstart, vir_bytes len)
+int munmap(void *addrstart, vir_bytes len)
{
if(!unmap_ok)
return ENOSYS;
- return _minix_munmap(addrstart, len);
+ return _munmap(addrstart, len);
}
if(vmm_flags & MAP_LOWER16M) vrflags |= VR_LOWER16MB;
if(vmm_flags & MAP_LOWER1M) vrflags |= VR_LOWER1MB;
- if(vmm_flags & MAP_ALIGN64K) vrflags |= VR_PHYS64K;
+ if(vmm_flags & MAP_ALIGNMENT_64KB) vrflags |= VR_PHYS64K;
if(vmm_flags & MAP_PREALLOC) mfflags |= MF_PREALLOC;
if(vmm_flags & MAP_UNINITIALIZED) {
if(!execpriv) return NULL;
return OK;
}
-void *minix_mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
+void *mmap(void *addr, size_t len, int f, int f2, int f3, off_t o)
{
void *ret;
phys_bytes p;
return ret;
}
-int minix_munmap(void * addr, size_t len)
+int munmap(void * addr, size_t len)
{
vm_freepages((vir_bytes) addr, roundup(len, VM_PAGE_SIZE)/VM_PAGE_SIZE);
return 0;
#include <sys/featuretest.h>
#include <machine/ansi.h>
-#include <minix/type.h>
#ifdef _BSD_SIZE_T_
typedef _BSD_SIZE_T_ size_t;
#endif
#define MAP_PRIVATE 0x0002 /* changes are private */
+#ifdef _KERNEL
+/*
+ * Deprecated flag; these are treated as MAP_PRIVATE internally by
+ * the kernel.
+ */
+#define MAP_COPY 0x0004 /* "copy" region at mmap time */
+#endif
+
+/*
+ * Other flags
+ */
+#define MAP_FIXED 0x0010 /* map addr must be exactly as requested */
+#define MAP_RENAME 0x0020 /* Sun: rename private pages to file */
+#define MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */
+#define MAP_INHERIT 0x0080 /* region is retained after exec */
+#define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */
+#define MAP_TRYFIXED 0x0400 /* attempt hint address, even within break */
+#define MAP_WIRED 0x0800 /* mlock() mapping when it is established */
+
/*
* Mapping type
*/
-#define MAP_ANON 0x0004 /* anonymous memory */
+#define MAP_FILE 0x0000 /* map from file (default) */
+#define MAP_ANON 0x1000 /* allocated from memory, swap space */
+#define MAP_STACK 0x2000 /* allocated from memory, swap space (stack) */
/*
- * Minix specific flags.
+ * Alignment (expressed in log2). Must be >= log2(PAGE_SIZE) and
+ * < # bits in a pointer (26 (acorn26), 32 or 64).
+ */
+#define MAP_ALIGNED(n) ((n) << MAP_ALIGNMENT_SHIFT)
+#define MAP_ALIGNMENT_SHIFT 24
+#define MAP_ALIGNMENT_MASK MAP_ALIGNED(0xff)
+#define MAP_ALIGNMENT_64KB MAP_ALIGNED(16) /* 2^16 */
+#define MAP_ALIGNMENT_16MB MAP_ALIGNED(24) /* 2^24 */
+#define MAP_ALIGNMENT_4GB MAP_ALIGNED(32) /* 2^32 */
+#define MAP_ALIGNMENT_1TB MAP_ALIGNED(40) /* 2^40 */
+#define MAP_ALIGNMENT_256TB MAP_ALIGNED(48) /* 2^48 */
+#define MAP_ALIGNMENT_64PB MAP_ALIGNED(56) /* 2^56 */
+
+#ifdef __minix
+/*
+ * Minix-specific flags
*/
-#define MAP_PREALLOC 0x0008 /* not on-demand */
-#define MAP_CONTIG 0x0010 /* contiguous in physical memory */
-#define MAP_LOWER16M 0x0020 /* physically below 16MB */
-#define MAP_ALIGN64K 0x0040 /* physically aligned at 64kB */
-#define MAP_LOWER1M 0x0080 /* physically below 16MB */
-#define MAP_ALIGNMENT_64KB MAP_ALIGN64K
-
-#define MAP_FIXED 0x0200 /* require mapping to happen at hint */
-#define MAP_THIRDPARTY 0x0400 /* perform on behalf of any process */
-#define MAP_UNINITIALIZED 0x0800 /* do not clear memory */
-#define MAP_FILE 0x1000 /* it's a file */
+#define MAP_UNINITIALIZED 0x040000 /* do not clear memory */
+#define MAP_PREALLOC 0x080000 /* not on-demand */
+#define MAP_CONTIG 0x100000 /* contiguous in physical memory */
+#define MAP_LOWER16M 0x200000 /* physically below 16MB */
+#define MAP_LOWER1M	0x400000	/* physically below 1MB */
+#define MAP_THIRDPARTY 0x800000 /* perform on behalf of any process */
+#endif
/*
* Error indicator returned by mmap(2)
*/
#define MAP_FAILED ((void *) -1) /* mmap() failed */
+/*
+ * Flags to msync
+ */
+#define MS_ASYNC 0x01 /* perform asynchronous writes */
+#define MS_INVALIDATE 0x02 /* invalidate cached data */
+#define MS_SYNC 0x04 /* perform synchronous writes */
+
+/*
+ * Flags to mlockall
+ */
+#define MCL_CURRENT 0x01 /* lock all pages currently mapped */
+#define MCL_FUTURE 0x02 /* lock all pages mapped in the future */
+
+/*
+ * POSIX memory advisory values.
+ * Note: keep consistent with the original definitions below.
+ */
+#define POSIX_MADV_NORMAL 0 /* No further special treatment */
+#define POSIX_MADV_RANDOM 1 /* Expect random page references */
+#define POSIX_MADV_SEQUENTIAL 2 /* Expect sequential page references */
+#define POSIX_MADV_WILLNEED 3 /* Will need these pages */
+#define POSIX_MADV_DONTNEED 4 /* Don't need these pages */
+
+#if defined(_NETBSD_SOURCE)
+/*
+ * Original advice values, equivalent to POSIX definitions,
+ * and a few implementation-specific ones.
+ */
+#define MADV_NORMAL POSIX_MADV_NORMAL
+#define MADV_RANDOM POSIX_MADV_RANDOM
+#define MADV_SEQUENTIAL POSIX_MADV_SEQUENTIAL
+#define MADV_WILLNEED POSIX_MADV_WILLNEED
+#define MADV_DONTNEED POSIX_MADV_DONTNEED
+#define MADV_SPACEAVAIL 5 /* Insure that resources are reserved */
+#define MADV_FREE 6 /* Pages are empty, free them */
+
+/*
+ * Flags to minherit
+ */
+#define MAP_INHERIT_SHARE 0 /* share with child */
+#define MAP_INHERIT_COPY 1 /* copy into child */
+#define MAP_INHERIT_NONE 2 /* absent from child */
+#define MAP_INHERIT_DONATE_COPY 3 /* copy and delete -- not
+ implemented in UVM */
+#define MAP_INHERIT_DEFAULT MAP_INHERIT_COPY
+#endif
+
+#ifndef _KERNEL
+
#include <sys/cdefs.h>
__BEGIN_DECLS
-#ifndef __minix
void * mmap(void *, size_t, int, int, int, off_t);
int munmap(void *, size_t);
-#else
-void * minix_mmap(void *, size_t, int, int, int, off_t);
-void * minix_mmap64(void *, size_t, int, int, int, u64_t);
-void * minix_mmap_for(endpoint_t, void *, size_t, int, int, int, u64_t);
-int minix_munmap(void *, size_t);
-void * vm_remap(endpoint_t d, endpoint_t s, void *da, void *sa, size_t si);
-void * vm_remap_ro(endpoint_t d, endpoint_t s, void *da, void *sa, size_t si);
-int vm_unmap(endpoint_t endpt, void *addr);
-unsigned long vm_getphys(endpoint_t endpt, void *addr);
-u8_t vm_getrefcount(endpoint_t endpt, void *addr);
-#endif /* __minix */
+int mprotect(void *, size_t, int);
+#ifndef __LIBC12_SOURCE__
+int msync(void *, size_t, int) __RENAME(__msync13);
+#endif
+int mlock(const void *, size_t);
+int munlock(const void *, size_t);
+int mlockall(int);
+int munlockall(void);
+#if defined(_NETBSD_SOURCE)
+int madvise(void *, size_t, int);
+int mincore(void *, size_t, char *);
+int minherit(void *, size_t, int);
+void * mremap(void *, size_t, void *, size_t, int);
+#endif
+int posix_madvise(void *, size_t, int);
+
+#ifdef __minix
+#include <minix/endpoint.h>
+void * vm_remap(endpoint_t d, endpoint_t s, void *da, void *sa, size_t si);
+void * vm_remap_ro(endpoint_t d, endpoint_t s, void *da, void *sa, size_t si);
+int vm_unmap(endpoint_t endpt, void *addr);
+unsigned long vm_getphys(endpoint_t endpt, void *addr);
+u8_t vm_getrefcount(endpoint_t endpt, void *addr);
+#endif
+
__END_DECLS
+#endif /* !_KERNEL */
+
#endif /* !_SYS_MMAN_H_ */
if (contig)
ptr = alloc_contig(size, 0, NULL);
else
- ptr = minix_mmap(NULL, size, PROT_READ | PROT_WRITE,
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_PREALLOC | MAP_ANON, -1, 0);
if (ptr == MAP_FAILED)
if (contig)
free_contig(ptr, size);
else
- minix_munmap(ptr, size);
+ munmap(ptr, size);
}
static int set_result(result_t *res, int type, ssize_t value)
* very unlikely that the actual piece of memory will end up
* being physically contiguous with the last piece.
*/
- tmp = minix_mmap((void *) (buf->addr + len + PAGE_SIZE), len,
+ tmp = mmap((void *) (buf->addr + len + PAGE_SIZE), len,
PROT_READ | PROT_WRITE, MAP_ANON | MAP_PREALLOC |
MAP_CONTIG, -1, 0L);
panic("unable to allocate temporary buffer");
}
- addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
+ addr = (vir_bytes) mmap((void *) buf->addr, len,
PROT_READ | PROT_WRITE, flags, -1, 0L);
if (addr != buf->addr)
if (!prealloc)
return;
- if ((r = minix_munmap(tmp, len)) != OK)
+ if ((r = munmap(tmp, len)) != OK)
panic("unable to unmap buffer (%d)", errno);
if ((r = sys_umap(SELF, VM_D, addr, len, &buf->phys)) < 0)
* unmapped the temporary memory also, there's a small chance we'll end
* up with a different physical page this time. Who knows.
*/
- minix_munmap((void *) addr, len);
+ munmap((void *) addr, len);
- addr = (vir_bytes) minix_mmap((void *) buf->addr, len,
+ addr = (vir_bytes) mmap((void *) buf->addr, len,
PROT_READ | PROT_WRITE, flags, -1, 0L);
if (addr != buf->addr)
for (i = 0; i < count; i++) {
for (j = 0; j < buf[i].pages; j++) {
- r = minix_munmap((void *) (buf[i].addr + j * PAGE_SIZE),
+ r = munmap((void *) (buf[i].addr + j * PAGE_SIZE),
PAGE_SIZE);
if (r != OK)
got_result("invalid virtual vector pointer");
/* Test unallocated virtual vector. */
- vvecp = (struct vumap_vir *) minix_mmap(NULL, PAGE_SIZE,
+ vvecp = (struct vumap_vir *) mmap(NULL, PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);
if (vvecp == MAP_FAILED)
got_result("unallocated virtual vector pointer");
- minix_munmap((void *) vvecp, PAGE_SIZE);
+ munmap((void *) vvecp, PAGE_SIZE);
/* Test invalid physical vector pointer. */
r = do_vumap(SELF, vvec, 2, 0, VUA_READ, NULL, &pcount);
got_result("invalid physical vector pointer");
/* Test unallocated physical vector. */
- pvecp = (struct vumap_phys *) minix_mmap(NULL, PAGE_SIZE,
+ pvecp = (struct vumap_phys *) mmap(NULL, PAGE_SIZE,
PROT_READ | PROT_WRITE, MAP_ANON, -1, 0L);
if (pvecp == MAP_FAILED)
got_result("unallocated physical vector pointer");
- minix_munmap((void *) pvecp, PAGE_SIZE);
+ munmap((void *) pvecp, PAGE_SIZE);
free_bufs(buf, 2);
}
# Programs that require setuid
setuids="test11 test33 test43 test44 test46 test56 test60 test61 test65 \
- test69 test76 test77 test78" # test73"
+ test69 test76 test73 test77 test78"
# Scripts that require to be run as root
rootscripts="testisofs testvnd"
alltests="1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 \
21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 \
41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 \
- 61 62 63 64 65 66 67 68 69 70 71 72 75 76 77 78 79 \
+ 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 \
sh1 sh2 interp mfs isofs vnd"
tests_no=`expr 0`
-# test mmap only if enabled in sysenv
-filemap=1 # the default is on
-if sysenv filemap >/dev/null
-then filemap=`sysenv filemap`
-fi
-
-if [ "$filemap" -ne 0 ]
-then alltests="$alltests 74"
-fi
-
# If root, make sure the setuid tests have the correct permissions
# and make the dir bin-owned.
if [ "$ROOT" ]
start(44);
for(i = 0; i < CHUNKS; i++) {
- v[i] = minix_mmap(vaddr, CHUNKSIZE, PROT_READ|PROT_WRITE, 0,
+ v[i] = mmap(vaddr, CHUNKSIZE, PROT_READ|PROT_WRITE, 0,
-1, 0);
if(v[i] == MAP_FAILED) {
- perror("minix_mmap");
- fprintf(stderr, "minix_mmap failed\n");
+ perror("mmap");
+ fprintf(stderr, "mmap failed\n");
quit();
}
if(v[i] != vaddr) {
fprintf(stderr,
- "minix_mmap said 0x%p but i wanted 0x%p\n",
+ "mmap said 0x%p but i wanted 0x%p\n",
v[i], vaddr);
quit();
}
if(f == 0) {
/* child: use up as much memory as we can */
- while((addrs[i++ % NADDRS] = minix_mmap(0, LEN, PROT_READ|PROT_WRITE,
+ while((addrs[i++ % NADDRS] = mmap(0, LEN, PROT_READ|PROT_WRITE,
MAP_PREALLOC|MAP_CONTIG|MAP_ANON, -1, 0)) != MAP_FAILED)
;
exit(0);
}
}
- if((mmapdata = minix_mmap(NULL, blocksize, PROT_READ, MAP_PRIVATE | MAP_FILE,
+ if((mmapdata = mmap(NULL, blocksize, PROT_READ, MAP_PRIVATE | MAP_FILE,
fd, offset)) == MAP_FAILED) {
perror("mmap");
return -1;
return -1;
}
- if(minix_munmap(mmapdata, blocksize) < 0) {
+ if(munmap(mmapdata, blocksize) < 0) {
perror("munmap");
return -1;
}
{
void *block;
#define BLOCKSIZE (PAGE_SIZE*10)
- block = minix_mmap(0, BLOCKSIZE, PROT_READ | PROT_WRITE,
+ block = mmap(0, BLOCKSIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
if(block == MAP_FAILED) { e(1); exit(1); }
memset(block, 0, BLOCKSIZE);
/* shrink from bottom */
- minix_munmap(block, PAGE_SIZE);
+ munmap(block, PAGE_SIZE);
}
int
if((bdata = vm_map_cacheblock(MYDEV, dev_off,
VMC_NO_INODE, 0, NULL, blocksize)) == MAP_FAILED) {
- if((bdata = minix_mmap(0, blocksize,
+ if((bdata = mmap(0, blocksize,
PROT_READ|PROT_WRITE, MAP_ANON, -1, 0)) == MAP_FAILED) {
- printf("minix_mmap failed\n");
+ printf("mmap failed\n");
exit(1);
}
mustset = 1;
exit(1);
}
- if(minix_munmap(bdata, blocksize) < 0) {
- printf("dowriteblock: minix_munmap failed %d\n", r);
+ if(munmap(bdata, blocksize) < 0) {
+ printf("dowriteblock: munmap failed %d\n", r);
exit(1);
}
memcpy(block, bdata, blocksize);
- if(minix_munmap(bdata, blocksize) < 0) {
- printf("dowriteblock: minix_munmap failed\n");
+ if(munmap(bdata, blocksize) < 0) {
+ printf("dowriteblock: munmap failed\n");
exit(1);
}
#include <err.h>
#include "returns.h"
-#if defined(__minix)
-#define munmap minix_munmap
-#define mmap minix_mmap
-#endif /* defined(__minix) */
-
void yyparse(void);
#define DEF_TERMPATH "."
#define DEF_TERM "atf"
#include "rtld.h"
#include "ldd.h"
-#define munmap minix_munmap
-
/*
* elfxx_ldd() - bit-size independant ELF ldd implementation.
* returns 0 on success and -1 on failure.
#ifdef __minix
-#define mmap minix_mmap
-#define munmap minix_munmap
#ifndef MAP_COPY
#define MAP_COPY MAP_PRIVATE
#endif
int fd;
if (msgmap != MAP_FAILED)
-#ifdef __minix
- minix_munmap(msgmap, msgmapsz);
-#else /* ! __minix */
munmap(msgmap, msgmapsz);
-#endif /* ! __minix */
msgmap = MAP_FAILED;
if (!file)
return 0;
return -1;
msgmapsz = lseek(fd, 0, SEEK_END);
#ifdef __minix
- msgmap = minix_mmap(0, msgmapsz, PROT_READ, MAP_PRIVATE, fd, 0);
+ msgmap = mmap(0, msgmapsz, PROT_READ, MAP_PRIVATE, fd, 0);
#else /* ! __minix */
msgmap = mmap(0, msgmapsz, PROT_READ, MAP_SHARED, fd, 0);
#endif /* ! __minix */