/* General calls. */
#define VM_MMAP (VM_RQ_BASE+10)
-# define VMM_ADDR m5_l1
-# define VMM_LEN m5_l2
-# define VMM_PROT m5_s1
-# define VMM_FLAGS m5_s2
-# define VMM_FD m5_i1
-# define VMM_OFFSET_LO m5_i2
-# define VMM_FORWHOM m5_l3
-# define VMM_OFFSET_HI m5_l3
-# define VMM_RETADDR m5_l1 /* result */
-#define VM_UMAP (VM_RQ_BASE+11)
-# define VMU_SEG m1_i1
-# define VMU_OFFSET m1_p1
-# define VMU_LENGTH m1_p2
-# define VMU_RETADDR m1_p3
+# define VMM_ADDR m_u.m_mmap.addr
+# define VMM_LEN m_u.m_mmap.len
+# define VMM_PROT m_u.m_mmap.prot
+# define VMM_FLAGS m_u.m_mmap.flags
+# define VMM_FD m_u.m_mmap.fd
+# define VMM_OFFSET m_u.m_mmap.offset
+# define VMM_FORWHOM m_u.m_mmap.forwhom
+# define VMM_RETADDR m_u.m_mmap.retaddr
+
+#define VM_MUNMAP (VM_RQ_BASE+17)
+# define VMUM_ADDR m_u.m_mmap.addr
+# define VMUM_LEN m_u.m_mmap.len
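/*
 * Illustration only (not part of this change): with the typed fields a
 * VM_MUNMAP request is plain struct access, no manual word packing.
 * The wrapper name below is hypothetical; VM_PROC_NR and _syscall()
 * are the usual MINIX IPC entry points.
 */
static int example_munmap(void *addr, size_t len)
{
	message m;

	memset(&m, 0, sizeof(m));
	m.VMUM_ADDR = addr;	/* expands to m.m_u.m_mmap.addr */
	m.VMUM_LEN = len;	/* expands to m.m_u.m_mmap.len */
	return _syscall(VM_PROC_NR, VM_MUNMAP, &m);
}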
/* to VM: inform VM about a region of memory that is used for
 * bus-master DMA */
# define VMUP_EP m1_i1
# define VMUP_VADDR m1_p1
-#define VM_MUNMAP (VM_RQ_BASE+17)
-# define VMUM_ADDR m1_p1
-# define VMUM_LEN m1_i1
-
/* To VM: map in cache block by FS */
#define VM_MAPCACHEPAGE (VM_RQ_BASE+26)
# define VMRE_FLAGS m1_i3
#define VM_SHM_UNMAP (VM_RQ_BASE+34)
-# define VMUN_ENDPT m2_i1
-# define VMUN_ADDR m2_l1
+# define VMUN_ENDPT m_u.m_mmap.forwhom
+# define VMUN_ADDR m_u.m_mmap.addr
#define VM_GETPHYS (VM_RQ_BASE+35)
# define VMPHYS_ENDPT m2_i1
typedef struct {
dev_t dev; /* 64bits long. */
- void *block;
- u32_t dev_offset_pages;
- u32_t ino_offset_pages;
- u32_t ino;
+ off_t dev_offset;
+ off_t ino_offset;
+ ino_t ino;
u32_t *flags_ptr;
+ void *block;
u8_t pages;
u8_t flags;
- uint8_t padding[26];
+ uint8_t padding[12];
} mess_vmmcp;
_ASSERT_MSG_SIZE(mess_vmmcp);
_ASSERT_MSG_SIZE(mess_notify);
typedef struct {
- endpoint_t who;
- u32_t offset;
+ off_t offset;
+ void *addr;
+ size_t len;
+ int prot;
+ int flags;
+ int fd;
+ endpoint_t forwhom;
+ void *retaddr;
+ u32_t padding[5];
+} mess_mmap;
+_ASSERT_MSG_SIZE(mess_mmap);
+
+typedef struct {
+ off_t offset;
dev_t dev;
- u32_t ino;
+ ino_t ino;
+ endpoint_t who;
u32_t vaddr;
u32_t len;
- u16_t fd;
- u16_t clearend_and_flags; /* low 12 bits are clearend, rest flags */
- uint8_t padding[24];
+ u32_t flags;
+ u32_t fd;
+ u16_t clearend;
+ uint8_t padding[8];
} mess_vm_vfs_mmap;
_ASSERT_MSG_SIZE(mess_vm_vfs_mmap);
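/*
 * Every member of the message union must fill the fixed 56-byte
 * payload exactly; with 32-bit pointers the members plus padding of
 * both structs above work out to 56, and _ASSERT_MSG_SIZE turns any
 * mismatch into a compile-time error.  A sketch of such a check (the
 * real macro in <minix/ipc.h> is equivalent in spirit):
 *
 *	#define _ASSERT_MSG_SIZE(msg) \
 *		typedef int _ASSERT_##msg[sizeof(msg) == 56 ? 1 : -1]
 */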
mess_11 m_m11;
mess_vmmcp m_vmmcp;
mess_vmmcp_reply m_vmmcp_reply;
+ mess_mmap m_mmap;
mess_vm_vfs_mmap m_vm_vfs;
mess_notify m_notify; /* notify messages */
mess_sigcalls m_sigcalls; /* SYS_{GETKSIG,ENDKSIG,KILL,SIGSEND,SIGRETURN} */
int vm_watch_exit(endpoint_t ep);
int vm_forgetblock(u64_t id);
void vm_forgetblocks(void);
-int minix_vfs_mmap(endpoint_t who, u32_t offset, u32_t len,
- dev_t dev, u32_t ino, u16_t fd, u32_t vaddr, u16_t clearend, u16_t
+int minix_vfs_mmap(endpoint_t who, off_t offset, size_t len,
+ dev_t dev, ino_t ino, int fd, u32_t vaddr, u16_t clearend, u16_t
flags);
/* minix vfs mmap flags */
-#define MVM_LENMASK 0x0FFF
-#define MVM_FLAGSMASK 0xF000
#define MVM_WRITABLE 0x8000
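/*
 * MVM_LENMASK/MVM_FLAGSMASK are gone because clearend and the MVM_*
 * flags no longer share one 16-bit word: mess_vm_vfs_mmap carries a
 * full u16_t 'clearend' and a separate u32_t 'flags', so the old
 *	clearend_and_flags = (clearend & 0x0FFF) | (flags & 0xF000)
 * packing (and its 4095-byte limit on clearend) is unnecessary.
 */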
/* VM kernel request types. */
count, vir_bytes *next);
int vm_procctl(endpoint_t ep, int param);
-int vm_set_cacheblock(void *block, dev_t dev, u64_t dev_offset,
- u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize);
+int vm_set_cacheblock(void *block, dev_t dev, off_t dev_offset,
+ ino_t ino, off_t ino_offset, u32_t *flags, int blocksize);
-void *vm_map_cacheblock(dev_t dev, u64_t dev_offset,
- u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize);
+void *vm_map_cacheblock(dev_t dev, off_t dev_offset,
+ ino_t ino, off_t ino_offset, u32_t *flags, int blocksize);
int vm_clear_cache(dev_t dev);
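/*
 * Hypothetical caller sketch: a file system publishing a block to VM's
 * cache through the byte-granular interface.  The names below are
 * illustrative; only the vm_set_cacheblock() prototype above is real.
 */
static void example_publish_block(void *data, dev_t dev, off_t dev_pos,
	ino_t ino, off_t ino_pos, int blocksize)
{
	u32_t flags = 0;

	/* Offsets must be page-aligned; VM rejects others with EFAULT. */
	if(vm_set_cacheblock(data, dev, dev_pos, ino, ino_pos,
	    &flags, blocksize) != OK) {
		/* Not fatal: the block simply stays uncached. */
	}
}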
int r;
memset(&m, 0, sizeof(m));
- m.VMM_ADDR = (vir_bytes) addr;
+ m.VMM_ADDR = addr;
m.VMM_LEN = len;
m.VMM_PROT = prot;
m.VMM_FLAGS = flags;
m.VMM_FD = fd;
- m.VMM_OFFSET_LO = ex64lo(offset);
+ m.VMM_OFFSET = offset;
+ m.VMM_FORWHOM = forwhom;
if(forwhom != SELF) {
m.VMM_FLAGS |= MAP_THIRDPARTY;
- m.VMM_FORWHOM = forwhom;
- } else {
- m.VMM_OFFSET_HI = ex64hi(offset);
}
r = _syscall(VM_PROC_NR, VM_MMAP, &m);
return MAP_FAILED;
}
- return (void *) m.VMM_RETADDR;
+ return m.VMM_RETADDR;
}
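/*
 * Note the simplification above: VMM_FORWHOM and VMM_OFFSET_HI used to
 * share m5_l3, so a MAP_THIRDPARTY mapping could only pass a 32-bit
 * offset.  With distinct mess_mmap fields both cases take a full
 * off_t.  Hypothetical example:
 */
static void *example_map_other(endpoint_t other_ep, size_t len, int fd)
{
	off_t far_offset = (off_t)2 << 32;	/* 8 GiB into the file */

	return minix_mmap_for(other_ep, NULL, len, PROT_READ, MAP_SHARED,
		fd, far_offset);
}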
-int minix_vfs_mmap(endpoint_t who, u32_t offset, u32_t len,
- dev_t dev, u32_t ino, u16_t fd, u32_t vaddr, u16_t clearend,
+int minix_vfs_mmap(endpoint_t who, off_t offset, size_t len,
+ dev_t dev, ino_t ino, int fd, u32_t vaddr, u16_t clearend,
u16_t flags)
{
message m;
m.m_u.m_vm_vfs.vaddr = vaddr;
m.m_u.m_vm_vfs.len = len;
m.m_u.m_vm_vfs.fd = fd;
- m.m_u.m_vm_vfs.clearend_and_flags = clearend | flags;
+ m.m_u.m_vm_vfs.clearend = clearend;
+ m.m_u.m_vm_vfs.flags = flags;
return _syscall(VM_PROC_NR, VM_VFS_MMAP, &m);
}
return minix_mmap_for(SELF, addr, len, prot, flags, fd, offset);
}
-void *minix_mmap64(void *addr, size_t len, int prot, int flags,
- int fd, u64_t offset)
-{
- return minix_mmap_for(SELF, addr, len, prot, flags, fd, offset);
-}
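/*
 * minix_mmap64() is dropped as redundant: off_t is 64 bits wide on
 * MINIX, so minix_mmap() already accepts the full offset range.
 */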
-
int minix_munmap(void *addr, size_t len)
{
message m;
memset(&m, 0, sizeof(m));
m.VMUN_ENDPT = endpt;
- m.VMUN_ADDR = (long) addr;
+ m.VMUN_ADDR = addr;
return _syscall(VM_PROC_NR, VM_SHM_UNMAP, &m);
}
* disk immediately if they are dirty.
*/
dev_t dev;
- u64_t dev_off;
+ off_t dev_off;
int r;
if (bp == NULL) return; /* it is easier to check here than in caller */
dev = bp->lmfs_dev;
- dev_off = (u64_t) bp->lmfs_blocknr * fs_block_size;
+ dev_off = (off_t) bp->lmfs_blocknr * fs_block_size;
lowercount(bp);
if (bp->lmfs_count != 0) return; /* block is still in use */
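/*
 * The widening cast above is what keeps the position correct: both
 * lmfs_blocknr and fs_block_size are 32-bit here, so an uncast product
 * would wrap before being stored.  For example:
 *
 *	block_t nr = 0x200000;			// 2^21 blocks of 4 KiB
 *	off_t bad  = nr * 4096;			// 32-bit multiply: 0
 *	off_t good = (off_t) nr * 4096;		// 0x200000000 (8 GiB)
 */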
* from the cache, it is not clear what the caller could do about it anyway.
*/
int r, op_failed;
- u64_t pos;
+ off_t pos;
dev_t dev = bp->lmfs_dev;
op_failed = 0;
ASSERT(bp->lmfs_bytes == fs_block_size);
ASSERT(fs_block_size > 0);
- pos = (u64_t)bp->lmfs_blocknr * fs_block_size;
+ pos = (off_t)bp->lmfs_blocknr * fs_block_size;
if(fs_block_size > PAGE_SIZE) {
#define MAXPAGES 20
vir_bytes blockrem, vaddr = (vir_bytes) bp->data;
register int i;
register iovec_t *iop;
static iovec_t iovec[NR_IOREQS];
- u64_t pos;
+ off_t pos;
int iov_per_block;
int start_in_use = bufs_in_use, start_bufqsize = bufqsize;
assert(nblocks > 0);
assert(niovecs > 0);
- pos = (u64_t)bufq[0]->lmfs_blocknr * fs_block_size;
+ pos = (off_t)bufq[0]->lmfs_blocknr * fs_block_size;
if (rw_flag == READING)
r = bdev_gather(dev, pos, iovec, niovecs, BDEV_NOFLAGS);
else
{
block_t startblock, b, limitblock;
dev_t dev = m->REQ_DEV;
- u64_t extra, pos = make64(m->REQ_SEEK_POS_LO, m->REQ_SEEK_POS_HI);
+ off_t extra, pos = make64(m->REQ_SEEK_POS_LO, m->REQ_SEEK_POS_HI);
size_t len = m->REQ_NBYTES;
struct buf *bp;
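/*
 * The VFS request still carries the file position as two 32-bit words;
 * make64() reassembles them and the result now lands in an off_t:
 *	pos = ((u64_t) REQ_SEEK_POS_HI << 32) | REQ_SEEK_POS_LO
 */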
vm_map_phys.c \
vm_memctl.c \
vm_notify_sig.c \
- vm_umap.c \
vm_procctl.c \
vm_query_exit.c \
vm_set_priv.c \
#include <minix/sysutil.h>
#include <machine/vmparam.h>
-int vm_cachecall(message *m, int call, void *addr, dev_t dev, u64_t dev_offset,
- u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
+int vm_cachecall(message *m, int call, void *addr, dev_t dev, off_t dev_offset,
+ ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
if(blocksize % PAGE_SIZE)
panic("blocksize %d should be a multiple of pagesize %d\n",
assert(dev != NO_DEV);
- m->m_u.m_vmmcp.dev_offset_pages = dev_offset/PAGE_SIZE;
- m->m_u.m_vmmcp.ino_offset_pages = ino_offset/PAGE_SIZE;
+ m->m_u.m_vmmcp.dev_offset = dev_offset;
+ m->m_u.m_vmmcp.ino_offset = ino_offset;
m->m_u.m_vmmcp.ino = ino;
m->m_u.m_vmmcp.block = addr;
m->m_u.m_vmmcp.flags_ptr = flags;
return _taskcall(VM_PROC_NR, call, m);
}
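/*
 * Previously this function divided the offsets by PAGE_SIZE
 * (dev_offset_pages/ino_offset_pages), silently discarding any
 * sub-page remainder.  Byte offsets now travel unmodified, and VM
 * rejects unaligned values explicitly (see the EFAULT checks in
 * do_mapcache()/do_setcache() below).
 */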
-void *vm_map_cacheblock(dev_t dev, u64_t dev_offset,
- u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
+void *vm_map_cacheblock(dev_t dev, off_t dev_offset,
+ ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
message m;
return m.m_u.m_vmmcp_reply.addr;
}
-int vm_set_cacheblock(void *block, dev_t dev, u64_t dev_offset,
- u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
+int vm_set_cacheblock(void *block, dev_t dev, off_t dev_offset,
+ ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
message m;
do_mapcache(message *msg)
{
dev_t dev = msg->m_u.m_vmmcp.dev;
- u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
- u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
+ u64_t dev_off = msg->m_u.m_vmmcp.dev_offset;
+ u64_t ino_off = msg->m_u.m_vmmcp.ino_offset;
int n;
phys_bytes bytes = msg->m_u.m_vmmcp.pages * VM_PAGE_SIZE;
struct vir_region *vr;
vir_bytes offset;
int io = 0;
+ if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
+ printf("VM: unaligned cache operation\n");
+ return EFAULT;
+ }
+
if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
caller = &vmproc[n];
{
int r;
dev_t dev = msg->m_u.m_vmmcp.dev;
- u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset_pages * VM_PAGE_SIZE;
- u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset_pages * VM_PAGE_SIZE;
+ u64_t dev_off = (u64_t) msg->m_u.m_vmmcp.dev_offset;
+ u64_t ino_off = (u64_t) msg->m_u.m_vmmcp.ino_offset;
int n;
struct vmproc *caller;
phys_bytes offset;
if(bytes < VM_PAGE_SIZE) return EINVAL;
+ if(dev_off % PAGE_SIZE || ino_off % PAGE_SIZE) {
+ printf("VM: unaligned cache operation\n");
+ return EFAULT;
+ }
+
if(vm_isokendpt(msg->m_source, &n) != OK) panic("bogus source");
caller = &vmproc[n];
}
static int mmap_file(struct vmproc *vmp,
- int vmfd, u32_t off_lo, u32_t off_hi, int flags,
+ int vmfd, off_t file_offset, int flags,
ino_t ino, dev_t dev, u64_t filesize, vir_bytes addr, vir_bytes len,
vir_bytes *retaddr, u16_t clearend, int writable, int mayclosefd)
{
/* VFS has replied to a VMVFSREQ_FDLOOKUP request. */
struct vir_region *vr;
- u64_t file_offset, page_offset;
+ u64_t page_offset;
int result = OK;
u32_t vrflags = 0;
if(writable) vrflags |= VR_WRITABLE;
- if(flags & MAP_THIRDPARTY) {
- file_offset = off_lo;
- } else {
- file_offset = make64(off_lo, off_hi);
- if(off_hi && !off_lo) {
- /* XXX clang compatability hack */
- off_hi = file_offset = 0;
- }
- }
-
/* Do some page alignments. */
if((page_offset = (file_offset % VM_PAGE_SIZE))) {
file_offset -= page_offset;
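		/* Example: with 4 KiB pages, file_offset 0x1234 gives
		 * page_offset = 0x234 and a rounded-down file_offset of
		 * 0x1000. */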
/* It might be disabled */
if(!enable_filemap) return ENXIO;
- clearend = (m->m_u.m_vm_vfs.clearend_and_flags & MVM_LENMASK);
- flags = (m->m_u.m_vm_vfs.clearend_and_flags & MVM_FLAGSMASK);
+ clearend = m->m_u.m_vm_vfs.clearend;
+ flags = m->m_u.m_vm_vfs.flags;
if((r=vm_isokendpt(m->m_u.m_vm_vfs.who, &n)) != OK)
panic("bad ep %d from vfs", m->m_u.m_vm_vfs.who);
vmp = &vmproc[n];
- return mmap_file(vmp, m->m_u.m_vm_vfs.fd, m->m_u.m_vm_vfs.offset, 0,
+ return mmap_file(vmp, m->m_u.m_vm_vfs.fd, m->m_u.m_vm_vfs.offset,
MAP_PRIVATE | MAP_FIXED,
m->m_u.m_vm_vfs.ino, m->m_u.m_vm_vfs.dev,
(u64_t) LONG_MAX * VM_PAGE_SIZE,
result = origmsg->VMV_RESULT;
} else {
/* Finish mmap */
- result = mmap_file(vmp, replymsg->VMV_FD, origmsg->VMM_OFFSET_LO,
- origmsg->VMM_OFFSET_HI, origmsg->VMM_FLAGS,
+ result = mmap_file(vmp, replymsg->VMV_FD, origmsg->VMM_OFFSET,
+ origmsg->VMM_FLAGS,
replymsg->VMV_INO, replymsg->VMV_DEV,
(u64_t) replymsg->VMV_SIZE_PAGES*PAGE_SIZE,
- origmsg->VMM_ADDR,
+ (vir_bytes) origmsg->VMM_ADDR,
origmsg->VMM_LEN, &v, 0, writable, 1);
}
/* Unblock requesting process. */
memset(&mmap_reply, 0, sizeof(mmap_reply));
mmap_reply.m_type = result;
- mmap_reply.VMM_ADDR = v;
+ mmap_reply.VMM_RETADDR = (void *) v;
if(ipc_send(vmp->vm_endpoint, &mmap_reply) != OK)
panic("VM: mmap_file_cont: ipc_send() failed");
{
int r, n;
struct vmproc *vmp;
- vir_bytes addr = m->VMM_ADDR;
+ vir_bytes addr = (vir_bytes) m->VMM_ADDR;
struct vir_region *vr = NULL;
int execpriv = 0;
size_t len = (vir_bytes) m->VMM_LEN;
}
/* Return mapping, as seen from process. */
- m->VMM_RETADDR = vr->vaddr;
+ m->VMM_RETADDR = (void *) vr->vaddr;
return OK;
}
return (u32_t) sqrt(v);
}
-int vm_set_cacheblock(void *block, dev_t dev, u64_t dev_offset,
- u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
+int vm_set_cacheblock(void *block, dev_t dev, off_t dev_offset,
+ ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
return ENOSYS;
}
-void *vm_map_cacheblock(dev_t dev, u64_t dev_offset,
- u64_t ino, u64_t ino_offset, u32_t *flags, int blocksize)
+void *vm_map_cacheblock(dev_t dev, off_t dev_offset,
+ ino_t ino, off_t ino_offset, u32_t *flags, int blocksize)
{
return MAP_FAILED;
}