#define CPUID1_ECX_SSE4_1 (1L << 19)
#define CPUID1_ECX_SSE4_2 (1L << 20)
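/* the masks above are meant to be tested against the ECX value returned by
 * CPUID leaf 1, e.g. (ecx & CPUID1_ECX_SSE4_1) != 0 when SSE4.1 is available */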
+#ifndef __ASSEMBLY__
+
+#include <minix/type.h>
+
+/* structure used by VM to pass data to the kernel while enabling paging */
+struct vm_ep_data {
+ struct mem_map * mem_map;	/* the memory map to install after paging is enabled */
+ vir_bytes data_seg_limit;	/* new segment limit for the kernel after paging is enabled */
+};
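+
+/*
+ * VM passes the address of this structure in SVMCTL_VALUE of the
+ * VMCTL_ENABLE_PAGING kernel call; the kernel copies it in with data_copy()
+ * before installing the new memory map and kernel segment limit.
+ */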
+#endif /* __ASSEMBLY__ */
+
#endif /* __SYS_VM_386_H__ */
_PROTOTYPE( int sys_vmctl_get_memreq, (endpoint_t *who, vir_bytes *mem,
vir_bytes *len, int *wrflag, endpoint_t *who_s, vir_bytes *mem_s,
endpoint_t *) );
-_PROTOTYPE( int sys_vmctl_enable_paging, (struct mem_map *));
+_PROTOTYPE( int sys_vmctl_enable_paging, (void * data));
_PROTOTYPE( int sys_readbios, (phys_bytes address, void *buf, size_t size));
_PROTOTYPE( int sys_stime, (time_t boottime));
#include "../../proc.h"
#include "../../vm.h"
+#include <machine/vm.h>
+
#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/cpufeature.h>
return OK;
}
-PUBLIC int arch_enable_paging(void)
+PUBLIC int arch_enable_paging(struct proc * caller, message * m_ptr)
{
+ struct vm_ep_data ep_data;
+ int r;
+
+ /*
+ * copy the extra data associated with the call from userspace
+ */
+ if((r=data_copy(caller->p_endpoint, (vir_bytes)m_ptr->SVMCTL_VALUE,
+ KERNEL, (vir_bytes) &ep_data, sizeof(ep_data))) != OK) {
+ printf("vmctl_enable_paging: data_copy failed! (%d)\n", r);
+ return r;
+ }
+
+ /*
+ * when turning on paging on i386, we also change the segment limits to
+ * make the special mappings requested by the kernel reachable
+ */
+ if ((r = prot_set_kern_seg_limit(ep_data.data_seg_limit)) != OK)
+ return r;
+
+ /*
+ * install the new map provided by the call
+ */
+ if (newmap(caller, caller, ep_data.mem_map) != OK)
+ panic("arch_enable_paging: newmap failed");
+
+ FIXLINMSG(caller);
+ assert(caller->p_delivermsg_lin == umap_local(caller, D,
+ caller->p_delivermsg_vir, sizeof(message)));
+
#ifdef CONFIG_APIC
/* if local APIC is enabled */
if (lapic_addr) {
_PROTOTYPE( int arch_phys_map, (int index, phys_bytes *addr,
phys_bytes *len, int *flags));
_PROTOTYPE( int arch_phys_map_reply, (int index, vir_bytes addr));
-_PROTOTYPE( int arch_enable_paging, (void));
+_PROTOTYPE( int arch_enable_paging, (struct proc * caller, message * m_ptr));
_PROTOTYPE( int copy_msg_from_user, (struct proc * p, message * user_mbuf,
message * dst));
vm_init(p);
if(!vm_running)
panic("do_vmctl: paging enabling failed");
- if ((err = arch_enable_paging()) != OK) {
- return err;
- }
- if(newmap(caller, p, (struct mem_map *) m_ptr->SVMCTL_VALUE) != OK)
- panic("do_vmctl: newmap failed");
- FIXLINMSG(p);
- assert(p->p_delivermsg_lin ==
- umap_local(p, D, p->p_delivermsg_vir, sizeof(message)));
- return OK;
+ return arch_enable_paging(caller, m_ptr);
case VMCTL_KERN_PHYSMAP:
{
int i = m_ptr->SVMCTL_VALUE;
return r;
}
-PUBLIC int sys_vmctl_enable_paging(struct mem_map *map)
+PUBLIC int sys_vmctl_enable_paging(void * data)
{
message m;
m.SVMCTL_WHO = SELF;
m.SVMCTL_PARAM = VMCTL_ENABLE_PAGING;
- m.SVMCTL_VALUE = (int) map;
+ m.SVMCTL_VALUE = (u32_t) data;
return _kernel_call(SYS_VMCTL, &m);
}
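
/*
 * Illustrative usage sketch (assumes the caller-side names shown in pt_init()
 * below): the caller fills in a struct vm_ep_data and hands its address to
 * this wrapper, for example
 *
 *	struct vm_ep_data ep_data;
 *	ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
 *	ep_data.mem_map = vmp->vm_arch.vm_seg;
 *	if(sys_vmctl_enable_paging(&ep_data) != OK)
 *		panic("enable paging failed");
 */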
int global_bit_ok = 0;
int free_pde;
int p;
- vir_bytes kernlimit;
+ struct vm_ep_data ep_data;
vir_bytes sparepages_mem;
phys_bytes sparepages_ph;
/* first pde in use by process. */
proc_pde = free_pde;
- kernlimit = free_pde*I386_BIG_PAGE_SIZE;
-
- /* Increase kernel segment to address this memory. */
- if((r=sys_vmctl(SELF, VMCTL_I386_KERNELLIMIT, kernlimit)) != OK) {
- panic("VMCTL_I386_KERNELLIMIT failed: %d", r);
- }
-
kpagedir = arch_map2vir(&vmproc[VMP_SYSTEM],
pagedir_pde*I386_BIG_PAGE_SIZE);
pt_mapkernel(newpt); /* didn't know about vm_dir pages earlier */
pt_bind(newpt, vmp);
+ /* new segment limit for the kernel after paging is enabled */
+ ep_data.data_seg_limit = free_pde*I386_BIG_PAGE_SIZE;
+ /* the memory map which must be installed after paging is enabled */
+ ep_data.mem_map = vmp->vm_arch.vm_seg;
+
/* Now actually enable paging. */
- if(sys_vmctl_enable_paging(vmp->vm_arch.vm_seg) != OK)
+ if(sys_vmctl_enable_paging(&ep_data) != OK)
panic("pt_init: enable paging failed");
/* Back to reality - this is where the stack actually is. */