if (new_ttbr == orig_ttbr)
return;
- refresh_tlb();
write_ttbr0(new_ttbr);
*__ptproc = p;
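Dropping the explicit refresh_tlb() around the TTBR0 switch presumably relies on write_ttbr0() performing the required synchronization itself. For orientation only, a minimal ARMv7-A wrapper could look like the sketch below; the name, the trailing ISB, and whether MINIX's real write_ttbr0() matches this are assumptions, not facts from the diff.

/* Sketch only: TTBR0 is CP15 register c2/c0/0 on ARMv7-A. If the write is
 * followed by an ISB (and any TLB maintenance the port needs), a separate
 * refresh_tlb() at call sites becomes redundant. */
static inline void write_ttbr0_sketch(u32_t ttbr)
{
	asm volatile("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttbr) : "memory");
	asm volatile("isb" ::: "memory");	/* resynchronize the pipeline */
}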
pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
} else {
/* Requested address is physical. Make up the PDE entry. */
- pdeval = (linaddr & ARM_VM_SECTION_MASK) |
- ARM_VM_SECTION |
- ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
+ pdeval = (linaddr & ARM_VM_SECTION_MASK)
+ | ARM_VM_SECTION
+ | ARM_VM_SECTION_DOMAIN
+ | ARM_VM_SECTION_WT
+ | ARM_VM_SECTION_USER;
}
/* Write the pde value that we need into a pde that the kernel can access. */
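The new ARM_VM_SECTION_WT flag selects write-through caching for these made-up section entries. For reference, this is how the C and B bits of an ARMv7-A short-descriptor section encode the memory type while TEX remap is off (TEX=0b000); the macro names below are illustrative, not the MINIX ones.

/* Illustrative encodings, assuming TRE=0 and TEX=0b000 (see the SCTLR
 * comments later in this patch):
 *   C=0,B=0  strongly-ordered      C=0,B=1  device
 *   C=1,B=0  write-through (WT)    C=1,B=1  write-back (WB)
 */
#define SEC_TYPE (2u << 0)         /* bits[1:0] = 0b10: section descriptor */
#define SEC_B    (1u << 2)         /* bufferable */
#define SEC_C    (1u << 3)         /* cacheable */
#define SEC_WT   (SEC_C)           /* what ARM_VM_SECTION_WT should select */
#define SEC_WB   (SEC_C | SEC_B)   /* the encoding this patch moves away from */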
dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
if(changed) {
reload_ttbr0();
- refresh_tlb();
}
/* Copy pages. */
PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);
return EFAULT;
}
- /* We don't expect to ever see this. */
+ /* We don't expect to ever see this.
+ * LSC Impossible with the previous test.
if(pde_v & ARM_VM_BIGPAGE) {
*physical = pde_v & ARM_VM_SECTION_MASK;
if(ptent) *ptent = pde_v;
*physical += virtual & ARM_VM_OFFSET_MASK_1MB;
- } else {
+ } else */ {
/* Retrieve page table entry. */
pt = (u32_t *) (pde_v & ARM_VM_PDE_MASK);
assert(!((u32_t) pt % ARM_PAGETABLE_SIZE));
if (new_ttbr) {
reload_ttbr0();
- refresh_tlb();
}
/* If a page fault happens, pfa is non-null */
if ((pfa = phys_memset(ptr, pattern, chunk))) {
void release_address_space(struct proc *pr)
{
pr->p_seg.p_ttbr_v = NULL;
- refresh_tlb();
+ barrier();
}
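Here the TLB flush is replaced with a plain barrier, since only the kernel-visible pointer is being cleared. Assuming barrier() is the usual ARMv7 DSB/ISB pair (an assumption; the MINIX macro may be a lighter compiler-only barrier), it would amount to:

/* Sketch of a full ARMv7 barrier; if MINIX's barrier() is compiler-only,
 * this overstates what the call above does. */
static inline void barrier_sketch(void)
{
	asm volatile("dsb" ::: "memory");	/* complete outstanding accesses */
	asm volatile("isb" ::: "memory");	/* flush the pipeline */
}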
assert(cbi->mem_high_phys);
/* Set up an identity mapping page directory */
- for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
- u32_t flags = ARM_VM_SECTION |
- ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_USER;
- phys = i * ARM_BIG_PAGE_SIZE;
- pagedir[i] = phys | flags;
+ for(i = 0; i < ARM_VM_DIR_ENTRIES; i++) {
+ u32_t flags = ARM_VM_SECTION
+ | ARM_VM_SECTION_USER
+ | ARM_VM_SECTION_DOMAIN;
+ phys = i * ARM_BIG_PAGE_SIZE;
+ pagedir[i] = phys | flags;
}
}
int pde;
u32_t mapped = 0, kern_phys = kern_phys_start;
- assert(!(kern_vir_start % ARM_BIG_PAGE_SIZE));
- assert(!(kern_phys_start % ARM_BIG_PAGE_SIZE));
- pde = kern_vir_start / ARM_BIG_PAGE_SIZE; /* start pde */
+ assert(!(kern_vir_start % ARM_BIG_PAGE_SIZE));
+ assert(!(kern_phys_start % ARM_BIG_PAGE_SIZE));
+ pde = kern_vir_start / ARM_BIG_PAGE_SIZE; /* start pde */
while(mapped < kern_kernlen) {
- pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) |
- ARM_VM_SECTION |
- ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
- ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+ pagedir[pde] = (kern_phys & ARM_VM_PDE_MASK) | ARM_VM_SECTION
+ | ARM_VM_SECTION_SUPER
+ | ARM_VM_SECTION_DOMAIN
+ | ARM_VM_SECTION_WT;
mapped += ARM_BIG_PAGE_SIZE;
kern_phys += ARM_BIG_PAGE_SIZE;
pde++;
sctlr = read_sctlr();
/* Enable MMU */
- sctlr |= (SCTLR_M);
+ sctlr |= SCTLR_M;
+
+ /* AFE left at zero (its reset value): not using the simplified access model. */
+ /* TRE left at zero (its reset value): TEX[2:0] plus the C and B bits are used. */
/* Enable instruction and data cache */
sctlr |= SCTLR_C;
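The AFE/TRE comments matter because the WT/WB encodings used throughout this patch are only meaningful while TEX remap is disabled. The relevant ARMv7-A SCTLR bit positions are sketched below; the names are illustrative and may not match MINIX's constants.

/* ARMv7-A SCTLR bits referenced by this hunk (positions per the ARM ARM): */
#define SCTLR_M_BIT   (1u << 0)   /* MMU enable */
#define SCTLR_C_BIT   (1u << 2)   /* data/unified cache enable */
#define SCTLR_I_BIT   (1u << 12)  /* instruction cache enable */
#define SCTLR_TRE_BIT (1u << 28)  /* TEX remap; 0 => TEX[2:0]:C:B used directly */
#define SCTLR_AFE_BIT (1u << 29)  /* access flag; 0 => full AP[2:0] model */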
phys_bytes pg_load()
{
phys_bytes phpagedir = vir2phys(pagedir);
- refresh_tlb();
write_ttbr0(phpagedir);
return phpagedir;
}
phys_bytes ph;
pt = alloc_pagetable(&ph);
pagedir[pde] = (ph & ARM_VM_PDE_MASK)
- | ARM_VM_PAGEDIR | ARM_VM_PDE_DOMAIN;
+ | ARM_VM_PAGEDIR
+ | ARM_VM_PDE_DOMAIN;
mapped_pde = pde;
}
assert(pt);
pt[pte] = (source & ARM_VM_PTE_MASK)
- | ARM_VM_PAGETABLE
- | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
- | ARM_VM_PTE_USER;
+ | ARM_VM_PAGETABLE
+ | ARM_VM_PTE_WT
+ | ARM_VM_PTE_USER;
vaddr += ARM_PAGE_SIZE;
if(phys != PG_ALLOCATEME)
phys += ARM_PAGE_SIZE;
/* Set up an identity mapping page directory */
for(i = 0; i < I386_VM_DIR_ENTRIES; i++) {
- u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE |
- I386_VM_USER | I386_VM_WRITE;
+ u32_t flags = I386_VM_PRESENT | I386_VM_BIGPAGE
+ | I386_VM_USER
+ | I386_VM_WRITE;
phys = i * I386_BIG_PAGE_SIZE;
if((cbi->mem_high_phys & I386_VM_ADDR_MASK_4MB)
<= (phys & I386_VM_ADDR_MASK_4MB)) {
if((r=pt_writemap(vmprocess, pt, loc, *phys, VM_PAGE_SIZE*pages,
ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#if defined(__arm__)
- | ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+ | ARM_VM_PTE_WT
#endif
, 0)) != OK) {
free_mem(newpage, pages);
#if defined(__arm__)
else
flags |= ARCH_VM_PTE_RO;
- flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
+ flags |= ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE; // LSC FIXME
#endif
/* Update flags. */
printf("addr not ok: pde %d present but pde unwritable\n", pde);
return 0;
}
+#elif defined(__arm__)
+ if(writeflag &&
+ (pt->pt_dir[pde] & ARCH_VM_PTE_RO)) {
+ printf("addr not ok: pde %d present but pde unwritable\n", pde);
+ return 0;
+ }
#endif
if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
if(writeflag &&
!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
printf("addr not ok: pde %d / pte %d present but unwritable\n",
+ pde, pte);
#elif defined(__arm__)
- if(!writeflag &&
- !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
- printf("addr not ok: pde %d / pte %d present but writable\n",
-#endif
+ if(writeflag &&
+ (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
+ printf("addr not ok: pde %d / pte %d present but unwritable\n",
pde, pte);
+#endif
return 0;
}
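The corrected ARM check inverts the broken predicate: ARM's short descriptors carry a read-only bit (AP[2]) rather than x86's read/write bit, so a write access must fail exactly when that bit is set. Distilled into a helper (hypothetical name, same logic as the fixed test):

/* A write is permitted unless the entry carries the read-only bit. The old
 * code tested (!writeflag && !RO), i.e. it rejected ordinary read-write
 * pages on read accesses. */
static int pte_write_ok(u32_t pte, int writeflag)
{
	return !(writeflag && (pte & ARCH_VM_PTE_RO));
}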
| ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW;
#elif defined(__arm__)
pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK)
- | ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+ | ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; // LSC FIXME
#endif
return OK;
ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW,
#elif defined(__arm__)
if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE,
- ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
- ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE,
+ ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER |
+ ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE, // LSC FIXME
#endif
WMF_OVERWRITE)) != OK) {
return r;
if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE,
ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW
#ifdef __arm__
- | ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW |
- ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE
+ | ARM_VM_PTE_WB
#endif
,
WMF_OVERWRITE)) != OK) {
kern_mappings[index].flags |= PTF_NOCACHE;
#elif defined(__arm__)
kern_mappings[index].flags |= ARM_VM_PTE_DEVICE;
- else
- kern_mappings[index].flags |=
- ARM_VM_PTE_WB | ARM_VM_PTE_SHAREABLE;
#endif
if(flags & VMMF_USER)
kern_mappings[index].flags |= ARCH_VM_PTE_USER;
pdm->val = (ph & ARCH_VM_ADDR_MASK) |
ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
- pdm->val = (ph & ARCH_VM_PDE_MASK) |
- ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN;
+ pdm->val = (ph & ARCH_VM_PDE_MASK)
+ | ARCH_VM_PDE_PRESENT
+ | ARM_VM_PDE_DOMAIN; // LSC FIXME
#endif
}
}
int i;
for (i = 0; i < pages_per_pagedir; i++) {
pdm->page_directories[pdeslot*pages_per_pagedir+i] =
- (phys+i*VM_PAGE_SIZE) |
- ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_RW |
- ARCH_VM_PTE_USER;
+ (phys+i*VM_PAGE_SIZE)
+ | ARCH_VM_PTE_PRESENT
+ | ARCH_VM_PTE_RW
+ | ARCH_VM_PTE_USER; // LSC FIXME
}
}
#endif
pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
#elif defined(__arm__)
- pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK) |
- ARM_VM_SECTION |
- ARM_VM_SECTION_DOMAIN | ARM_VM_SECTION_WB |
- ARM_VM_SECTION_SHAREABLE | ARM_VM_SECTION_SUPER;
+ pt->pt_dir[kern_pde] = (addr & ARCH_VM_PDE_MASK)
+ | ARM_VM_SECTION
+ | ARM_VM_SECTION_DOMAIN
+ | ARM_VM_SECTION_WB
+ | ARM_VM_SECTION_SUPER;
#endif
kern_pde++;
mapped += ARCH_BIG_PAGE_SIZE;
#define PTF_PRESENT I386_VM_PRESENT
#define PTF_USER I386_VM_USER
#define PTF_GLOBAL I386_VM_GLOBAL
-#define PTF_MAPALLOC I386_VM_PTAVAIL1 /* Page allocated by pt code. */
#define PTF_NOCACHE (I386_VM_PWT | I386_VM_PCD)
#define ARCH_VM_DIR_ENTRIES I386_VM_DIR_ENTRIES
static int map_ph_writept(struct vmproc *vmp, struct vir_region *vr,
struct phys_region *pr)
{
- int rw;
+ int flags = PTF_PRESENT | PTF_USER;
struct phys_block *pb = pr->ph;
assert(vr);
assert(pb->refcount > 0);
if(pr_writable(vr, pr))
- rw = PTF_WRITE;
+ flags |= PTF_WRITE;
else
- rw = PTF_READ;
+ flags |= PTF_READ;
+
+#if defined(__arm__)
+ if (pb->phys >= 0x80000000 && pb->phys < (0xc0000000 - VM_PAGE_SIZE)) {
+ // LSC Do this only for actual RAM
+ flags |= ARM_VM_PTE_WT;
+ }
+#endif
if(pt_writemap(vmp, &vmp->vm_pt, vr->vaddr + pr->offset,
- pb->phys, VM_PAGE_SIZE, PTF_PRESENT | PTF_USER | rw,
+ pb->phys, VM_PAGE_SIZE, flags,
#if SANITYCHECKS
!pr->written ? 0 :
#endif
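The hard-coded window in the hunk above marks only pages backed by actual RAM as write-through cacheable, per the "LSC Do this only for actual RAM" note. On the AM335x/OMAP3-class boards the MINIX ARM port targets, DRAM is decoded starting at 0x80000000, which is presumably where these constants come from. A named helper would make the intent explicit (sketch; the name is invented, the bounds are copied from the hunk):

/* True iff phys lies in the assumed DRAM window used above; the upper bound
 * backs off one page, mirroring the patch's expression. */
static int phys_is_ram(phys_bytes phys)
{
	return phys >= 0x80000000 && phys < (0xc0000000 - VM_PAGE_SIZE);
}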
if(newpage == NO_MEM) return -1;
mem = CLICK2ABS(newpage);
if(pt_writemap(vmprocess, &vmprocess->vm_pt,
- v, mem, VM_PAGE_SIZE,
- ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 0) != OK) {
+ v, mem, VM_PAGE_SIZE,
+ ARCH_VM_PTE_PRESENT
+ | ARCH_VM_PTE_USER
+ | ARCH_VM_PTE_RW
+#if defined(__arm__)
+ | ARM_VM_PTE_WB
+#endif
+ , 0) != OK) {
free_mem(newpage, 1);
return -1;
}
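A closing note on the WT-versus-WB theme running through this patch (my reading; the diff itself states no rationale): write-through keeps DRAM coherent with the D-cache on every store, so mappings visible to other observers need no explicit cache maintenance. Write-back mappings would instead require a clean first, e.g.:

/* ARMv7-A clean-by-MVA (DCCMVAC, CP15 c7/c10/1): push one dirty line to the
 * point of coherency. This is the maintenance that WT mappings avoid. */
static inline void dcache_clean_line(void *va)
{
	asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (va) : "memory");
}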