/* Set / clear bit `bit` in bitmap `map`.  MAP_CHUNK selects the bitchunk
 * word holding the bit and CHUNK_OFFSET the position within that word
 * (both macros defined elsewhere in this header).
 * NOTE(review): the shifted constant is a plain int `1`; if bitchunk_t is
 * wider than int this would truncate — presumably it is not; confirm. */
#define SET_BIT(map,bit) ( MAP_CHUNK(map,bit) |= (1 << CHUNK_OFFSET(bit) ))
#define UNSET_BIT(map,bit) ( MAP_CHUNK(map,bit) &= ~(1 << CHUNK_OFFSET(bit) ))
+#if defined(CONFIG_SMP) && defined(__GNUC__)
+#ifndef __ASSEMBLY__
+/*
+ * bits_fill - set every bit of a bitmap.
+ *
+ * @chunks: array of bitchunk_t words backing the bitmap
+ * @bits:   number of bits the bitmap holds
+ *
+ * Walks the bitmap chunk by chunk and fills each word via bit_fill().
+ * Used to mark all CPUs (e.g. flag possibly-stale TLB entries everywhere).
+ *
+ * Fix: the original declaration had no return type; implicit int is
+ * invalid since C99 and the function returns nothing, so it is `void`.
+ */
+static inline void bits_fill(bitchunk_t * chunks, unsigned bits)
+{
+	unsigned c, cnt;
+
+	cnt = BITMAP_CHUNKS(bits);
+	for (c = 0; c < cnt; c++)
+		bit_fill(chunks[c]);
+}
+#endif
+#endif
+
+
#endif /* _BITMAP_H */
/*===========================================================================*
* lin_lin_copy *
*===========================================================================*/
-PRIVATE int lin_lin_copy(const struct proc *srcproc, vir_bytes srclinaddr,
- const struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
+PRIVATE int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
+ struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
u32_t addr;
proc_nr_t procslot;
vir_bytes chunk = bytes;
int changed = 0;
+#ifdef CONFIG_SMP
+ unsigned cpu = cpuid;
+
+ if (GET_BIT(srcproc->p_stale_tlb, cpu)) {
+ changed = 1;
+ UNSET_BIT(srcproc->p_stale_tlb, cpu);
+ }
+ if (GET_BIT(dstproc->p_stale_tlb, cpu)) {
+ changed = 1;
+ UNSET_BIT(dstproc->p_stale_tlb, cpu);
+ }
+#endif
+
/* Set up 4MB ranges. */
srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
bitchunk_t p_cpu_mask[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* which CPUs the
						process is allowed to
						run on */
+ bitchunk_t p_stale_tlb[BITMAP_CHUNKS(CONFIG_MAX_CPUS)]; /* On which CPUs
+		there may be stale TLB entries for this process; they must
+		be flushed the next time the kernel touches this process's
+		memory
+		*/
#endif
/* Accounting statistics that get passed to the process' scheduler */
printf("do_update: curr ptproc %d\n", get_cpulocal_var(ptproc)->p_endpoint);
#endif
+#ifdef CONFIG_SMP
+ bits_fill(src_rp->p_stale_tlb, CONFIG_MAX_CPUS);
+ bits_fill(dst_rp->p_stale_tlb, CONFIG_MAX_CPUS);
+#endif
+
return OK;
}
* cpu
*/
RTS_UNSET(p, RTS_VMINHIBIT);
+#ifdef CONFIG_SMP
+	/*
+	 * We don't know whether the kernel has the changed mapping
+	 * installed for accessing userspace memory, and if so, on which
+	 * CPU.  Moreover, we don't know which mapping has changed, or how,
+	 * and therefore we must invalidate all mappings we have anywhere.
+	 * Next time we map memory, we map it fresh.
+	 */
+ bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
+#endif
return OK;
}