+ 2 * ((cpu) + 1) * K_STACK_SIZE))
+/*
+ * Definition of a callback used when a memory map changes its base address
+ * (invoked by the kernel once the memory manager has remapped the region).
+ */
+typedef int (*kern_phys_map_mapped)(vir_bytes id, vir_bytes new_addr );
+
+/*
+ * Struct used internally by memory.c to keep a list of
+ * items to map. These should be statically allocated
+ * in the individual files and passed as argument.
+ * The data doesn't need to be initialized. See omap_serial for
+ * an example usage.
+ */
+typedef struct kern_phys_map{
+	phys_bytes addr; /* The physical address to map */
+	vir_bytes size;	 /* The size of the mapping */
+	vir_bytes id;	 /* an id passed to the callback */
+	kern_phys_map_mapped cb; /* the callback itself */
+	phys_bytes vir;  /* The virtual address once remapped.
+			    NOTE(review): declared phys_bytes but stores a
+			    virtual address -- consider vir_bytes. */
+	int index;	 /* index assigned when the map list is enumerated */
+	struct kern_phys_map *next; /* pointer to the next item in the list */
+} kern_phys_map ;
+
+
+/*
+ * Request an in kernel physical mapping.
+ *
+ * On ARM many devices are memory mapped and some of these devices
+ * are used in the kernel. These devices can be things like serial
+ * lines, interrupt controllers and clocks. The kernel needs to be
+ * able to access these devices at the various stages of booting.
+ * During startup, until arch_enable_paging is called, it is the
+ * kernel that controls the mappings and it often needs to
+ * access the memory using a 1:1 mapping between virtual and
+ * physical memory.
+ *
+ * Once processes start to run it is no longer desirable for the
+ * kernel to have devices mapped in the middle of the process
+ * address space.
+ *
+ * This method requests the memory manager to map base_address/size
+ * in the kernel address space and call back the kernel when this
+ * mapping takes effect (after enable_paging).
+ *
+ * Before the callback is called it is up to the kernel to use its
+ * own addressing. The callback will happen *after* the kernel has lost
+ * its initial mapping. It is therefore not safe to use the initial
+ * mapping in the callback. It is also not possible to use printf for
+ * the same reason.
+ */
+int kern_req_phys_map( phys_bytes base_address, vir_bytes io_size,
+ kern_phys_map * priv, kern_phys_map_mapped cb,
+ vir_bytes id);
+
+/*
+ * Request a physical mapping and store the result in the given ptr.
+ * Note that ptr will only be valid once the callback has happened.
+ */
+int kern_phys_map_ptr( phys_bytes base_address, vir_bytes io_size,
+ kern_phys_map * priv, vir_bytes ptr);
+
+
/* functions defined in architecture-independent kernel source. */
#include "kernel/proto.h"
static u32_t phys_get32(phys_bytes v);
+/* list of requested physical mapping */
+static kern_phys_map *kern_phys_map_head;
void mem_clear_mapcache(void)
{
phys = 0;
} else {
if(phys == 0)
- panic("vm_lookup returned phys: %d", phys);
+ panic("vm_lookup returned phys: 0x%lx", phys);
}
if(phys == 0) {
int *flags)
{
static int first = 1;
+ kern_phys_map *phys_maps;
+
int freeidx = 0;
u32_t glo_len = (u32_t) &usermapped_nonglo_start -
(u32_t) &usermapped_start;
if(usermapped_glo_index != -1)
first_um_idx = usermapped_glo_index;
first = 0;
+
+	/* walk the list of requested maps and assign each a free index */
+ phys_maps = kern_phys_map_head;
+ while(phys_maps != NULL){
+ phys_maps->index = freeidx++;
+ phys_maps = phys_maps->next;
+ }
+
}
if(index == usermapped_glo_index) {
*len = ARM_PAGE_SIZE;
*flags = VMMF_USER;
return OK;
+ }
+	/* no static mapping matched; search the requested physical maps */
+ phys_maps = kern_phys_map_head;
+ while(phys_maps != NULL){
+ if(phys_maps->index == index){
+ *addr = phys_maps->addr;
+ *len = phys_maps->size;
+ *flags = VMMF_UNCACHED | VMMF_WRITE;
+ return OK;
+ }
+ phys_maps = phys_maps->next;
}
return EINVAL;
int arch_phys_map_reply(const int index, const vir_bytes addr)
{
+ kern_phys_map *phys_maps;
+
if(index == first_um_idx) {
u32_t usermapped_offset;
assert(addr > (u32_t) &usermapped_start);
return OK;
}
+	/* no static mapping matched; find the requested map for this index */
+ phys_maps = kern_phys_map_head;
+ while(phys_maps != NULL){
+ if(phys_maps->index == index){
+ assert(phys_maps->cb != NULL);
+ /* only update the vir addr we are
+ going to call the callback in enable
+ paging
+ */
+ phys_maps->vir = addr;
+ return OK;
+ }
+ phys_maps = phys_maps->next;
+ }
+
return EINVAL;
}
int arch_enable_paging(struct proc * caller)
{
+ kern_phys_map *phys_maps;
assert(caller->p_seg.p_ttbr);
+
/* load caller's page table */
switch_address_space(caller);
+	/* We have now switched address spaces and the mappings are
+	   valid. We can now remap previous mappings. This is not a
+	   good time to do printf as the initial mapping is gone and
+	   the new mapping is not yet in place */
+ phys_maps = kern_phys_map_head;
+ while(phys_maps != NULL){
+ assert(phys_maps->cb != NULL);
+ phys_maps->cb(phys_maps->id, phys_maps->vir);
+ phys_maps = phys_maps->next;
+ }
+
+
device_mem = (char *) device_mem_vaddr;
return OK;
pr->p_seg.p_ttbr_v = NULL;
barrier();
}
+
+
+
+/*
+ * Request a physical mapping.
+ *
+ * Fills in the caller-provided (statically allocated) priv structure
+ * and pushes it onto the head of the global list of requested
+ * mappings. The callback cb is invoked with id once the mapping is
+ * in effect (see arch_enable_paging).
+ */
+int kern_req_phys_map( phys_bytes base_address, vir_bytes io_size,
+ kern_phys_map * priv, kern_phys_map_mapped cb,
+ vir_bytes id)
+{
+	assert(base_address != 0);
+	assert(io_size % ARM_PAGE_SIZE == 0);
+	assert(cb != NULL);
+
+	/* record the request in the caller's struct */
+	priv->addr = base_address;
+	priv->size = io_size;
+	priv->cb = cb;
+	priv->id = id;
+	priv->index = -1;
+
+	/* push onto the head of the (possibly empty) list; this
+	   single form covers both the empty and non-empty cases */
+	priv->next = kern_phys_map_head;
+	kern_phys_map_head = priv;
+
+	return 0;
+}
+
+/*
+ * Callback implementation where the id given to the
+ * kern_phys_map is a pointer to the io map base address.
+ * This implementation will change that base address.
+ *
+ * The second parameter is vir_bytes (not phys_bytes) so the
+ * signature matches the kern_phys_map_mapped callback type this
+ * function is registered as.
+ */
+int kern_phys_map_mapped_ptr(vir_bytes id, vir_bytes address){
+	/* id is the address of the caller's base-address variable */
+	*((vir_bytes*)id) = address;
+	return 0;
+}
+
+/*
+ * Request a physical mapping and store the result in the given ptr.
+ * Note that ptr will only be valid once the callback has happened.
+ */
+int kern_phys_map_ptr(
+	phys_bytes base_address,
+	vir_bytes io_size,
+	kern_phys_map * priv,
+	vir_bytes ptr)
+{
+	return kern_req_phys_map(base_address,io_size,priv,kern_phys_map_mapped_ptr,ptr);
+}
+
#include <machine/cpu.h>
#include <minix/type.h>
#include <io.h>
-#include "omap_intr.h"
+#include "kernel/kernel.h"
+#include "kernel/proc.h"
+#include "kernel/vm.h"
+#include "kernel/proto.h"
+#include "arch_proto.h"
+
+#include "omap_intr.h"
static struct omap_intr {
	vir_bytes base;
+	int size;	/* size of the register window to map */
} omap_intr;
+
+/* remap bookkeeping; must be statically allocated (see kern_req_phys_map) */
+static kern_phys_map intr_phys_map;
+
int intr_init(const int auto_eoi)
{
#ifdef DM37XX
-    omap_intr.base = OMAP3_DM37XX_INTR_BASE;
+	omap_intr.base = OMAP3_DM37XX_INTR_BASE;
#endif
#ifdef AM335X
-    omap_intr.base = OMAP3_AM335X_INTR_BASE;
+	omap_intr.base = OMAP3_AM335X_INTR_BASE;
#endif
+	omap_intr.size = 0x1000 ; /* 4K register window */
+
+	/* request a kernel mapping for the INTC registers; omap_intr.base
+	 * is rewritten in place by the callback once paging is enabled */
+	kern_phys_map_ptr(omap_intr.base,omap_intr.size,&intr_phys_map,&omap_intr.base);
	return 0;
}
#include <machine/cpu.h>
#include <minix/type.h>
#include <io.h>
+
+#include "kernel/kernel.h"
+#include "kernel/proc.h"
+#include "kernel/vm.h"
+#include "kernel/proto.h"
+#include "arch_proto.h"
+
#include "omap_serial.h"
+
struct omap_serial {
	vir_bytes base;
+	vir_bytes size;	/* size of the UART register window to map */
};
static struct omap_serial omap_serial = {
	.base = 0,
};
+/* remap bookkeeping; must be statically allocated (see kern_req_phys_map) */
+static kern_phys_map serial_phys_map;
+
/*
* In kernel serial for the omap. The serial driver like most other
* drivers needs to be started early and even before the MMU is turned on.
#ifdef AM335X
omap_serial.base = OMAP3_AM335X_DEBUG_UART_BASE;
#endif
+ omap_serial.size = 0x1000 ; /* 4k */
+
+
+ kern_phys_map_ptr(omap_serial.base,omap_serial.size,&serial_phys_map,&omap_serial.base);
assert(omap_serial.base);
}
#include <sys/types.h>
#include <machine/cpu.h>
#include <minix/mmio.h>
+#include <assert.h>
#include <io.h>
#include <stdlib.h>
#include <stdio.h>
return 0;
}
+
+/* remap bookkeeping for the general-purpose and free-running timers */
+static kern_phys_map timer_phys_map;
+static kern_phys_map fr_timer_phys_map;
+
void omap3_frclock_init(void)
{
u32_t tisr;
+ kern_phys_map_ptr(fr_timer.base,ARM_PAGE_SIZE,&fr_timer_phys_map,&fr_timer.base);
/* enable the clock */
#ifdef AM335X
/* Disable the module and wait for the module to be disabled */
void omap3_timer_init(unsigned freq)
{
u32_t tisr;
+ kern_phys_map_ptr(timer.base,ARM_PAGE_SIZE,&timer_phys_map,&timer.base);
#ifdef AM335X
/* disable the module and wait for the module to be disabled */
set32(CM_WKUP_TIMER1_CLKCTRL, CM_MODULEMODE_MASK,CM_MODULEMODE_DISABLED);
void minix_shutdown(timer_t *t) { arch_shutdown(RBT_PANIC); }
void busy_delay_ms(int x) { }
int raise(int n) { panic("raise(%d)\n", n); }
-
+/* stub: no remapping needed in this build; return success so the
+ * function has a defined return value (the original empty body `{}`
+ * fell off the end of a non-void function, plus a stray `;`). */
+int kern_phys_map_ptr( phys_bytes base_address, vir_bytes io_size,
+		struct kern_phys_map * priv, vir_bytes ptr)
+{
+	return 0;
+}