-############################################################################
CC = gcc
-CFLAGS = -c -fno-builtin
+CFLAGS = -g -c -fno-builtin
SYSTEMMAP = System.map
KERNELBIN = KERNEL.BIN
LINKSCRIPT = scripts/link.ld
-############################################################################
SRC_DIRS = boot setup mm lib fs kernel drivers pci
INC_DIRS = include drivers
-CSOURCE_FILES := $(foreach dir, $(SRC_DIRS), $(wildcard $(dir)/*.c))
-SSOURCE_FILES := $(foreach dir, $(SRC_DIRS), $(wildcard $(dir)/*.S))
-
-OBJS := $(patsubst %.c,%.c.o,$(CSOURCE_FILES))
-OBJS += $(patsubst %.S,%.S.o,$(SSOURCE_FILES))
-
CFLAGS += ${INC_DIRS:%=-I%}
-${KERNELBIN}: ${OBJS} $
+SOURCE_FILES := $(foreach DIR, $(SRC_DIRS), $(wildcard $(DIR)/*.[cS]))
+OBJS := $(patsubst %,%.o,$(SOURCE_FILES))
+
+${KERNELBIN}: ${OBJS}
ld -M -T$(LINKSCRIPT) $(OBJS) -o $@ > $(SYSTEMMAP)
%.S.o: %.S
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~((1UL << PAGE_SHIFT)-1))
#define PAGE_OFFSET (0xC0000000)
+#define PAGE_PDE_CNT 1024
+#define PAGE_PTE_CNT 1024
#ifndef ASM
#include <types.h>
unsigned long private;
unsigned long index;
list_head_t lru;
+
+ struct page *head_page;
+ unsigned int order;
+
+ void **freelist; // for slub
+ unsigned long inuse;
} page_t;
+/* Translate between page descriptors and kernel virtual addresses
+ * (implemented in the buddy allocator). */
+void *page2va(page_t *page);
+page_t *va2page(unsigned long addr);
+
+/* Return the first (head) page of the 2^order block containing @page;
+ * head_page is filled in by __alloc_pages for every constituent page. */
+static inline page_t *get_head_page(page_t *page) { return page->head_page; }
+
#define __GETPAGEFLAG(name) \
static inline int Page##name(page_t *page) \
{return constant_test_bit(PG_##name, &page->flags); }
unsigned long alloc_pages(unsigned int gfp_mask, unsigned int order);
-void free_pages(unsigned long addr, unsigned int order);
+void free_pages(unsigned long addr);
// TODO Remove
typedef struct page_
return 0;
}
-#define va2page(addr) (buddy_system.page_map + va2pfn(addr))
-#define page2va(page) (pfn2va((page) - buddy_system.page_map))
+/* Map a kernel virtual address to its page_t descriptor
+ * (index into the buddy system's page_map by page frame number). */
+page_t *va2page(unsigned long addr)
+{
+    return buddy_system.page_map + va2pfn(addr);
+}
+
+/* Map a page_t descriptor back to its kernel virtual address. */
+void *page2va(page_t *page)
+{
+    return pfn2va((page) - buddy_system.page_map);
+}
page_t *__alloc_pages(unsigned int order)
{
free_area_t *area;
unsigned long size;
unsigned int select_order;
+ unsigned int i;
for(select_order=order; select_order<MAX_ORDER; ++select_order)
{
area = buddy_system.free_area + select_order;
SetPagePrivate(buddy);
}
+ //
+ for(i=0; i<(1UL<<order); ++i)
+ {
+ page_t *p = page + i;
+ p->head_page = page;
+ p->order = order;
+ }
+
return page;
}
}
-void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr)
{
    if(!valid_va(addr))
    {
        BUG_ON(!valid_va(addr));
    }
-    __free_pages(va2page(addr), order);
+    /* the allocation order is now recorded in the page descriptor
+     * (page->order, set by __alloc_pages for every page of the block),
+     * so callers no longer need to pass it */
+    page_t *page = va2page(addr);
+
+    __free_pages(page, page->order);
}
void dump_buddy_system()
#include <mm.h>
#include <system.h>
-list_head_t slub_caches;
+list_head_t slub_caches = LIST_HEAD_INIT(slub_caches);
typedef struct kmem_cache
{
page_t *page;
- void **freelist;
+// void **freelist;
+
+ list_head_t list;
} kmem_cache_t;
-#define SLUB_PAGE_SHIFT PAGE_SHIFT
-#define KMALLOC_MIN_SIZE 32
-#define KMALLOC_MIN_ALIGN 32
-static kmem_cache_t kmalloc_caches[SLUB_PAGE_SHIFT];
+#define SLUB_MIN_SHIFT 5
+#define SLUB_MAX_SHIFT 12
+#define SLUB_INIT_CACHE_SIZE ((SLUB_MAX_SHIFT) - (SLUB_MIN_SHIFT))
+#define KMALLOC_MIN_SIZE (1UL<<(SLUB_MIN_SHIFT))
+#define KMALLOC_MIN_ALIGN (1UL<<(SLUB_MIN_SHIFT))
+
+static kmem_cache_t kmalloc_caches[SLUB_INIT_CACHE_SIZE];
typedef unsigned int gfp_t;
cache->objsize = size;
cache->align = align;
cache->page = 0;
- cache->freelist = 0;
+ //cache->freelist = 0;
cache->partial_cnt = 0;
INIT_LIST_HEAD(&(cache->partial));
if(!calculate_params(cache))
goto err;
- return true;
+ return true;
err:
panic("kmem_cache_init can not create cache\n");
return false;
}
+/* Detach and return the first slab on the cache's partial list,
+ * or 0 when the list is empty.  (gfpflags is currently unused.) */
+static page_t *get_partial(kmem_cache_t *cache, gfp_t gfpflags)
+{
+    list_head_t *first;
+
+    if(list_empty(&cache->partial))
+        return 0;
+
+    first = cache->partial.next;
+    list_del(first);
+
+    return list_entry(first, page_t, lru);
+}
+
+/* Build a brand new slab: take 2^order pages from the buddy allocator
+ * and thread every object into a singly linked freelist (each free
+ * object's first word points at the next free object).
+ * Returns the slab's head page, or 0 when out of memory. */
+static page_t *new_slub(kmem_cache_t *cache, gfp_t gfpflags)
+{
+    unsigned long bgn = alloc_pages(gfpflags, cache->order);
+
+    /* fix: test the returned address, not va2page(bgn) -- va2page(0)
+     * yields a valid (non-NULL) descriptor pointer, so the old check
+     * never caught an allocation failure */
+    if(0 == bgn)
+        return 0;
+
+    page_t *page = va2page(bgn);
+
+    unsigned long end = bgn + cache->objects*cache->size;
+
+    unsigned long last = bgn;
+    unsigned long addr;
+    for(addr=bgn; addr<end; addr+=cache->size)
+    {
+        *((void **)last) = (void *) addr;
+        last = addr;
+    }
+
+    *((void **)last) = 0;   /* terminate the freelist */
+
+    page->freelist = (void **)bgn;
+    page->inuse = 0;
+
+    return page;
+}
+
+/* Slow-path allocation: the cache has no usable active slab.  Refill
+ * cache->page from the partial list, or build a new slab, then pop one
+ * object off its freelist.  Returns 0 when no memory is available. */
+static void *__slub_alloc(kmem_cache_t *cache, gfp_t gfpflags)
+{
+    void **object = 0;
+    page_t *page = 0;
+
+    if(cache->page == 0)
+    {
+        page = get_partial(cache, gfpflags);
+        if(page == 0)
+        {
+            page = new_slub(cache, gfpflags);
+        }
+
+        /* fix: adopt the page in both cases -- the original only set
+         * cache->page for a freshly built slab, so a page taken off
+         * the partial list was leaked and the allocation failed */
+        cache->page = page;
+    }
+
+    if(cache->page == 0)
+        return 0;
+
+    object = cache->page->freelist;
+
+    if(object == 0)
+    {
+        /* slab exhausted: drop it; the next call refills */
+        cache->page = 0;
+    }
+    else
+    {
+        cache->page->freelist = object[0];
+        cache->page->inuse++;
+    }
+
+    return object;
+}
+
+/* Allocate one object from @cache.  Fast path pops the head of the
+ * active slab's freelist; otherwise fall back to __slub_alloc(). */
+static void *slub_alloc(kmem_cache_t *cache, gfp_t gfpflags)
+{
+    void **object = 0;
+
+    if(cache->page == 0 || cache->page->freelist == 0)
+    {
+        cache->page = 0;
+        object = __slub_alloc(cache, gfpflags);
+    }
+    else
+    {
+        object = cache->page->freelist;
+        cache->page->freelist = object[0];
+        /* fix: account the object; __slub_free decrements inuse, so
+         * skipping the increment here underflowed the counter and let
+         * a live slab be returned to the buddy allocator */
+        cache->page->inuse++;
+    }
+
+    return object;
+}
+
+/* Slow-path free: return @addr to @page's freelist and maintain the
+ * cache's partial list.  prior == 0 means the page was full (and thus
+ * NOT on the partial list); prior != 0 means it already had free
+ * objects and sits on the partial list. */
+static void __slub_free(kmem_cache_t *cache, page_t *page, void *addr)
+{
+    void *prior;
+    void **object = addr;
+
+    prior = object[0] = page->freelist;
+    page->freelist = object;
+    page->inuse--;
+
+    if(page->inuse == 0)
+    {
+        /* fix: only unlink if the page was actually on the partial
+         * list; the old code called list_del unconditionally */
+        if(prior != 0)
+            list_del(&page->lru);
+        free_pages((unsigned long)page2va(page));
+        /* fix: return immediately -- the page is gone; the old code
+         * could fall through and list_add a freed page (use-after-free) */
+        return;
+    }
+
+    if(prior == 0)
+    {
+        /* page was full, now has one free object: make it partial */
+        list_add(&page->lru, &cache->partial);
+    }
+}
+
+/* Free one object.  Objects belonging to the cache's currently active
+ * slab take the fast path (push onto its freelist); anything else goes
+ * through the slow path in __slub_free(). */
+static void slub_free(kmem_cache_t *cache, page_t *page, void *addr)
+{
+    void **object = addr;
+
+    if(page != cache->page)
+    {
+        __slub_free(cache, page, addr);
+        return;
+    }
+
+    /* fast path: link the object at the head of the freelist */
+    object[0] = page->freelist;
+    page->freelist = object;
+}
+
+/* Public free entry point: locate the head page of the slab that owns
+ * @addr, then hand the object to slub_free(). */
+void kmem_cache_free(kmem_cache_t *cache, void *addr)
+{
+    page_t *owner = va2page((unsigned long)addr);
+
+    slub_free(cache, get_head_page(owner), addr);
+}
+
+
+
+
void init_slub_system()
{
unsigned int i;
kmem_cache_t *cache;
-    for(i=5; i<SLUB_PAGE_SHIFT; ++i)
+    /* one kmalloc cache per power-of-two object size:
+     * 2^SLUB_MIN_SHIFT .. 2^(SLUB_MAX_SHIFT-1) bytes */
+    for(i=SLUB_MIN_SHIFT; i<SLUB_MAX_SHIFT; ++i)
{
-        cache = kmalloc_caches + i;
+        cache = kmalloc_caches + i - SLUB_MIN_SHIFT;
kmem_cache_init(cache, "kmalloc", 1UL<<i, KMALLOC_MIN_ALIGN);
+
+        list_add(&(cache->list), &slub_caches);
+    }
+
+    /* debug: dump every registered cache */
+    list_head_t *p;
+    list_for_each(p, &slub_caches)
+    {
+        cache = list_entry(p, kmem_cache_t, list);
+        printk("cache size %d align %d \n", cache->size, cache->align);
+    }
+
+    /* debug smoke-test; these objects are deliberately never freed --
+     * NOTE(review): remove this loop once the allocator is trusted */
+    for(i=0; i<10; ++i)
+    {
+        void *addr = slub_alloc(kmalloc_caches+2, 0);
+        printk("slub addr %08x\n", (unsigned long)addr);
+    }
}