From 6b091698a8c1575d96e6c4e3dd36252cfa7aabd1 Mon Sep 17 00:00:00 2001 From: ChengYang Fu Date: Mon, 2 Mar 2015 16:11:18 -0500 Subject: Merge chengyangfu branch to wip-mc2-new --- litmus/bank_proc.c | 210 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 123 insertions(+), 87 deletions(-) diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c index 07d572833b30..295c450bfbe2 100644 --- a/litmus/bank_proc.c +++ b/litmus/bank_proc.c @@ -1,3 +1,9 @@ +/* + * bank_proc.c -- Implementation of the page coloring for cache and bank partition. + * The file will keep a pool of colored pages. Users can require pages with + * specific color or bank number. + * Part of the code is modified from Jonathan Herman's code + */ #include #include #include @@ -14,16 +20,23 @@ #define LITMUS_LOCKDEP_NAME_MAX_LEN 50 -// This is Address Decoding for imx6-sabredsd board +// This Address Decoding is used in imx6-sabredsd platform #define CACHE_MASK 0x0000f000 #define BANK_MASK 0x00007000 #define OFFSET_SHIFT 12 #define PAGES_PER_COLOR 1024 +unsigned long used_cachecolor; +unsigned long curr_cachecolor; + + unsigned long number_banks; unsigned long number_cachecolors; +/* + * Every page list should contain a lock, a list, and a number recording how many pages it store + */ struct color_group { spinlock_t lock; char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN]; @@ -31,6 +44,10 @@ struct color_group { atomic_t nr_pages; }; +/* + * This is old code which is not used in current version + */ +/* static struct alloced_pages { spinlock_t lock; struct list_head list; @@ -41,6 +58,7 @@ struct alloced_page { struct vm_area_struct *vma; struct list_head list; }; +*/ static struct color_group *color_groups; static struct lock_class_key color_lock_keys[16]; @@ -59,6 +77,9 @@ static inline unsigned long page_bank(struct page *page) return ((page_to_phys(page)& BANK_MASK) >> PAGE_SHIFT); } +/* + * It is used to determine the smallest number of page lists. 
+ */ static unsigned long smallest_nr_pages(void) { unsigned long i, min_pages = -1; @@ -70,8 +91,9 @@ static unsigned long smallest_nr_pages(void) } return min_pages; } + /* - * Page's count should be one, it sould not be on any LRU list. + * Add a page to current pool. */ void add_page_to_color_list(struct page *page) { @@ -82,22 +104,26 @@ void add_page_to_color_list(struct page *page) spin_lock(&cgroup->lock); list_add_tail(&page->lru, &cgroup->list); atomic_inc(&cgroup->nr_pages); -// SetPageLRU(page); spin_unlock(&cgroup->lock); } +/* + * Replenish the page pool. + * If the newly allocate page is what we want, it will be pushed to the correct page list + * otherwise, it will be freed. + */ static int do_add_pages(void) { - //printk("LITMUS do add pages\n"); + printk("LITMUS do add pages\n"); struct page *page, *page_tmp; LIST_HEAD(free_later); unsigned long color; int ret = 0; + // until all the page lists contain enough pages while (smallest_nr_pages() < PAGES_PER_COLOR) { - //page = alloc_page(GFP_HIGHUSER | __GFP_MOVABLE); page = alloc_page(GFP_HIGHUSER_MOVABLE); if (unlikely(!page)) { @@ -107,70 +133,79 @@ static int do_add_pages(void) } color = page_color(page); if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) { - // SetPageReserved(page); add_page_to_color_list(page); - } else + } else{ + // Pages here will be freed later list_add_tail(&page->lru, &free_later); + } } + // Free the unwanted pages list_for_each_entry_safe(page, page_tmp, &free_later, lru) { list_del(&page->lru); __free_page(page); } - /* setup the color queue stuff */ -// ret = setup_flusher_array(); out: return ret; } -extern int l2_usable_sets; +/* + * Provide pages for replacement according cache color + * This should be the only implementation here + * This function should not be accessed by others directly. 
+ * + */ +static struct page *new_alloc_page_color( unsigned long color) +{ + printk("allocate new page color = %d\n", color); + struct color_group *cgroup; + struct page *rPage = NULL; + + if( (color <0) || (color)>15) { + TRACE_CUR("Wrong color %lu\n", color); + printk(KERN_WARNING "Wrong color %lu\n", color); + goto out_unlock; + } + + + cgroup = &color_groups[color]; + spin_lock(&cgroup->lock); + if (unlikely(!atomic_read(&cgroup->nr_pages))) { + TRACE_CUR("No free %lu colored pages.\n", color); + printk(KERN_WARNING "no free %lu colored pages.\n", color); + goto out_unlock; + } + rPage = list_first_entry(&cgroup->list, struct page, lru); + BUG_ON(page_count(rPage) > 1); + get_page(rPage); + list_del(&rPage->lru); + atomic_dec(&cgroup->nr_pages); +// ClearPageLRU(rPage); +out_unlock: + spin_unlock(&cgroup->lock); +out: + do_add_pages(); + return rPage; +} + /* - * provide pages for replacement + * provide pages for replacement according to * node = 0 for Level A, B tasks in Cpu 0 * node = 1 for Level A, B tasks in Cpu 1 * node = 2 for Level A, B tasks in Cpu 2 * node = 3 for Level A, B tasks in Cpu 3 * node = 4 for Level C tasks */ -#if 1 struct page *new_alloc_page(struct page *page, unsigned long node, int **x) { - //printk("allocate new page node = %d\n", node); + printk("allocate new page node = %d\n", node); // return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); struct color_group *cgroup; struct page *rPage = NULL; unsigned int color; get_random_bytes(&color, sizeof(unsigned int)); - /* - if(node ==0){ - color = (color%2)*8+node; - }else if(node == 1){ - color = (color%2)*8+node; - }else if(node == 2){ - color = (color%2)*8+; - }else if(node == 3){ - color = color%2 + 6; - }else if(node == 4){ - color = color%8 + 8; - }else{ - goto out; - } - */ - switch(node ){ - case 0: - color = (color % l2_usable_sets); - break; - case 1: - case 2: - case 3: - case 4: - color = (color% (16-l2_usable_sets)) + l2_usable_sets; - break; - default: - goto out; - 
} - /* + // Decode the node to decide what color pages we should provide switch(node ){ case 0: case 1: @@ -184,34 +219,22 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x) color+=4; break; default: - goto out; + TRACE_CUR("Wrong color %lu\n", color); + printk(KERN_WARNING "Wrong color %lu\n", color); + return rPage; } - */ - //printk("allocate new page color = %d\n", color); - //TRACE("allocate new page color = %d\n", color); + + printk("allocate new page color = %d\n", color); - cgroup = &color_groups[color]; - spin_lock(&cgroup->lock); - if (unlikely(!atomic_read(&cgroup->nr_pages))) { - //TRACE_CUR("No free %lu colored pages.\n", color); - printk(KERN_WARNING "no free %lu colored pages.\n", color); - goto out_unlock; - } - rPage = list_first_entry(&cgroup->list, struct page, lru); - BUG_ON(page_count(rPage) > 1); - get_page(rPage); - list_del(&rPage->lru); - atomic_dec(&cgroup->nr_pages); -// ClearPageLRU(rPage); -out_unlock: - spin_unlock(&cgroup->lock); -out: - do_add_pages(); - return rPage; + rPage = new_alloc_page_color(color); + return rPage; } -#endif +/* + * Provide pages for replacement according to bank number. 
+ * This is used in cache way partition + */ struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int **x) { printk("allocate new page bank = %d\n", banknr); @@ -225,30 +248,43 @@ struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int }else{ goto out; } + + rPage = new_alloc_page_color(color); - cgroup = &color_groups[color]; - spin_lock(&cgroup->lock); - if (unlikely(!atomic_read(&cgroup->nr_pages))) { - TRACE_CUR("No free %lu colored pages.\n", color); - printk(KERN_WARNING "no free %lu colored pages.\n", color); - goto out_unlock; - } - rPage = list_first_entry(&cgroup->list, struct page, lru); - BUG_ON(page_count(rPage) > 1); - get_page(rPage); - list_del(&rPage->lru); - atomic_dec(&cgroup->nr_pages); -// ClearPageLRU(rPage); -out_unlock: - spin_unlock(&cgroup->lock); out: - do_add_pages(); return rPage; +} +void set_number_of_colors(unsigned long colornr) +{ + used_cachecolor = colornr ; + curr_cachecolor = 0; +} + +/* + * Provide pages for replacement + * This is used to generate experiments + */ +struct page *new_alloc_page_predefined(struct page *page, int **x) +{ + unsigned int color = curr_cachecolor; + + printk("allocate new page color = %d\n", color); + struct color_group *cgroup; + struct page *rPage = NULL; + + rPage = new_alloc_page_color(color); + color = (color + 1)% used_cachecolor; +out: + return rPage; } + +/* + * Initialize the numbers of banks and cache colors + */ static int __init init_variables(void) { number_banks = 1+(BANK_MASK >> PAGE_SHIFT); @@ -256,7 +292,9 @@ static int __init init_variables(void) } - +/* + * Initialize the page pool + */ static int __init init_color_groups(void) { struct color_group *cgroup; @@ -275,22 +313,20 @@ static int __init init_color_groups(void) atomic_set(&cgroup->nr_pages, 0); INIT_LIST_HEAD(&cgroup->list); spin_lock_init(&cgroup->lock); -// LOCKDEP_DYNAMIC_ALLOC(&cgroup->lock, &color_lock_keys[i], -// cgroup->_lock_name, "color%lu", i); } } return err; } /* 
- * Initialzie the this proc + * Initialize this proc */ static int __init litmus_color_init(void) { int err=0; - INIT_LIST_HEAD(&alloced_pages.list); - spin_lock_init(&alloced_pages.lock); + //INIT_LIST_HEAD(&alloced_pages.list); + //spin_lock_init(&alloced_pages.lock); init_variables(); printk("Cache number = %d , Cache mask = 0x%lx\n", number_cachecolors, CACHE_MASK); printk("Bank number = %d , Bank mask = 0x%lx\n", number_banks, BANK_MASK); -- cgit v1.2.2