/*
 * bank_proc.c -- Implementation of page coloring for cache and bank partitioning.
 *
 * This file keeps a pool of colored pages. Users can request pages with a
 * specific color or bank number.
 * Part of the code is modified from Jonathan Herman's code.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/random.h>

#include <litmus/litmus_proc.h>
#include <litmus/sched_trace.h>
#include <litmus/litmus.h>

#define LITMUS_LOCKDEP_NAME_MAX_LEN 50

/* This address decoding is used on the imx6-sabresd platform. */
#define CACHE_MASK   0x0000f000
#define BANK_MASK    0x00007000
#define OFFSET_SHIFT 12

#define PAGES_PER_COLOR 1024

unsigned long used_cachecolor;
unsigned long curr_cachecolor;

unsigned long number_banks;
unsigned long number_cachecolors;

/*
 * Every page list contains a lock, a list head, and a counter recording how
 * many pages the list stores.
 */
struct color_group {
	spinlock_t lock;
	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
	struct list_head list;
	atomic_t nr_pages;
};

/*
 * Old code, unused in the current version:
 *
 * static struct alloced_pages {
 *	spinlock_t lock;
 *	struct list_head list;
 * } alloced_pages;
 *
 * struct alloced_page {
 *	struct page *page;
 *	struct vm_area_struct *vma;
 *	struct list_head list;
 * };
 */

static struct color_group *color_groups;
static struct lock_class_key color_lock_keys[16];

/* Decode the page color, 0~15. */
static inline unsigned long page_color(struct page *page)
{
	return (page_to_phys(page) & CACHE_MASK) >> PAGE_SHIFT;
}

/* Decode the page bank number, 0~7. */
static inline unsigned long page_bank(struct page *page)
{
	return (page_to_phys(page) & BANK_MASK) >> PAGE_SHIFT;
}

/*
 * Return the smallest number of pages held by any color list.
 */
static unsigned long smallest_nr_pages(void)
{
	unsigned long i, min_pages = -1;
	struct color_group *cgroup;

	for (i = 0; i < number_cachecolors; ++i) {
		cgroup = &color_groups[i];
		if (atomic_read(&cgroup->nr_pages) < min_pages)
			min_pages = atomic_read(&cgroup->nr_pages);
	}
	return min_pages;
}

/*
 * Add a page to the current pool.
 */
void add_page_to_color_list(struct page *page)
{
	const unsigned long color = page_color(page);
	struct color_group *cgroup = &color_groups[color];

	BUG_ON(in_list(&page->lru) || PageLRU(page));
	BUG_ON(page_count(page) > 1);

	spin_lock(&cgroup->lock);
	list_add_tail(&page->lru, &cgroup->list);
	atomic_inc(&cgroup->nr_pages);
	spin_unlock(&cgroup->lock);
}

/*
 * Replenish the page pool. If a newly allocated page has a color we still
 * need, it is pushed onto the matching page list; otherwise it is freed.
 */
static int do_add_pages(void)
{
	struct page *page, *page_tmp;
	LIST_HEAD(free_later);
	unsigned long color;
	int ret = 0;

	printk("LITMUS do add pages\n");

	/* Loop until every page list contains enough pages. */
	while (smallest_nr_pages() < PAGES_PER_COLOR) {
		page = alloc_page(GFP_HIGHUSER_MOVABLE);
		if (unlikely(!page)) {
			printk(KERN_WARNING "Could not allocate pages.\n");
			ret = -ENOMEM;
			goto out;
		}
		color = page_color(page);
		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
			add_page_to_color_list(page);
		} else {
			/* Pages on this list are freed below. */
			list_add_tail(&page->lru, &free_later);
		}
	}

	/* Free the unwanted pages. */
	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
out:
	return ret;
}
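/*
 * Worked example (illustrative only, not part of the original code): with
 * the masks defined at the top of this file, a page at physical address
 * 0x1234d000 decodes as
 *
 *	color = (0x1234d000 & CACHE_MASK) >> OFFSET_SHIFT = 0xd = 13
 *	bank  = (0x1234d000 & BANK_MASK)  >> OFFSET_SHIFT = 0x5 = 5
 *
 * That is, bits [15:12] select one of the 16 cache colors and bits [14:12]
 * select one of the 8 DRAM banks, so bank i always serves colors i and
 * i + 8. (OFFSET_SHIFT equals PAGE_SHIFT = 12 on this platform.)
 */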
/*
 * Provide a page for replacement according to its cache color. This is the
 * only allocation implementation here; it should not be called by others
 * directly.
 */
static struct page *new_alloc_page_color(unsigned long color)
{
	struct color_group *cgroup;
	struct page *rPage = NULL;

	printk("allocate new page color = %lu\n", color);

	if (color >= number_cachecolors) {
		TRACE_CUR("Wrong color %lu\n", color);
		printk(KERN_WARNING "Wrong color %lu\n", color);
		goto out;
	}

	cgroup = &color_groups[color];
	spin_lock(&cgroup->lock);
	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
		TRACE_CUR("No free %lu colored pages.\n", color);
		printk(KERN_WARNING "no free %lu colored pages.\n", color);
		goto out_unlock;
	}
	rPage = list_first_entry(&cgroup->list, struct page, lru);
	BUG_ON(page_count(rPage) > 1);
	get_page(rPage);
	list_del(&rPage->lru);
	atomic_dec(&cgroup->nr_pages);
	/* ClearPageLRU(rPage); */
out_unlock:
	spin_unlock(&cgroup->lock);
out:
	do_add_pages();
	return rPage;
}

/*
 * Provide pages for replacement according to
 *   node = 0 for Level A, B tasks on CPU 0
 *   node = 1 for Level A, B tasks on CPU 1
 *   node = 2 for Level A, B tasks on CPU 2
 *   node = 3 for Level A, B tasks on CPU 3
 *   node = 4 for Level C tasks
 */
struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
{
	struct page *rPage = NULL;
	unsigned int color;

	printk("allocate new page node = %lu\n", node);
	/* return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0); */

	get_random_bytes(&color, sizeof(unsigned int));

	/* Decode the node to decide which color of page to provide. */
	switch (node) {
	case 0:
	case 1:
	case 2:
	case 3:
		/* CPU i owns colors i and i + 8, picked at random. */
		color = (color % 2) * 8 + node;
		break;
	case 4:
		/* Level C tasks share colors 4~7 and 12~15. */
		color = (color % 8) + 4;
		if (color >= 8)
			color += 4;
		break;
	default:
		TRACE_CUR("Wrong node %lu\n", node);
		printk(KERN_WARNING "Wrong node %lu\n", node);
		return rPage;
	}

	printk("allocate new page color = %u\n", color);
	rPage = new_alloc_page_color(color);
	return rPage;
}
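/*
 * Usage sketch (an assumption, not taken from this file): new_alloc_page()
 * has the new_page_t callback signature that migrate_pages() expects in
 * kernels of this generation, so a caller that wants to recolor a list of
 * already-isolated pages for, say, node 2 might look roughly like:
 *
 *	LIST_HEAD(pagelist);
 *	int ret;
 *	// ... isolate the task's pages onto &pagelist ...
 *	ret = migrate_pages(&pagelist, new_alloc_page, 2,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *
 * The exact migrate_pages() arguments depend on the kernel version; this is
 * only meant to show where the node parameter above comes from.
 */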
/*
 * Provide pages for replacement according to their bank number.
 * This is used with cache way partitioning.
 */
struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int **x)
{
	struct page *rPage = NULL;
	unsigned int color;

	printk("allocate new page bank = %lu\n", banknr);

	get_random_bytes(&color, sizeof(unsigned int));

	if (banknr <= 7) {
		/* Pick one of the two colors that map to this bank. */
		color = (color % 2) * 8 + banknr;
	} else {
		goto out;
	}

	rPage = new_alloc_page_color(color);
out:
	return rPage;
}

void set_number_of_colors(unsigned long colornr)
{
	used_cachecolor = colornr;
	curr_cachecolor = 0;
}

/*
 * Provide pages for replacement, cycling through the first used_cachecolor
 * colors in round-robin order. This is used to generate experiments.
 */
struct page *new_alloc_page_predefined(struct page *page, int **x)
{
	unsigned int color = curr_cachecolor;
	struct page *rPage = NULL;

	printk("allocate new page color = %u\n", color);

	rPage = new_alloc_page_color(color);
	/* Advance the shared cursor so the next call uses the next color. */
	curr_cachecolor = (curr_cachecolor + 1) % used_cachecolor;

	return rPage;
}

/*
 * Initialize the numbers of banks and cache colors.
 */
static int __init init_variables(void)
{
	number_banks = 1 + (BANK_MASK >> PAGE_SHIFT);
	number_cachecolors = 1 + (CACHE_MASK >> PAGE_SHIFT);
	return 0;
}

/*
 * Initialize the page pool.
 */
static int __init init_color_groups(void)
{
	struct color_group *cgroup;
	unsigned long i;
	int err = 0;

	color_groups = kmalloc(number_cachecolors * sizeof(struct color_group),
			       GFP_KERNEL);
	if (!color_groups) {
		printk(KERN_WARNING "Could not allocate color groups.\n");
		err = -ENOMEM;
	} else {
		for (i = 0; i < number_cachecolors; ++i) {
			cgroup = &color_groups[i];
			atomic_set(&cgroup->nr_pages, 0);
			INIT_LIST_HEAD(&cgroup->list);
			spin_lock_init(&cgroup->lock);
		}
	}
	return err;
}

/*
 * Initialize this proc.
 */
static int __init litmus_color_init(void)
{
	int err = 0;

	init_variables();
	printk("Cache number = %lu, Cache mask = 0x%08x\n",
	       number_cachecolors, CACHE_MASK);
	printk("Bank number = %lu, Bank mask = 0x%08x\n",
	       number_banks, BANK_MASK);
	init_color_groups();
	do_add_pages();

	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
	return err;
}

module_init(litmus_color_init);
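/*
 * Usage sketch (illustrative; the caller below is hypothetical, not part of
 * this file): the predefined allocator cycles through the first N colors in
 * round-robin order, so an experiment confined to four colors could do:
 *
 *	set_number_of_colors(4);
 *	// successive calls now return pages of color 0, 1, 2, 3, 0, 1, ...
 *	struct page *newpage = new_alloc_page_predefined(oldpage, NULL);
 */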