#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/lockdep.h>
#include <linux/sched.h> /* required by litmus.h */
#include <asm/io.h> /* page_to_phys on SPARC */

#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
#include <asm/cacheflush.h> /* set_memory_uc */
#endif

#include <litmus/color.h>
#include <litmus/litmus.h> /* for in_list(...) */

#define PAGES_PER_COLOR 1024

/*
 * This is used only to "trick" lockdep into permitting dynamically allocated
 * locks of different classes that are initialized on the same line.
 */
#define LOCKDEP_MAX_NR_COLORS 128
static struct lock_class_key color_lock_keys[LOCKDEP_MAX_NR_COLORS];

struct color_group {
	spinlock_t lock;
	char _lock_name[LITMUS_LOCKDEP_NAME_MAX_LEN];
	struct list_head list;
	atomic_t nr_pages;
};

static unsigned long color_mask;
static struct color_group *color_groups;

/* non-static: extern'ed in various files */
struct color_cache_info color_cache_info;
int color_sysctl_add_pages_data;

static inline unsigned long page_color(struct page *page)
{
	return ((page_to_phys(page) & color_mask) >> PAGE_SHIFT);
}

/*
 * Page's count should be one; it should not be on any LRU list.
 */
void add_page_to_color_list(struct page *page)
{
	const unsigned long color = page_color(page);
	struct color_group *cgroup = &color_groups[color];
	BUG_ON(in_list(&page->lru) || PageLRU(page));
	BUG_ON(page_count(page) > 1);
	spin_lock(&cgroup->lock);
	list_add_tail(&page->lru, &cgroup->list);
	atomic_inc(&cgroup->nr_pages);
	SetPageLRU(page);
	spin_unlock(&cgroup->lock);
}

/*
 * Increases page's count to two.
 */
struct page* get_colored_page(unsigned long color)
{
	struct color_group *cgroup;
	struct page *page = NULL;

	if (color >= color_cache_info.nr_colors)
		goto out;

	cgroup = &color_groups[color];
	spin_lock(&cgroup->lock);
	if (unlikely(!atomic_read(&cgroup->nr_pages))) {
		TRACE_CUR("No free %lu colored pages.\n", color);
		printk(KERN_WARNING "no free %lu colored pages.\n", color);
		goto out_unlock;
	}
	page = list_first_entry(&cgroup->list, struct page, lru);
	BUG_ON(page_count(page) > 1);
	get_page(page);
	list_del(&page->lru);
	atomic_dec(&cgroup->nr_pages);
	ClearPageLRU(page);
out_unlock:
	spin_unlock(&cgroup->lock);
out:
	return page;
}

static unsigned long smallest_nr_pages(void)
{
	unsigned long i, min_pages = -1;
	struct color_group *cgroup;
	for (i = 0; i < color_cache_info.nr_colors; ++i) {
		cgroup = &color_groups[i];
		if (atomic_read(&cgroup->nr_pages) < min_pages)
			min_pages = atomic_read(&cgroup->nr_pages);
	}
	return min_pages;
}

static int do_add_pages(void)
{
	struct page *page, *page_tmp;
	LIST_HEAD(free_later);
	unsigned long color;
	int ret = 0;

	while (smallest_nr_pages() < PAGES_PER_COLOR) {
#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
		unsigned long vaddr;
#endif
#if defined(CONFIG_X86)
		page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_MOVABLE);
#elif defined(CONFIG_SPARC) || defined(CONFIG_ARM)
		page = alloc_page(GFP_HIGHUSER | __GFP_MOVABLE);
#else
#error What architecture are you using?
#endif
		if (unlikely(!page)) {
			printk(KERN_WARNING "Could not allocate pages.\n");
			ret = -ENOMEM;
			goto out;
		}
		color = page_color(page);
		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
			SetPageReserved(page);
#ifdef CONFIG_PLUGIN_COLOR_UNCACHABLE
			vaddr = (unsigned long) pfn_to_kaddr(page_to_pfn(page));
			if (set_memory_uc(vaddr, 1)) {
				printk(KERN_ALERT "Could not set_memory_uc\n");
				BUG();
			}
#endif
			add_page_to_color_list(page);
		} else
			list_add_tail(&page->lru, &free_later);
	}
	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
		list_del(&page->lru);
		__free_page(page);
	}
out:
	return ret;
}

static struct alloced_pages {
	spinlock_t lock;
	struct list_head list;
} alloced_pages;

struct alloced_page {
	struct page *page;
	struct vm_area_struct *vma;
	struct list_head list;
};

static struct alloced_page * new_alloced_page(struct page *page,
		struct vm_area_struct *vma)
{
	struct alloced_page *ap = kmalloc(sizeof(*ap), GFP_KERNEL);
	INIT_LIST_HEAD(&ap->list);
	ap->page = page;
	ap->vma = vma;
	return ap;
}

/*
 * Page's count should be two or more.  It should not be on any LRU list.
 */
void add_page_to_alloced_list(struct page *page, struct vm_area_struct *vma)
{
	struct alloced_page *ap;

	BUG_ON(page_count(page) < 2);
	ap = new_alloced_page(page, vma);
	spin_lock(&alloced_pages.lock);
	list_add_tail(&ap->list, &alloced_pages.list);
	spin_unlock(&alloced_pages.lock);
}

/*
 * Reclaim pages.
 */
void reclaim_pages(struct vm_area_struct *vma)
{
	struct alloced_page *ap, *ap_tmp;
	unsigned long nr_reclaimed = 0;
	spin_lock(&alloced_pages.lock);
	list_for_each_entry_safe(ap, ap_tmp, &alloced_pages.list, list) {
		if (vma == ap->vma) {
			list_del(&ap->list);
			put_page(ap->page);
			add_page_to_color_list(ap->page);
			nr_reclaimed++;
			TRACE_CUR("reclaiming page (pa:0x%10llx, pfn:%8lu, "
					"color:%3lu)\n", page_to_phys(ap->page),
					page_to_pfn(ap->page), page_color(ap->page));
			kfree(ap);
		}
	}
	spin_unlock(&alloced_pages.lock);
	TRACE_CUR("Reclaimed %lu pages.\n", nr_reclaimed);
}
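/*
 * Illustrative sketch only (not part of the allocator itself): how a caller
 * that owns a mapping might pull one page of every color and later hand them
 * all back.  The vma is whatever mapping the caller is populating; the
 * function name is made up for illustration.
 */
static void __maybe_unused color_alloc_sketch(struct vm_area_struct *vma)
{
	unsigned long color;
	struct page *page;

	for (color = 0; color < color_cache_info.nr_colors; ++color) {
		page = get_colored_page(color);	/* page count is now two */
		if (!page)
			continue;
		/* record the page so reclaim_pages(vma) can find it later */
		add_page_to_alloced_list(page, vma);
	}

	/* when the mapping goes away, return every page to its color list */
	reclaim_pages(vma);
}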
/***********************************************************
 * Proc
 ***********************************************************/

int color_add_pages_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret)
		goto out;
	if (write && color_sysctl_add_pages_data)
		ret = do_add_pages();
out:
	return ret;
}

int color_nr_pages_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct color_group *cgroup;
	char *buf;
	unsigned long i;
	int used = 0, ret = 0;

	if (write) {
		ret = -EPERM;
		goto out;
	}
	for (i = 0; i < color_cache_info.nr_colors; ++i) {
		cgroup = &color_groups[i];
		buf = ((char*)table->data) + used;
		used += snprintf(buf, table->maxlen - used, ONE_COLOR_FMT,
				i, atomic_read(&cgroup->nr_pages));
	}
	ret = proc_dostring(table, write, buffer, lenp, ppos);
out:
	return ret;
}

/***********************************************************
 * Initialization
 ***********************************************************/

#if defined(CONFIG_X86) || defined(CONFIG_ARM)
/* slowest possible way to find a log, but we only do this once on boot */
static unsigned int __init slow_log(unsigned int v)
{
	unsigned int r = 0;
	while (v >>= 1)
		r++;
	return r;
}

static int __init init_mask(void)
{
	unsigned int line_size_log = slow_log(color_cache_info.line_size);
	int err = 0;

	WARN(color_cache_info.size < 1048576 ||
			color_cache_info.ways < 15 ||
			(color_cache_info.line_size != 64 &&
			 color_cache_info.line_size != 32),
			"Potentially bad cache size, ways, or line size.\n");
	printk("Cache size: %lu line-size: %lu ways: %lu sets: %lu\n",
			color_cache_info.size, color_cache_info.line_size,
			color_cache_info.ways, color_cache_info.sets);
	if (!color_cache_info.size) {
		printk(KERN_WARNING "No cache information found.\n");
		err = -EINVAL;
		goto out;
	}

	WARN(color_cache_info.size / color_cache_info.line_size /
			color_cache_info.ways != color_cache_info.sets,
			"Cache size and set information do not agree.\n");
	WARN(PAGE_SIZE >= (color_cache_info.sets << line_size_log),
			"Cache set bits contained entirely within page bits.\n");

	color_mask = ((color_cache_info.sets << line_size_log) - 1) ^
			(PAGE_SIZE - 1);
	color_cache_info.nr_colors = (color_mask >> PAGE_SHIFT) + 1;
out:
	return err;
}
#elif defined(CONFIG_SPARC)
static int __init init_mask(void)
{
	/*
	 * Static assuming we are using Flare (our Niagara machine).
	 * This machine has weirdness with cache banks, and I don't want
	 * to waste time trying to auto-detect this.
	 */
	color_mask = 0x3e000UL; /* bits 17:13 */
	color_cache_info.size = 3 * 1024 * 1024; /* 3 MB */
	color_cache_info.line_size = 64;
	color_cache_info.ways = 12;
	color_cache_info.sets = 1024 * 4;
	color_cache_info.nr_colors = (1 << hweight_long(color_mask));
	return 0;
}
#endif
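/*
 * Worked example of the mask computation above, for a hypothetical geometry
 * rather than any particular machine: a 2 MB, 16-way cache with 64-byte
 * lines has 2 MB / 64 B / 16 = 2048 sets, so the set-index region spans
 * sets << line_size_log = 2048 * 64 = 128 KB, i.e. physical address bits
 * 0-16.  With 4 KB pages, bits 0-11 are page-offset bits, which leaves bits
 * 12-16 as color bits: color_mask = 0x1f000 and nr_colors = 32.
 */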
static int __init init_color_groups(void)
{
	struct color_group *cgroup;
	unsigned long i;
	int err = 0;

	color_groups = kmalloc(color_cache_info.nr_colors *
			sizeof(struct color_group), GFP_KERNEL);
	if (!color_groups) {
		printk(KERN_WARNING "Could not allocate color groups.\n");
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < color_cache_info.nr_colors; ++i) {
		cgroup = &color_groups[i];
		atomic_set(&cgroup->nr_pages, 0);
		INIT_LIST_HEAD(&cgroup->list);
		spin_lock_init(&cgroup->lock);
		LOCKDEP_DYNAMIC_ALLOC(&cgroup->lock, &color_lock_keys[i],
				cgroup->_lock_name, "color%lu", i);
	}
out:
	return err;
}

static int __init init_color(void)
{
	int err = 0;

	printk("Initializing LITMUS^RT cache coloring.\n");

	INIT_LIST_HEAD(&alloced_pages.list);
	spin_lock_init(&alloced_pages.lock);

	err = init_mask();
	printk("PAGE_SIZE: %lu Color mask: 0x%lx Total colors: %lu\n",
			PAGE_SIZE, color_mask, color_cache_info.nr_colors);

	BUG_ON(LOCKDEP_MAX_NR_COLORS < color_cache_info.nr_colors);
	err = init_color_groups();
	return err;
}

module_init(init_color);
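/*
 * Sketch of how the proc handlers above might be wired up.  Illustrative
 * only: the real ctl_table for LITMUS^RT lives in a separate file, and the
 * entry names, modes, and buffer size below are assumptions.
 *
 *	static char nr_pages_buf[PAGE_SIZE];
 *	static struct ctl_table color_table[] = {
 *		{
 *			.procname	= "add_pages",
 *			.data		= &color_sysctl_add_pages_data,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= color_add_pages_handler,
 *		},
 *		{
 *			.procname	= "nr_pages",
 *			.data		= nr_pages_buf,
 *			.maxlen		= sizeof(nr_pages_buf),
 *			.mode		= 0444,
 *			.proc_handler	= color_nr_pages_handler,
 *		},
 *		{ }
 *	};
 */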