author		Christopher Kenna <cjk@cs.unc.edu>	2012-05-04 12:20:52 -0400
committer	Christopher Kenna <cjk@cs.unc.edu>	2012-05-04 12:20:52 -0400
commit		803e44774958312123b0bee0fcffd4e97c7c88aa (patch)
tree		e3014f61ed2bff071569745c2ab7e618001c42e4 /litmus
parent		b881d3cdf01cd463073b016261b9af2cfe3ed417 (diff)

Move nr_colors into color_cache_info and extern it for everyone.

Diffstat (limited to 'litmus'):

-rw-r--r--	litmus/color.c		| 34 ++++++++++++++++++++--------------
-rw-r--r--	litmus/color_dev.c	|  8 ++++----
2 files changed, 24 insertions(+), 18 deletions(-)
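The patch folds the old standalone `nr_colors` global into the shared cache descriptor, so both the x86 and SPARC paths publish the color count through one symbol. A rough sketch of what the descriptor presumably looks like after this change; the real definition lives in a litmus header that is not part of this diff, and the field types here are assumptions inferred from the assignments below:

/* Sketch only -- inferred from the fields assigned in this patch,
 * not copied from the (unshown) litmus header. */
struct color_cache_info {
	unsigned long size;      /* total cache size in bytes */
	unsigned long line_size; /* bytes per cache line */
	unsigned long ways;      /* associativity */
	unsigned long sets;      /* size / (line_size * ways) */
	unsigned long nr_colors; /* moved here from the old global */
};

/* "extern it for everyone": one definition in litmus/color.c,
 * declared extern wherever a file needs the color count. */
extern struct color_cache_info color_cache_info;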
diff --git a/litmus/color.c b/litmus/color.c
index 93d12a718543..21bb0ee81895 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -32,10 +32,7 @@ static struct color_group *color_groups;
 
 
 /* non-static: extern'ed in various files */
-unsigned long nr_colors;
-#ifdef CONFIG_X86
 struct color_cache_info color_cache_info;
-#endif
 int color_sysctl_add_pages_data;
 
 static inline unsigned long page_color(struct page *page)
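For context, `page_color()` (whose body is unchanged and therefore not shown by this hunk) maps a physical page to its color. Since init_mask() below derives the count as `(color_mask >> PAGE_SHIFT) + 1`, the function is presumably the standard mask-and-shift; a sketch, not the verbatim body:

/* Assumed shape of page_color(): select the set-index bits above
 * the page offset with color_mask, then shift the offset away so
 * colors run 0 .. nr_colors-1. */
static inline unsigned long page_color(struct page *page)
{
	return (page_to_phys(page) & color_mask) >> PAGE_SHIFT;
}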
@@ -67,7 +64,7 @@ struct page* get_colored_page(unsigned long color)
 	struct color_group *cgroup;
 	struct page *page = NULL;
 
-	if (color >= nr_colors)
+	if (color >= color_cache_info.nr_colors)
 		goto out;
 
 	cgroup = &color_groups[color];
@@ -93,7 +90,7 @@ static unsigned long smallest_nr_pages(void)
 {
 	unsigned long i, min_pages = -1;
 	struct color_group *cgroup;
-	for (i = 0; i < nr_colors; ++i) {
+	for (i = 0; i < color_cache_info.nr_colors; ++i) {
 		cgroup = &color_groups[i];
 		if (atomic_read(&cgroup->nr_pages) < min_pages)
 			min_pages = atomic_read(&cgroup->nr_pages);
@@ -226,7 +223,7 @@ int color_nr_pages_handler(struct ctl_table *table, int write, void __user *buff
 		ret = -EPERM;
 		goto out;
 	}
-	for (i = 0; i < nr_colors; ++i) {
+	for (i = 0; i < color_cache_info.nr_colors; ++i) {
 		cgroup = &color_groups[i];
 		buf = ((char*)table->data) + used;
 		used += snprintf(buf, table->maxlen - used, ONE_COLOR_FMT,
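The sysctl handler in the hunk above appends one formatted record per color into `table->data`, advancing `used` by each snprintf() return value. A self-contained sketch of that accumulation idiom (`ONE_COLOR_FMT` is defined elsewhere in color.c; the format string below is an assumed stand-in):

#include <stdio.h>

/* Hypothetical stand-in for ONE_COLOR_FMT. */
#define ONE_COLOR_FMT "color %lu: %d pages\n"

static size_t format_colors(char *data, size_t maxlen,
                            const int *nr_pages, unsigned long nr_colors)
{
	size_t used = 0;
	unsigned long i;

	/* snprintf() returns the length it *would* have written, so
	 * `used` must be re-checked before each call, or the
	 * `maxlen - used` argument underflows once the buffer fills. */
	for (i = 0; i < nr_colors && used < maxlen; ++i)
		used += snprintf(data + used, maxlen - used,
		                 ONE_COLOR_FMT, i, nr_pages[i]);
	return used;
}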
@@ -275,16 +272,24 @@ static int __init init_mask(void)
 	BUG_ON(PAGE_SIZE >= (color_cache_info.sets << line_size_log));
 	color_mask = ((color_cache_info.sets << line_size_log) - 1) ^
 		(PAGE_SIZE - 1);
-	nr_colors = (color_mask >> PAGE_SHIFT) + 1;
+	color_cache_info.nr_colors = (color_mask >> PAGE_SHIFT) + 1;
 out:
 	return err;
 }
 #elif defined(CONFIG_SPARC) /* X86 */
 static int __init init_mask(void)
 {
-	/* bits 17:13 */
-	color_mask = 0x3e000UL;
-	nr_colors = (1 << hweight_long(color_mask));
+	/*
+	 * Static assuming we are using Flare (our Niagara machine).
+	 * This machine has weirdness with cache banks, and I don't want
+	 * to waste time trying to auto-detect this.
+	 */
+	color_mask = 0x3e000UL; /* bits 17:13 */
+	color_cache_info.size = 3 * 1024 * 1024; /* 3 MB */
+	color_cache_info.line_size = 64;
+	color_cache_info.ways = 12;
+	color_cache_info.sets = 1024 * 4;
+	color_cache_info.nr_colors = (1 << hweight_long(color_mask));
 	return 0;
 }
 #endif /* SPARC/X86 */
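The hard-coded Niagara numbers are self-consistent with the formula the x86 path computes dynamically: 3 MB / (12 ways x 64 B lines) = 4096 sets, one way spans 4096 x 64 B = 2^18 bytes, and with 8 KiB base pages (PAGE_SHIFT = 13 on this machine, an assumption the diff itself does not show) the set-index bits above the page offset are exactly bits 17:13. A standalone check of that arithmetic, using a GCC builtin in place of the kernel's hweight_long():

#include <assert.h>

int main(void)
{
	unsigned long size = 3UL * 1024 * 1024;         /* 3 MB L2 */
	unsigned long line_size = 64, ways = 12;
	unsigned long sets = size / (ways * line_size); /* 4096 */
	unsigned long page_size = 1UL << 13;            /* assumed 8 KiB pages */

	/* Same construction as the x86 init_mask():
	 * ((sets << line_size_log) - 1) ^ (PAGE_SIZE - 1). */
	unsigned long mask = ((sets * line_size) - 1) ^ (page_size - 1);

	assert(sets == 1024 * 4);
	assert(mask == 0x3e000UL);                        /* bits 17:13 */
	assert((1UL << __builtin_popcountl(mask)) == 32); /* 5 bits -> 32 colors */
	return 0;
}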
@@ -297,14 +302,15 @@ static int __init init_color_groups(void)
 	unsigned long i;
 	int err = 0;
 
-	color_groups = kmalloc(nr_colors * sizeof(struct color_group), GFP_KERNEL);
+	color_groups = kmalloc(color_cache_info.nr_colors *
+			sizeof(struct color_group), GFP_KERNEL);
 	if (!color_groups) {
 		printk(KERN_WARNING "Could not allocate color groups.\n");
 		err = -ENOMEM;
 		goto out;
 	}
 
-	for (i = 0; i < nr_colors; ++i) {
+	for (i = 0; i < color_cache_info.nr_colors; ++i) {
 		cgroup = &color_groups[i];
 		atomic_set(&cgroup->nr_pages, 0);
 		INIT_LIST_HEAD(&cgroup->list);
@@ -326,9 +332,9 @@ static int __init init_color(void)
 
 	err = init_mask();
 	printk("PAGE_SIZE: %lu Color mask: 0x%lx Total colors: %lu\n",
-			PAGE_SIZE, color_mask, nr_colors);
+			PAGE_SIZE, color_mask, color_cache_info.nr_colors);
 
-	BUG_ON(LOCKDEP_MAX_NR_COLORS < nr_colors);
+	BUG_ON(LOCKDEP_MAX_NR_COLORS < color_cache_info.nr_colors);
 	err = init_color_groups();
 	return err;
 }
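With the count now living in the descriptor, any file that previously extern'ed `nr_colors` switches to the single `color_cache_info` symbol. A minimal sketch of a consumer, for example handing out pages round-robin across colors (this helper is hypothetical, not from the patch; only `get_colored_page()` and the struct field come from this diff):

extern struct color_cache_info color_cache_info;
extern struct page *get_colored_page(unsigned long color);

/* Hypothetical helper: cycle through the colors so consecutive
 * allocations land in different cache partitions. */
static struct page *get_next_colored_page(void)
{
	static unsigned long next;
	unsigned long color = next++ % color_cache_info.nr_colors;

	return get_colored_page(color); /* NULL if that color is exhausted */
}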
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index d681f57be01f..7ccdaf03740b 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -102,8 +102,6 @@ static int map_color_ctrl_page(struct vm_area_struct *vma)
 	/* Map it into the vma. Make sure to use PAGE_SHARED, otherwise
 	 * userspace actually gets a copy-on-write page. */
 	err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED);
-	TRACE_CUR("page shared: guess:0x1(63)...1??111 actual:0x%lx\n", PAGE_SHARED);
-	/* present, RW, user, accessed, NX=63 */
 
 	if (err)
 		TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err);
@@ -181,10 +179,12 @@ static int do_map_colored_pages(struct vm_area_struct *vma)
 		clear_user_highpage(page, addr);
 #endif
 		TRACE_CUR(ALLOC_NAME ": insert page (pa:0x%10llx, pfn:%8lu, "
-				"color:%3lu) at 0x%lx (flags:%lx prot:%lx\n",
+				"color:%3lu) at 0x%lx (flags:%16lx prot:%16lx) "
+				"PAGE_SHARED:0x%16lx\n",
 				page_to_phys(page),
 				page_to_pfn(page), this_color, addr,
 				vma->vm_flags, pgprot_val(vma->vm_page_prot),
+				PAGE_SHARED);
 		//err = vm_insert_page(vma, addr, page);
 		err = remap_pfn_range(vma, addr, page_to_pfn(page),
 				PAGE_SIZE, PAGE_SHARED);
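The PAGE_SHARED remark in map_color_ctrl_page() has a userspace mirror: the process must map the device with MAP_SHARED, or it would fault in copy-on-write duplicates instead of the kernel's colored frames. A sketch of the calling side (the device path is a guess; this patch does not show where the allocator device is registered):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device node for the colored-page allocator. */
	int fd = open("/dev/litmus/color_alloc", O_RDWR);
	if (fd < 0) { perror("open"); return EXIT_FAILURE; }

	/* MAP_SHARED matches the kernel's PAGE_SHARED protection in
	 * remap_pfn_range(); MAP_PRIVATE would give CoW copies. */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); close(fd); return EXIT_FAILURE; }

	p[0] = 42;	/* touch the colored page */
	munmap(p, 4096);
	close(fd);
	return EXIT_SUCCESS;
}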