author     ChengYang Fu <chengyangfu@gmail.com>   2015-03-22 15:05:52 -0400
committer  ChengYang Fu <chengyangfu@gmail.com>   2015-03-22 15:05:52 -0400
commit     bf0b4079ab52d1eba4c99dfe404548fefea4b94d (patch)
tree       f8cace015726dd1118c38ae137031f486879227b
parent     623fe6255439add90f416df69b92134fbd01f342 (diff)
Use the non-interleaved (interleaving off) address decoding, and provide
sysctl variables to adjust the cache-color and bank partition masks.
-rw-r--r--  litmus/bank_proc.c | 548
-rw-r--r--  litmus/litmus.c    |   5
2 files changed, 407 insertions, 146 deletions
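
The partition masks added by this patch are exposed as plain sysctl integers, so they can be tuned from user space once the kernel is running. Below is a minimal userspace sketch (not part of the commit) that writes a new set-partition mask for core 0, Level A; it assumes the entries appear under /proc/sys/litmus/, as the litmus_dir_table in this patch suggests, and that proc_dointvec_minmax() accepts a decimal integer.

/* set_partition.c -- hypothetical helper, illustration only */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/proc/sys/litmus/C0_LA_set";   /* assumed sysctl path */
	unsigned int mask = (argc > 1) ? (unsigned int)strtoul(argv[1], NULL, 0)
	                               : 0x00000003;        /* cache colors 0 and 1 */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* the handler parses a decimal integer, so write the mask in decimal */
	fprintf(f, "%u\n", mask);
	fclose(f);
	return 0;
}
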
diff --git a/litmus/bank_proc.c b/litmus/bank_proc.c
index 05c7fc3df98c..7cf07ee1dad9 100644
--- a/litmus/bank_proc.c
+++ b/litmus/bank_proc.c
@@ -18,22 +18,58 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
-
 #define LITMUS_LOCKDEP_NAME_MAX_LEN 50
 
 // This Address Decoding is used in imx6-sabredsd platform
-#define CACHE_MASK 0x0000f000
-#define BANK_MASK 0x00007000
-#define OFFSET_SHIFT 12
+#define BANK_MASK  0x38000000
+#define BANK_SHIFT 27
+#define CACHE_MASK  0x0000f000
+#define CACHE_SHIFT 12
+
+#define PAGES_PER_COLOR 256
+unsigned int NUM_PAGE_LIST;  //8*16
+
+unsigned int number_banks;
+unsigned int number_cachecolors;
+
+unsigned int set_partition_max = 0x0000ffff;
+unsigned int set_partition_min = 0;
+unsigned int bank_partition_max = 0x000000ff;
+unsigned int bank_partition_min = 0;
+
+unsigned int set_partition[9] = {
+	0x00000003,  /* Core 0, and Level A*/
+	0x00000003,  /* Core 0, and Level B*/
+	0x0000000C,  /* Core 1, and Level A*/
+	0x0000000C,  /* Core 1, and Level B*/
+	0x00000030,  /* Core 2, and Level A*/
+	0x00000030,  /* Core 2, and Level B*/
+	0x000000C0,  /* Core 3, and Level A*/
+	0x000000C0,  /* Core 3, and Level B*/
+	0x0000ff00,  /* Level C */
+};
 
-#define PAGES_PER_COLOR 1024
+unsigned int bank_partition[9] = {
+	0x00000010,  /* Core 0, and Level A*/
+	0x00000010,  /* Core 0, and Level B*/
+	0x00000020,  /* Core 1, and Level A*/
+	0x00000020,  /* Core 1, and Level B*/
+	0x00000040,  /* Core 2, and Level A*/
+	0x00000040,  /* Core 2, and Level B*/
+	0x00000080,  /* Core 3, and Level A*/
+	0x00000080,  /* Core 3, and Level B*/
+	0x0000000f,  /* Level C */
+};
 
-unsigned long curr_cachecolor;
-int used_cachecolor;
+unsigned int set_index[9] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0
+};
 
+unsigned int bank_index[9] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0
+};
 
-unsigned long number_banks;
-unsigned long number_cachecolors;
+struct mutex void_lockdown_proc;
 
 
 /*
@@ -46,39 +82,80 @@ struct color_group {
 	atomic_t nr_pages;
 };
 
+
+static struct color_group *color_groups;
+
 /*
- * This is old code which is not used in current version
+ * Naive function to count the number of 1's
  */
-/*
-static struct alloced_pages {
-	spinlock_t lock;
-	struct list_head list;
-} alloced_pages;
+unsigned int counting_one_set(unsigned int v)
+{
+//	unsigned int v; // count the number of bits set in v
+	unsigned int c; // c accumulates the total bits set in v
+
+	for (c = 0; v; v >>= 1)
+	{
+		c += v & 1;
+	}
+	return c;
+}
 
-struct alloced_page {
-	struct page *page;
-	struct vm_area_struct *vma;
-	struct list_head list;
-};
-*/
+unsigned int two_exp(unsigned int e)
+{
+	unsigned int v = 1;
+	for (e; e>0; e-- )
+	{
+		v=v*2;
+	}
+	return v;
+}
+
+unsigned int num_by_bitmask_index(bitmask, index)
+{
+	unsigned int pos = 0;
+
+	while(true)
+	{
+		if(index ==0 && (bitmask & 1)==1)
+		{
+			break;
+		}
+		if(index !=0 && (bitmask & 1)==1){
+			index--;
+		}
+		pos++;
+		bitmask = bitmask >>1;
+
+	}
+	return pos;
+}
 
-static struct color_group *color_groups;
-static struct lock_class_key color_lock_keys[16];
 
-//static struct color_group *color_groups;
 
 /* Decoding page color, 0~15 */
-static inline unsigned long page_color(struct page *page)
+static inline unsigned int page_color(struct page *page)
 {
-	return ((page_to_phys(page)& CACHE_MASK) >> PAGE_SHIFT);
+	return ((page_to_phys(page)& CACHE_MASK) >> CACHE_SHIFT);
 }
 
 /* Decoding page bank number, 0~7 */
-static inline unsigned long page_bank(struct page *page)
+static inline unsigned int page_bank(struct page *page)
 {
-	return ((page_to_phys(page)& BANK_MASK) >> PAGE_SHIFT);
+	return ((page_to_phys(page)& BANK_MASK) >> BANK_SHIFT);
 }
 
+static inline unsigned int page_list_index(struct page *page)
+{
+	unsigned int idx;
+	idx = (page_color(page) + page_bank(page)*(number_cachecolors));
+//	printk("address = %lx, ", page_to_phys(page));
+//	printk("color(%d), bank(%d), indx = %d\n", page_color(page), page_bank(page), idx);
+
+	return idx;
+}
+
+
+
 /*
  * It is used to determine the smallest number of page lists.
  */
@@ -86,7 +163,7 @@ static unsigned long smallest_nr_pages(void)
 {
 	unsigned long i, min_pages = -1;
 	struct color_group *cgroup;
-	for (i = 0; i < number_cachecolors; ++i) {
+	for (i = 0; i < NUM_PAGE_LIST; ++i) {
 		cgroup = &color_groups[i];
 		if (atomic_read(&cgroup->nr_pages) < min_pages)
 			min_pages = atomic_read(&cgroup->nr_pages);
@@ -94,12 +171,22 @@ static unsigned long smallest_nr_pages(void)
 	return min_pages;
 }
 
+static void show_nr_pages(void)
+{
+	unsigned long i;
+	struct color_group *cgroup;
+	for (i = 0; i < NUM_PAGE_LIST; ++i) {
+		cgroup = &color_groups[i];
+		printk("i =%d, nr_pages = %d\n", i, atomic_read(&cgroup->nr_pages));
+	}
+}
+
 /*
  * Add a page to current pool.
  */
 void add_page_to_color_list(struct page *page)
 {
-	const unsigned long color = page_color(page);
+	const unsigned long color = page_list_index(page);
 	struct color_group *cgroup = &color_groups[color];
 	BUG_ON(in_list(&page->lru) || PageLRU(page));
 	BUG_ON(page_count(page) > 1);
@@ -116,38 +203,47 @@ void add_page_to_color_list(struct page *page)
  */
 static int do_add_pages(void)
 {
-//	printk("LITMUS do add pages\n");
+	printk("LITMUS do add pages\n");
 
 	struct page *page, *page_tmp;
 	LIST_HEAD(free_later);
 	unsigned long color;
 	int ret = 0;
+	int i = 0;
 
 	// until all the page lists contain enough pages
+	//for (i =0; i<5; i++) {
 	while (smallest_nr_pages() < PAGES_PER_COLOR) {
 
 		page = alloc_page(GFP_HIGHUSER_MOVABLE);
-
 		if (unlikely(!page)) {
-		//	printk(KERN_WARNING "Could not allocate pages.\n");
+			printk(KERN_WARNING "Could not allocate pages.\n");
 			ret = -ENOMEM;
 			goto out;
 		}
-		color = page_color(page);
+		color = page_list_index(page);
+		//show_nr_pages();
+		//printk("before : nr_pages = %d\n", atomic_read(&color_groups[color].nr_pages));
 		if (atomic_read(&color_groups[color].nr_pages) < PAGES_PER_COLOR) {
 			add_page_to_color_list(page);
 		} else{
 			// Pages here will be freed later
 			list_add_tail(&page->lru, &free_later);
 		}
-	}
+		//show_nr_pages();
+		//printk("after : nr_pages = %d\n", atomic_read(&color_groups[color].nr_pages));
+
+	}
+	show_nr_pages();
+#if 1
 	// Free the unwanted pages
 	list_for_each_entry_safe(page, page_tmp, &free_later, lru) {
 		list_del(&page->lru);
 		__free_page(page);
 	}
+#endif
 out:
 	return ret;
 }
 
 /*
@@ -162,7 +258,7 @@ static struct page *new_alloc_page_color( unsigned long color)
 	struct color_group *cgroup;
 	struct page *rPage = NULL;
 
-	if( (color <0) || (color)>15) {
+	if( (color <0) || (color)>(number_cachecolors*number_banks -1)) {
 		TRACE_CUR("Wrong color %lu\n", color);
 //		printk(KERN_WARNING "Wrong color %lu\n", color);
 		goto out_unlock;
@@ -192,31 +288,17 @@ out:
 
 
 /*
- * Provide pages for replacement
- * This is used to generate experiments
- */
-struct page *new_alloc_page_predefined(struct page *page, int **x)
-{
-	unsigned int color = curr_cachecolor;
-
-//	printk("allocate new page color = %d\n", color);
-	struct color_group *cgroup;
-	struct page *rPage = NULL;
-
-	rPage = new_alloc_page_color(color);
-	curr_cachecolor = (color + 1)% used_cachecolor;
-out:
-	return rPage;
-}
-/*
  * provide pages for replacement according to
- * node = 0 for Level A, B tasks in Cpu 0
- * node = 1 for Level A, B tasks in Cpu 1
- * node = 2 for Level A, B tasks in Cpu 2
- * node = 3 for Level A, B tasks in Cpu 3
- * node = 4 for Level C tasks
+ * node = 0 for Level A tasks in Cpu 0
+ * node = 1 for Level B tasks in Cpu 0
+ * node = 2 for Level A tasks in Cpu 1
+ * node = 3 for Level B tasks in Cpu 1
+ * node = 4 for Level A tasks in Cpu 2
+ * node = 5 for Level B tasks in Cpu 2
+ * node = 6 for Level A tasks in Cpu 3
+ * node = 7 for Level B tasks in Cpu 3
+ * node = 8 for Level C tasks
  */
-#if 1
 struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
 {
 //	printk("allocate new page node = %d\n", node);
@@ -224,79 +306,20 @@ struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
 	struct color_group *cgroup;
 	struct page *rPage = NULL;
 	unsigned int color;
-	get_random_bytes(&color, sizeof(unsigned int));
 
-	// Decode the node to decide what color pages we should provide
-	switch(node ){
-		case 0:
-		case 1:
-		case 2:
-		case 3:
-			color = (color%4) * 4 + node;
-		case 4:
-			color = (color%16);
-/*
-		case 0:
-		case 1:
-		case 2:
-		case 3:
-			color = (color%2) * 8 + node;
-			break;
-		case 4:
-			color = (color%8)+4;
-			if(color >=8)
-				color+=4;
-			break;
-		default:
-			TRACE_CUR("Wrong color %lu\n", color);
-			return rPage;
-*/
-	}
 
+	unsigned int idx = 0;
+	idx += num_by_bitmask_index(set_partition[node], set_index[node]);
+	idx += number_cachecolors* num_by_bitmask_index(bank_partition[node], bank_index[node]);
+	printk("node = %d, idx = %d\n", node, idx);
 
-//	printk("allocate new page color = %d\n", color);
+	rPage = new_alloc_page_color(idx);
 
-	rPage = new_alloc_page_color(color);
+
+	set_index[node] = (set_index[node]+1) % counting_one_set(set_partition[node]);
+	bank_index[node] = (bank_index[node]+1) % counting_one_set(bank_partition[node]);
 	return rPage;
 }
-#else
-struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
-{
-	return new_alloc_page_predefined(page, x);
-}
-#endif
-
-/*
- * Provide pages for replacement according to bank number.
- * This is used in cache way partition
- */
-struct page *new_alloc_page_banknr(struct page *page, unsigned long banknr, int **x)
-{
-//	printk("allocate new page bank = %d\n", banknr);
-	struct color_group *cgroup;
-	struct page *rPage = NULL;
-	unsigned int color;
-	get_random_bytes(&color, sizeof(unsigned int));
-
-	if((banknr<= 7) && (banknr>=0)){
-		color = (color%2) * 8 + banknr;
-	}else{
-		goto out;
-	}
-
-	rPage = new_alloc_page_color(color);
-
-out:
-	return rPage;
-}
-
-
-void set_number_of_colors(int colornr)
-{
-//	printk(KERN_WARNING "Set of colors = %d.\n", colornr);
-	used_cachecolor = colornr ;
-	curr_cachecolor = 0;
-}
 
 
 
@@ -306,12 +329,14 @@ void set_number_of_colors(int colornr)
  */
 static int __init init_variables(void)
 {
-	number_banks = 1+(BANK_MASK >> PAGE_SHIFT);
-	number_cachecolors = 1+(CACHE_MASK >> PAGE_SHIFT);
-	used_cachecolor = 16;
-	curr_cachecolor = 0;
-
-
+	number_banks = counting_one_set(BANK_MASK);
+	number_banks = two_exp(number_banks);
+
+	number_cachecolors = counting_one_set(CACHE_MASK);
+	number_cachecolors = two_exp(number_cachecolors);
+	NUM_PAGE_LIST = number_banks * number_cachecolors;
+	printk(KERN_WARNING "number of banks = %d, number of cachecolors=%d\n", number_banks, number_cachecolors);
+	mutex_init(&void_lockdown_proc);
 }
 
 
@@ -324,39 +349,274 @@ static int __init init_color_groups(void)
 	unsigned long i;
 	int err = 0;
 
-	color_groups = kmalloc(number_cachecolors *
-			sizeof(struct color_group), GFP_KERNEL);
+	printk("NUM_PAGE_LIST = %d\n", NUM_PAGE_LIST);
+	color_groups = kmalloc(NUM_PAGE_LIST *sizeof(struct color_group), GFP_KERNEL);
+
 	if (!color_groups) {
-//		printk(KERN_WARNING "Could not allocate color groups.\n");
+		printk(KERN_WARNING "Could not allocate color groups.\n");
 		err = -ENOMEM;
 	}else{
 
-		for (i = 0; i < number_cachecolors; ++i) {
+		for (i = 0; i < NUM_PAGE_LIST; ++i) {
 			cgroup = &color_groups[i];
 			atomic_set(&cgroup->nr_pages, 0);
 			INIT_LIST_HEAD(&cgroup->list);
 			spin_lock_init(&cgroup->lock);
 		}
 	}
 	return err;
+}
+
+int set_partition_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0, i = 0;
+	mutex_lock(&void_lockdown_proc);
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+	if (write) {
+		printk("New set Partition : \n");
+		for(i =0;i <9;i++)
+		{
+			set_index[i] = 0;
+			printk("set[%d] = %x \n", i, set_partition[i]);
+		}
+	}
+out:
+	mutex_unlock(&void_lockdown_proc);
+	return ret;
 }
 
+int bank_partition_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0, i = 0;
+	mutex_lock(&void_lockdown_proc);
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+	if (write) {
+		for(i =0;i <9;i++)
+		{
+			bank_index[i] = 0;
+		}
+	}
+out:
+	mutex_unlock(&void_lockdown_proc);
+	return ret;
+}
+
+
+static struct ctl_table cache_table[] =
+{
+
+	{
+		.procname = "C0_LA_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[0],
+		.maxlen = sizeof(set_partition[0]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C0_LB_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[1],
+		.maxlen = sizeof(set_partition[1]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C1_LA_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[2],
+		.maxlen = sizeof(set_partition[2]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C1_LB_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[3],
+		.maxlen = sizeof(set_partition[3]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C2_LA_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[4],
+		.maxlen = sizeof(set_partition[4]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C2_LB_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[5],
+		.maxlen = sizeof(set_partition[5]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C3_LA_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[6],
+		.maxlen = sizeof(set_partition[6]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C3_LB_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[7],
+		.maxlen = sizeof(set_partition[7]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "Call_LC_set",
+		.mode = 0666,
+		.proc_handler = set_partition_handler,
+		.data = &set_partition[8],
+		.maxlen = sizeof(set_partition[8]),
+		.extra1 = &set_partition_min,
+		.extra2 = &set_partition_max,
+	},
+	{
+		.procname = "C0_LA_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[0],
+		.maxlen = sizeof(set_partition[0]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "C0_LB_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[1],
+		.maxlen = sizeof(set_partition[1]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "C1_LA_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[2],
+		.maxlen = sizeof(set_partition[2]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "C1_LB_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[3],
+		.maxlen = sizeof(set_partition[3]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "C2_LA_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[4],
+		.maxlen = sizeof(set_partition[4]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "C2_LB_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[5],
+		.maxlen = sizeof(set_partition[5]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "C3_LA_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[6],
+		.maxlen = sizeof(set_partition[6]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "C3_LB_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[7],
+		.maxlen = sizeof(set_partition[7]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+	{
+		.procname = "Call_LC_bank",
+		.mode = 0666,
+		.proc_handler = bank_partition_handler,
+		.data = &bank_partition[8],
+		.maxlen = sizeof(set_partition[8]),
+		.extra1 = &bank_partition_min,
+		.extra2 = &bank_partition_max,
+	},
+
+
+	{ }
+};
+
+static struct ctl_table litmus_dir_table[] = {
+	{
+		.procname = "litmus",
+		.mode = 0555,
+		.child = cache_table,
+	},
+	{ }
+};
+
+
+static struct ctl_table_header *litmus_sysctls;
+
+
 /*
  * Initialzie this proc
  */
 static int __init litmus_color_init(void)
 {
 	int err=0;
-
+	printk("Init bankproc.c\n");
+
 	//INIT_LIST_HEAD(&alloced_pages.list);
 	//spin_lock_init(&alloced_pages.lock);
 	init_variables();
-//	printk("Cache number = %d , Cache mask = 0x%lx\n", number_cachecolors, CACHE_MASK);
-//	printk("Bank number = %d , Bank mask = 0x%lx\n", number_banks, BANK_MASK);
+
+	printk(KERN_INFO "Registering LITMUS^RT proc color sysctl.\n");
+
+	litmus_sysctls = register_sysctl_table(litmus_dir_table);
+	if (!litmus_sysctls) {
+		printk(KERN_WARNING "Could not register LITMUS^RT color sysctl.\n");
+		err = -EFAULT;
+		goto out;
+	}
+
 	init_color_groups();
 	do_add_pages();
 
-//	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
+	printk(KERN_INFO "Registering LITMUS^RT color and bank proc.\n");
+out:
 	return err;
 }
 
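
Review note: the core of the bank_proc.c change is the new page-list indexing. A page's physical address is decoded into a cache color (bits 12-15, CACHE_MASK) and a DRAM bank (bits 27-29, BANK_MASK), giving number_banks * number_cachecolors = 8 * 16 = 128 separate free lists, and new_alloc_page() then walks the caller's set/bank partition masks round-robin to pick the next list. The standalone sketch below re-implements that arithmetic in user space so it can be checked without booting the kernel; it is illustration only, not part of the patch, and nth_set_bit() is a simplified stand-in for num_by_bitmask_index() (like the kernel version, it assumes the mask has at least index+1 bits set).

/* list_index_demo.c -- illustration only */
#include <stdio.h>

#define BANK_MASK   0x38000000u
#define BANK_SHIFT  27
#define CACHE_MASK  0x0000f000u
#define CACHE_SHIFT 12
#define NUM_CACHECOLORS 16

/* position of the (index+1)-th set bit in bitmask */
static unsigned int nth_set_bit(unsigned int bitmask, unsigned int index)
{
	unsigned int pos = 0;
	for (;; pos++, bitmask >>= 1) {
		if (bitmask & 1) {
			if (index == 0)
				return pos;
			index--;
		}
	}
}

/* same decoding as page_list_index() in the patch, on a raw physical address */
static unsigned int page_list_index(unsigned long phys)
{
	unsigned int color = (phys & CACHE_MASK) >> CACHE_SHIFT;
	unsigned int bank  = (phys & BANK_MASK) >> BANK_SHIFT;
	return color + bank * NUM_CACHECOLORS;   /* 0 .. 127 */
}

int main(void)
{
	/* e.g. core 0, Level A: cache colors {0,1}, bank {4} */
	unsigned int set_partition = 0x00000003, bank_partition = 0x00000010;
	unsigned int idx = nth_set_bit(set_partition, 1)
	                 + NUM_CACHECOLORS * nth_set_bit(bank_partition, 0);

	printf("list for color bit 1, bank bit 0: %u\n", idx);              /* 65 */
	printf("phys 0x12345000 -> list %u\n", page_list_index(0x12345000)); /* 37 */
	return 0;
}
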
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 77c609b5a932..035207991732 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -390,7 +390,7 @@ do_return:
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
 
-#if 1
+#if 0
 static struct page *new_alloc_page(struct page *page, unsigned long node, int **x)
 {
 	return alloc_pages_exact_node(0, GFP_HIGHUSER_MOVABLE, 0);
@@ -501,7 +501,8 @@ asmlinkage long sys_set_page_color(int cpu)
 			node = 4;
 		else
 			node = cpu;
 
+		//node= 0;
 		if (!list_empty(&pagelist)) {
 			ret = migrate_pages(&pagelist, new_alloc_page, node, MIGRATE_ASYNC, MR_SYSCALL);
 			TRACE_TASK(current, "%ld pages not migrated.\n", ret);
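
One sizing consequence worth noting for review: do_add_pages() keeps allocating until every one of the NUM_PAGE_LIST lists holds PAGES_PER_COLOR pages, so the reserved pool grows with the number of lists. A quick sanity check (illustrative only, assuming 4 KiB pages as on the imx6 target):

/* pool_size_check.c -- back-of-the-envelope check, not part of the patch */
#include <stdio.h>

int main(void)
{
	const unsigned long num_page_list   = 8UL * 16UL;  /* banks * cache colors */
	const unsigned long pages_per_color = 256UL;
	const unsigned long page_size       = 4096UL;       /* assumed 4 KiB pages */
	unsigned long bytes = num_page_list * pages_per_color * page_size;

	printf("reserved pool: %lu pages, %lu MiB\n",
	       num_page_list * pages_per_color, bytes >> 20);  /* 32768 pages, 128 MiB */
	return 0;
}
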