author	Namhoon Kim <namhoonk@cs.unc.edu>	2015-04-09 23:27:52 -0400
committer	Namhoon Kim <namhoonk@cs.unc.edu>	2015-04-09 23:27:52 -0400
commit	e5d0df8359d1a297b4ffb59ebae18df63d7dab4f (patch)
tree	5f60f0652bef05f51de5bf61778ba81ede89d439
parent	c3079b56cfd3b62c08e02684bee671d2361ad9c9 (diff)
fix
-rw-r--r--	litmus/cache_proc.c	120
-rw-r--r--	litmus/litmus.c	14
-rw-r--r--	litmus/sched_mc2.c	14
3 files changed, 66 insertions(+), 82 deletions(-)
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index 4f7fc0043b01..68b451deede1 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -192,24 +192,24 @@ static inline void cache_sync(void)
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
-static void print_lockdown_registers(void)
+static void print_lockdown_registers(int cpu)
 {
 	int i;
-
 	//for (i = 0; i < nr_lockregs; i++) {
 	for (i = 0; i < 4; i++) {
-		printk("Lockdown Data CPU %2d: 0x%04x\n",
+		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
 		       i, readl_relaxed(ld_d_reg(i)));
-		printk("Lockdown Inst CPU %2d: 0x%04x\n",
+		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
 		       i, readl_relaxed(ld_i_reg(i)));
 	}
 }
 
 static void test_lockdown(void *ignore)
 {
-	int i;
+	int i, cpu;
 
-	printk("Start lockdown test on CPU %d.\n", smp_processor_id());
+	cpu = smp_processor_id();
+	printk("Start lockdown test on CPU %d.\n", cpu);
 
 	for (i = 0; i < nr_lockregs; i++) {
 		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
@@ -217,7 +217,7 @@ static void test_lockdown(void *ignore)
 	}
 
 	printk("Lockdown initial state:\n");
-	print_lockdown_registers();
+	print_lockdown_registers(cpu);
 	printk("---\n");
 
 	for (i = 0; i < nr_lockregs; i++) {
@@ -225,7 +225,7 @@ static void test_lockdown(void *ignore)
 		writel_relaxed(2, ld_i_reg(i));
 	}
 	printk("Lockdown all data=1 instr=2:\n");
-	print_lockdown_registers();
+	print_lockdown_registers(cpu);
 	printk("---\n");
 
 	for (i = 0; i < nr_lockregs; i++) {
@@ -233,7 +233,7 @@ static void test_lockdown(void *ignore)
 		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
 	}
 	printk("Lockdown varies:\n");
-	print_lockdown_registers();
+	print_lockdown_registers(cpu);
 	printk("---\n");
 
 	for (i = 0; i < nr_lockregs; i++) {
@@ -241,7 +241,7 @@ static void test_lockdown(void *ignore)
 		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
 	}
 	printk("Lockdown all zero:\n");
-	print_lockdown_registers();
+	print_lockdown_registers(cpu);
 
 	printk("End lockdown test.\n");
 }
@@ -273,12 +273,14 @@ int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	int ret = 0, i;
+	unsigned long flags;
 
 	mutex_lock(&lockdown_proc);
 
 	//flush_cache_all();
 	//cache_sync();
-	l2x0_flush_all();
+	//l2x0_flush_all();
+	flush_cache();
 
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret)
@@ -296,8 +298,10 @@ int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
 				       i * L2X0_LOCKDOWN_STRIDE);
 		}
 	}
-	print_lockdown_registers();
 
+	local_irq_save(flags);
+	print_lockdown_registers(smp_processor_id());
+	local_irq_restore(flags);
 out:
 	mutex_unlock(&lockdown_proc);
 	return ret;
@@ -307,6 +311,7 @@ int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	int ret = 0, i;
+	unsigned long flags;
 
 	mutex_lock(&lockdown_proc);
 
@@ -355,8 +360,9 @@ int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
 */
 	}
 	printk("LOCK_ALL HANDLER\n");
-	print_lockdown_registers();
-
+	local_irq_save(flags);
+	print_lockdown_registers(smp_processor_id());
+	local_irq_restore(flags);
 out:
 	mutex_unlock(&lockdown_proc);
 	return ret;
@@ -383,27 +389,28 @@ void mem_lock(u32 lock_val, int cpu)
 void do_partition(enum crit_level lv, int cpu)
 {
 	u32 regs;
-	//unsigned long flags;
+	unsigned long flags;
 
 	if (lock_all || !use_part)
 		return;
+	raw_spin_lock_irqsave(&cache_lock, flags);
 	switch(lv) {
 	case CRIT_LEVEL_A:
 		regs = ~way_partitions[cpu*2];
-		regs |= 0xffff0000;
+		//regs |= 0xffff0000;
 		writel_relaxed(regs, ld_d_reg(cpu));
 		writel_relaxed(regs, ld_i_reg(cpu));
 		break;
 	case CRIT_LEVEL_B:
 		regs = ~way_partitions[cpu*2+1];
-		regs |= 0xffff0000;
+		//regs |= 0xffff0000;
 		writel_relaxed(regs, ld_d_reg(cpu));
 		writel_relaxed(regs, ld_i_reg(cpu));
 		break;
 	case CRIT_LEVEL_C:
 	case NUM_CRIT_LEVELS:
 		regs = ~way_partitions[8];
-		regs |= 0xffff0000;
+		//regs |= 0xffff0000;
 		writel_relaxed(regs, ld_d_reg(cpu));
 		writel_relaxed(regs, ld_i_reg(cpu));
 		break;
@@ -411,33 +418,16 @@ void do_partition(enum crit_level lv, int cpu)
 		BUG();
 
 	}
+/*
+	printk(KERN_INFO "P%d lockdown on P%d\n", smp_processor_id(), cpu);
+	printk(KERN_INFO "CRIT_LEVEL %d\n", lv);
+	print_lockdown_registers(smp_processor_id());
+*/
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
 	//cache_sync();
//	barrier();
//	mem_lock(regs, cpu);
//	barrier();
-	//print_lockdown_registers();
-/*
-	if (use_set_partition == 1 && use_way_partition == 1)
-		printk(KERN_ALERT "BOTH SET, WAY ARE SET!!!!\n");
-
-	if (use_way_partition == 1) {
-		if (lv < CRIT_LEVEL_C) {
-			writel_relaxed(way_partitions[cpu], ld_d_reg(cpu));
-			writel_relaxed(way_partitions[cpu], ld_i_reg(cpu));
-		} else {
-			writel_relaxed(way_partitions[4], ld_d_reg(cpu));
-			writel_relaxed(way_partitions[4], ld_i_reg(cpu));
-		}
-	} else if (use_set_partition == 1) {
-		if (lv < CRIT_LEVEL_C) {
-			writel_relaxed(set_partitions[0], ld_d_reg(cpu));
-			writel_relaxed(set_partitions[0], ld_i_reg(cpu));
-		} else {
-			writel_relaxed(set_partitions[1], ld_d_reg(cpu));
-			writel_relaxed(set_partitions[1], ld_i_reg(cpu));
-		}
-	}
-*/
 }
 
 int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
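
Background for the do_partition() hunks above: the PL310 lockdown-by-way registers use an inverted sense, i.e. a 1 bit in ld_d_reg()/ld_i_reg() forbids allocation into that way, so the handler writes the complement of the per-(CPU, criticality) way mask. Complementing a 16-bit mask held in a u32 already sets the reserved upper half, which is why the explicit "regs |= 0xffff0000;" lines become redundant and are commented out. A minimal user-space sketch of the arithmetic, assuming a 16-way L2; the helper name is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration: allowed_ways has a 1 bit for each L2 way
 * the partition may use; the PL310 lockdown register wants the
 * opposite, a 1 bit for each way that is locked against allocation. */
static uint32_t lockdown_value(uint32_t allowed_ways)
{
	/* e.g. ~0x000f = 0xfffffff0: ways 4-15 locked, reserved bits set */
	return ~allowed_ways;
}

int main(void)
{
	uint32_t crit_a_ways = 0x000f; /* e.g. CPU 0, level A owns ways 0-3 */

	printf("lockdown reg = 0x%08x\n", lockdown_value(crit_a_ways));
	return 0;
}
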
@@ -491,21 +481,21 @@ void inline enter_irq_mode(void)
 {
 	int cpu = smp_processor_id();
 
-	return;
+	//return;
 	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
 	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
 
 	if (os_isolation == 0)
 		return;
 
-	writel_relaxed(prev_lockdown_i_reg[4], ld_i_reg(cpu));
-	writel_relaxed(prev_lockdown_d_reg[4], ld_d_reg(cpu));
+	writel_relaxed(way_partitions[8], ld_i_reg(cpu));
+	writel_relaxed(way_partitions[8], ld_d_reg(cpu));
 }
 
 void inline exit_irq_mode(void)
 {
 	int cpu = smp_processor_id();
-	return;
+	//return;
 	if (os_isolation == 0)
 		return;
 
@@ -1011,26 +1001,19 @@ out_free:
 
 void flush_cache(void)
 {
-/*	int *dummy;
-
-	flush_cache_all();
-	int size = 128, i, t = 0;
-
-	dummy = kmalloc(PAGE_SIZE*size, GFP_KERNEL);
-	for (i = 0; i<PAGE_SIZE*size/sizeof(int); i++) {
-		dummy[i] = t++;
-	}
-
-	kfree(dummy);
-*/
-	int way, color, i;
+	int way, color, cpu;
+	unsigned long flags;
 
-	for (i = 0; i < nr_lockregs; i++) {
-		prev_lbm_i_reg[i] = readl_relaxed(ld_i_reg(i));
-		prev_lbm_d_reg[i] = readl_relaxed(ld_d_reg(i));
-	}
+	raw_spin_lock_irqsave(&cache_lock, flags);
+	cpu = raw_smp_processor_id();
 
+	prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
+	prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
+	//printk("P%d reg value = 0x%04x\n", cpu, prev_lbm_d_reg[cpu]);
 	for (way=0;way<MAX_NR_WAYS;way++) {
+		if ( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) )
+			continue;
+		//printk("P%d flushes way #%d\n", cpu, way);
 		for (color=0;color<MAX_NR_COLORS;color++) {
 			void *vaddr = flusher_pages[way][color];
 			u32 lvalue = unlocked_way[way];
@@ -1040,10 +1023,9 @@ void flush_cache(void)
 
 	}
 
-	for (i = 0; i < nr_lockregs; i++) {
-		writel_relaxed(prev_lbm_i_reg[i], ld_i_reg(i));
-		writel_relaxed(prev_lbm_d_reg[i], ld_d_reg(i));
-	}
+	writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
+	writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
 }
 
 #define TRIALS 1000
@@ -1148,8 +1130,6 @@ static int __init litmus_sysctl_init(void)
 		goto out;
 	}
 
-	//setup_flusher_array();
-	printk(KERN_INFO "Setup flush_array.\n");
 	way_partition_min = 0x00000000;
 	way_partition_max = 0x0000FFFF;
 	os_isolation = 0;
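
Context for the flush_cache() rewrite above: the function now snapshots only the calling CPU's lockdown registers, skips ways that are locked for this CPU (this CPU never allocates into them, and flushing them would disturb other partitions), and for each remaining (way, color) pair reads a flusher page with only that way unlocked, so the reads displace whatever the way held. A sketch of the per-page eviction step the hunk abbreviates; CACHE_LINE_SIZE and the loop body are assumptions, not code from this patch:

/* Hypothetical sketch of the inner loop: with the data lockdown
 * register set to lvalue (every way locked except one), sequential
 * reads through a page spanning all sets of that way force the L2
 * to allocate there, evicting the previous contents. */
static void evict_one_way_color(void *vaddr, u32 lvalue, int cpu)
{
	volatile char *p = vaddr;
	int off;

	writel_relaxed(lvalue, ld_d_reg(cpu));	/* unlock this way only */
	for (off = 0; off < PAGE_SIZE; off += CACHE_LINE_SIZE)
		(void)p[off];			/* each read allocates a line */
}
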
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 70342e717fe0..569290545480 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -27,6 +27,7 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/clock.h>
 #include <litmus/cache_proc.h>
+#include <litmus/mc2_common.h>
 
 #include <asm/cacheflush.h>
 
@@ -394,6 +395,8 @@ extern struct page *new_alloc_page(struct page *page, unsigned long node, int **
 
 #endif
 
+//static raw_spinlock_t migrate_lock;
+
 asmlinkage long sys_set_page_color(int cpu)
 {
 	long ret = 0;
@@ -401,10 +404,12 @@ asmlinkage long sys_set_page_color(int cpu)
 	struct vm_area_struct *vma_itr = NULL;
 	int nr_pages = 0, nr_shared_pages = 0, nr_failed = 0;
 	unsigned long node;
+	enum crit_level lv;
 
 	LIST_HEAD(pagelist);
 	LIST_HEAD(shared_pagelist);
 
+
 	down_read(&current->mm->mmap_sem);
 	TRACE_TASK(current, "SYSCALL set_page_color\n");
 	vma_itr = current->mm->mmap;
@@ -466,10 +471,11 @@ asmlinkage long sys_set_page_color(int cpu)
//	}
 
 	ret = 0;
+	lv = tsk_rt(current)->mc2_data->crit;
 	if (cpu == -1)
 		node = 8;
 	else
-		node = cpu;
+		node = cpu*2 + lv;
 
 	//node= 0;
 	if (!list_empty(&pagelist)) {
@@ -492,13 +498,13 @@ asmlinkage long sys_set_page_color(int cpu)
 	}
 
 	up_read(&current->mm->mmap_sem);
 
 	list_for_each_entry(page_itr, &shared_pagelist, lru) {
 		TRACE("S Anon=%d, pfn = %lu, _mapcount = %d, _count = %d\n", PageAnon(page_itr), __page_to_pfn(page_itr), page_mapcount(page_itr), page_count(page_itr));
 	}
 
 	TRACE_TASK(current, "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
-	printk(KERN_INFO "nr_pages = %d nr_failed = %d\n", nr_pages, nr_failed);
+	printk(KERN_INFO "node = %ld, nr_pages = %d, nr_failed = %d\n", node, nr_pages, nr_failed);
 	flush_cache();
 
 	return ret;
@@ -911,6 +917,8 @@ static int __init _init_litmus(void)
 	//litmus_pmu_register();
 	color_mask = ((cache_info_sets << line_size_log) - 1) ^ (PAGE_SIZE - 1);
 	printk("Page color mask %lx\n", color_mask);
+
+	//raw_spin_lock_init(&migrate_lock);
 	return 0;
 }
 
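
The sys_set_page_color() changes above retarget page migration: the destination node is no longer the bare CPU number but a bank index derived from both the CPU and the task's MC2 criticality level, with node 8 reserved for the shared case (cpu == -1). A sketch of the convention, assuming two banked levels (A and B) per CPU as in the hunk; the helper is illustrative only:

/* Illustrative helper matching node = cpu*2 + lv above: banks 0..7
 * cover per-(CPU, level) pools for four CPUs and two levels, bank 8
 * is the shared pool used when no CPU is given. */
static unsigned long color_node(int cpu, enum crit_level lv)
{
	if (cpu == -1)
		return 8;
	return (unsigned long)cpu * 2 + lv;
}
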
diff --git a/litmus/sched_mc2.c b/litmus/sched_mc2.c
index 1e39362547c0..e59030f298ca 100644
--- a/litmus/sched_mc2.c
+++ b/litmus/sched_mc2.c
@@ -630,7 +630,6 @@ struct task_struct* mc2_dispatch(struct sup_reservation_environment* sup_env, st
 		return NULL;
 }
 
-/* not used now */
 static inline void pre_schedule(struct task_struct *prev, int cpu)
 {
 	if (!prev || !is_realtime(prev))
@@ -639,16 +638,15 @@ static inline void pre_schedule(struct task_struct *prev, int cpu)
 		do_partition(CRIT_LEVEL_C, cpu);
 }
 
-/* not used now */
 static inline void post_schedule(struct task_struct *next, int cpu)
 {
 	enum crit_level lev;
-	if (!next) // || !is_realtime(next))
+	if ((!next) || !is_realtime(next))
 		return;
-	if (!is_realtime(next))
+/*	if (!is_realtime(next))
 		lev = NUM_CRIT_LEVELS;
-	else
+	else */
 		lev = get_task_crit_level(next);
 	do_partition(lev, cpu);
 }
 
@@ -660,7 +658,7 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	lt_t now;
 	struct mc2_cpu_state *state = local_cpu_state();
 
-	//pre_schedule(prev, state->cpu);
+	pre_schedule(prev, state->cpu);
 
 	raw_spin_lock(&_global_env.lock);
 	raw_spin_lock(&state->lock);
@@ -705,8 +703,6 @@ static struct task_struct* mc2_schedule(struct task_struct * prev)
 	/* NOTE: drops state->lock */
 	mc2_update_timer_and_unlock(state);
 
-
-
 	if (prev != state->scheduled && is_realtime(prev)) {
 		struct mc2_task_state* tinfo = get_mc2_state(prev);
 		struct reservation* res = tinfo->res_info.client.reservation;
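
Taken together, the sched_mc2.c hunks re-enable cache repartitioning on the scheduling path: pre_schedule() drops an outgoing real-time task's CPU back to the level-C partition, and post_schedule() applies the incoming real-time task's own partition (non-real-time tasks are now left alone rather than mapped to NUM_CRIT_LEVELS). A hypothetical driver loop showing the intended pairing; pick_next_task() stands in for the real decision logic and is not part of this patch:

static struct task_struct *schedule_and_repartition(struct task_struct *prev,
						    int cpu)
{
	struct task_struct *next;

	pre_schedule(prev, cpu);	/* prev leaves: fall back to level C */
	next = pick_next_task(cpu);	/* hypothetical scheduler decision */
	post_schedule(next, cpu);	/* next enters: apply its partition */
	return next;
}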