Diffstat (limited to 'litmus/cache_proc.c')
-rw-r--r--	litmus/cache_proc.c | 257
 1 file changed, 255 insertions(+), 2 deletions(-)
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
index f5879f32232a..85d86c02d6e9 100644
--- a/litmus/cache_proc.c
+++ b/litmus/cache_proc.c
@@ -11,6 +11,7 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 #include <litmus/cache_proc.h>
+#include <litmus/mc2_common.h>
 
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/cacheflush.h>
@@ -144,6 +145,8 @@ static int l1_prefetch_proc;
 static int l2_prefetch_hint_proc;
 static int l2_double_linefill_proc;
 static int l2_data_prefetch_proc;
+static int os_isolation;
+static int use_part;
 
 u32 lockdown_reg[9] = {
 	0x00000000,
@@ -168,6 +171,7 @@ int lock_all;
 int nr_lockregs;
 static raw_spinlock_t cache_lock;
 static raw_spinlock_t prefetch_lock;
+static void ***flusher_pages = NULL;
 
 extern void l2c310_flush_all(void);
 
@@ -379,6 +383,79 @@ void cache_lockdown(u32 lock_val, int cpu)
 	//raw_spin_unlock_irqrestore(&cache_lock, flags);
 }
 
+void do_partition(enum crit_level lv, int cpu)
+{
+	u32 regs;
+	unsigned long flags;
+
+	if (lock_all || !use_part)
+		return;
+	raw_spin_lock_irqsave(&cache_lock, flags);
+	switch(lv) {
+		case CRIT_LEVEL_A:
+			regs = ~way_partitions[cpu*2];
+			regs &= 0x0000ffff;
+			break;
+		case CRIT_LEVEL_B:
+			regs = ~way_partitions[cpu*2+1];
+			regs &= 0x0000ffff;
+			break;
+		case CRIT_LEVEL_C:
+		case NUM_CRIT_LEVELS:
+			regs = ~way_partitions[8];
+			regs &= 0x0000ffff;
+			break;
+		default:
+			BUG();
+
+	}
+	barrier();
+	cache_lockdown(regs, cpu);
+	barrier();
+
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
+
+	flush_cache(0);
+}
+
+int use_part_proc_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0;
+
+	mutex_lock(&lockdown_proc);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+
+
+	printk("USE_PART HANDLER = %d\n", use_part);
+
+out:
+	mutex_unlock(&lockdown_proc);
+	return ret;
+}
+
+int os_isolation_proc_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0;
+
+	mutex_lock(&lockdown_proc);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+
+
+	printk("OS_ISOLATION HANDLER = %d\n", os_isolation);
+
+out:
+	mutex_unlock(&lockdown_proc);
+	return ret;
+}
+
 int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
 		size_t *lenp, loff_t *ppos)
 {
@@ -429,6 +506,30 @@ out:
 	return ret;
 }
 
+void inline enter_irq_mode(void)
+{
+	int cpu = smp_processor_id();
+
+	if (os_isolation == 0)
+		return;
+
+	prev_lockdown_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
+	prev_lockdown_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
+
+	writel_relaxed(way_partitions[8], ld_i_reg(cpu));
+	writel_relaxed(way_partitions[8], ld_d_reg(cpu));
+}
+
+void inline exit_irq_mode(void)
+{
+	int cpu = smp_processor_id();
+
+	if (os_isolation == 0)
+		return;
+	writel_relaxed(prev_lockdown_i_reg[cpu], ld_i_reg(cpu));
+	writel_relaxed(prev_lockdown_d_reg[cpu], ld_d_reg(cpu));
+}
+
 /* Operate on the Cortex-A9's ACTLR register */
 #define ACTLR_L2_PREFETCH_HINT	(1 << 1)
 #define ACTLR_L1_PREFETCH	(1 << 2)
@@ -684,6 +785,20 @@ static struct ctl_table cache_table[] =
 		.maxlen		= sizeof(l2_data_prefetch_proc),
 	},
 	{
+		.procname	= "os_isolation",
+		.mode		= 0644,
+		.proc_handler	= os_isolation_proc_handler,
+		.data		= &os_isolation,
+		.maxlen		= sizeof(os_isolation),
+	},
+	{
+		.procname	= "use_part",
+		.mode		= 0644,
+		.proc_handler	= use_part_proc_handler,
+		.data		= &use_part,
+		.maxlen		= sizeof(use_part),
+	},
+	{
 		.procname	= "do_perf_test",
 		.mode		= 0644,
 		.proc_handler	= do_perf_test_proc_handler,
@@ -838,8 +953,146 @@ extern void v7_flush_kern_cache_all(void);
  */
 void color_flush_page(void *vaddr, size_t size)
 {
-	//v7_flush_kern_dcache_area(vaddr, size);
-	v7_flush_kern_cache_all();
+	v7_flush_kern_dcache_area(vaddr, size);
+	//v7_flush_kern_cache_all();
+}
+
+extern struct page* get_colored_page(unsigned long color);
+
+int setup_flusher_array(void)
+{
+	int color, way, ret = 0;
+	struct page *page;
+
+	if (flusher_pages != NULL)
+		goto out;
+
+	flusher_pages = (void***) kmalloc(MAX_NR_WAYS
+			* sizeof(*flusher_pages), GFP_KERNEL);
+	if (!flusher_pages) {
+		printk(KERN_WARNING "No memory for flusher array!\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (way = 0; way < MAX_NR_WAYS; way++) {
+		void **flusher_color_arr;
+		flusher_color_arr = (void**) kmalloc(sizeof(**flusher_pages)
+				* MAX_NR_COLORS, GFP_KERNEL);
+		if (!flusher_color_arr) {
+			printk(KERN_WARNING "No memory for flusher array!\n");
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		flusher_pages[way] = flusher_color_arr;
+
+		for (color = 0; color < MAX_NR_COLORS; color++) {
+			int node;
+			switch (color) {
+				case 0:
+					node = 32;
+					break;
+				case 1:
+					node = 33;
+					break;
+				case 2:
+					node = 50;
+					break;
+				case 3:
+					node = 51;
+					break;
+				case 4:
+					node = 68;
+					break;
+				case 5:
+					node = 69;
+					break;
+				case 6:
+					node = 86;
+					break;
+				case 7:
+					node = 87;
+					break;
+				case 8:
+					node = 88;
+					break;
+				case 9:
+					node = 105;
+					break;
+				case 10:
+					node = 106;
+					break;
+				case 11:
+					node = 107;
+					break;
+				case 12:
+					node = 108;
+					break;
+				case 13:
+					node = 125;
+					break;
+				case 14:
+					node = 126;
+					break;
+				case 15:
+					node = 127;
+					break;
+			}
+			page = get_colored_page(node);
+			if (!page) {
+				printk(KERN_WARNING "no more colored pages\n");
+				ret = -EINVAL;
+				goto out_free;
+			}
+			flusher_pages[way][color] = page_address(page);
+			if (!flusher_pages[way][color]) {
+				printk(KERN_WARNING "bad page address\n");
+				ret = -EINVAL;
+				goto out_free;
+			}
+		}
+	}
+out:
+	return ret;
+out_free:
+	for (way = 0; way < MAX_NR_WAYS; way++) {
+		for (color = 0; color < MAX_NR_COLORS; color++) {
+			/* not bothering to try and give back colored pages */
+		}
+		kfree(flusher_pages[way]);
+	}
+	kfree(flusher_pages);
+	flusher_pages = NULL;
+	return ret;
+}
+
+void flush_cache(int all)
+{
+	int way, color, cpu;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&cache_lock, flags);
+	cpu = raw_smp_processor_id();
+
+	prev_lbm_i_reg[cpu] = readl_relaxed(ld_i_reg(cpu));
+	prev_lbm_d_reg[cpu] = readl_relaxed(ld_d_reg(cpu));
+	for (way=0;way<MAX_NR_WAYS;way++) {
+		if (( (0x00000001 << way) & (prev_lbm_d_reg[cpu]) ) &&
+			!all)
+			continue;
+		for (color=0;color<MAX_NR_COLORS;color++) {
+			void *vaddr = flusher_pages[way][color];
+			u32 lvalue  = unlocked_way[way];
+			color_read_in_mem_lock(lvalue, LOCK_ALL,
+					       vaddr, vaddr + PAGE_SIZE);
+		}
+
+	}
+
+	writel_relaxed(prev_lbm_i_reg[cpu], ld_i_reg(cpu));
+	writel_relaxed(prev_lbm_d_reg[cpu], ld_d_reg(cpu));
+	raw_spin_unlock_irqrestore(&cache_lock, flags);
 }
 
 #define TRIALS 1000