Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Makefile     |   3
-rw-r--r--  litmus/color_proc.c |   5
-rw-r--r--  litmus/lockdown.c   | 228
3 files changed, 235 insertions, 1 deletion
diff --git a/litmus/Makefile b/litmus/Makefile
index 76a07e8531c6..be5af62431bd 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -28,7 +28,8 @@ obj-y = sched_plugin.o litmus.o \
 	sched_pfp.o \
 	sched_psn_edf.o \
 	srp.o \
-	sync.o
+	sync.o \
+	lockdown.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/color_proc.c b/litmus/color_proc.c
index 8ee624860d71..90d89da0521b 100644
--- a/litmus/color_proc.c
+++ b/litmus/color_proc.c
@@ -104,6 +104,11 @@ static struct ctl_table color_table[] =
 		.mode = 0555,
 		.child = cache_table,
 	},
+	{
+		.procname = "prefetch_test",
+		.mode = 0644,
+		.proc_handler = litmus_test_prefetch,
+	},
 	{ }
 };
 
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
new file mode 100644
index 000000000000..b8c25006bc4f
--- /dev/null
+++ b/litmus/lockdown.c
@@ -0,0 +1,228 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include <linux/smp.h> /* smp_call_func */
+#include <asm/processor.h> /* cpu relax */
+
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/cacheflush.h>
+
+#include <litmus/color.h>
+
+static void __iomem *cache_base;
+static void __iomem *lockreg_d;
+static void __iomem *lockreg_i;
+static raw_spinlock_t prefetch_lock;
+
+static u32 cache_id;
+static int nr_lockregs;
+
+#define ld_d_reg(cpu) ({ int __cpu = cpu; \
+	void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
+			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
+#define ld_i_reg(cpu) ({ int __cpu = cpu; \
+	void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
+			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
+
+#define MAX_NR_WAYS 16
+#define UNLOCK_ALL 0x00000000 /* allocation in any way */
+
+/*
+ * Prefetch by reading the first word of each cache line in a page,
+ * with the data lockdown register set to @lock_val during the reads.
+ *
+ * @lock_val: value written to the lockdown register before prefetching
+ * @unlock_val: value written to the lockdown register when done
+ * @start: start address to prefetch (inclusive)
+ * @end: end address to prefetch (exclusive); assumes start < end and
+ *	that (end - start) is a multiple of the 32-byte cache line size
+ */
+static u32 read_in_page(u32 lock_val,
+			u32 unlock_val,
+			void *start,
+			void *end)
+{
+	unsigned long flags;
+	u32 v = 0, tmp = 0;
+
+	raw_spin_lock_irqsave(&prefetch_lock, flags);
+	__asm__ __volatile__ (
+"	.align 5\n"
+"	str	%[lockval], [%[cachereg]]\n"
+"1:	ldr	%[tmp], [%[addr]], #32	@ 32 bytes = 1 cache line\n"
+"	add	%[val], %[val], %[tmp]\n"
+"	cmp	%[addr], %[end]\n"
+"	bne	1b\n"
+"	str	%[unlockval], [%[cachereg]]\n"
+	: [addr] "+r" (start),
+	  [val] "+r" (v),
+	  [tmp] "+r" (tmp)
+	: [end] "r" (end),
+	  [cachereg] "r" (lockreg_d),
+	  [lockval] "r" (lock_val),
+	  [unlockval] "r" (unlock_val)
+	: "cc");
+	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
+
+	return v;
+}
+
+/*
+ * unlocked[i]: lockdown value that leaves only way i unlocked.
+ *
+ * In the register, 0 = allocation can occur in the corresponding way,
+ * 1 = allocation cannot occur in the corresponding way.
+ */
+static u32 unlocked[MAX_NR_WAYS] = {
+	0xFFFFFFFE, /* way 0 unlocked */
+	0xFFFFFFFD,
+	0xFFFFFFFB,
+	0xFFFFFFF7,
+	0xFFFFFFEF, /* way 4 unlocked */
+	0xFFFFFFDF,
+	0xFFFFFFBF,
+	0xFFFFFF7F,
+	0xFFFFFEFF, /* way 8 unlocked */
+	0xFFFFFDFF,
+	0xFFFFFBFF,
+	0xFFFFF7FF,
+	0xFFFFEFFF, /* way 12 unlocked */
+	0xFFFFDFFF,
+	0xFFFFBFFF,
+	0xFFFF7FFF,
+};
+
+static void dump_lockdown_registers(void)
+{
+	int i;
+
+	for (i = 0; i < nr_lockregs; i++) {
+		printk("Lockdown Data CPU %2d: 0x%8x\n",
+				i, readl_relaxed(ld_d_reg(i)));
+		printk("Lockdown Inst CPU %2d: 0x%8x\n",
+				i, readl_relaxed(ld_i_reg(i)));
+	}
+}
+
+static void test_lockdown(void *ignore)
+{
+	int i;
+
+	printk("Start lockdown test on CPU %d.\n", smp_processor_id());
+
+	for (i = 0; i < nr_lockregs; i++) {
118 | printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i)); printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i)); } | ||
+
+	printk("Lockdown initial state:\n");
+	dump_lockdown_registers();
+	printk("---\n");
+
+	for (i = 0; i < nr_lockregs; i++) {
+		writel_relaxed(1, ld_d_reg(i));
+		writel_relaxed(2, ld_i_reg(i));
+	}
+	printk("Lockdown all data=1 instr=2:\n");
+	dump_lockdown_registers();
+	printk("---\n");
+
+	for (i = 0; i < nr_lockregs; i++) {
+		writel_relaxed((1 << i), ld_d_reg(i));
+		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
+	}
+	printk("Lockdown varies:\n");
+	dump_lockdown_registers();
+	printk("---\n");
+
+	for (i = 0; i < nr_lockregs; i++) {
+		writel_relaxed(0, ld_d_reg(i));
+		writel_relaxed(0, ld_i_reg(i));
+	}
+	printk("Lockdown all zero:\n");
+	dump_lockdown_registers();
+
+	/* Checks that the unlocked array is set up correctly. */
+	for (i = 0; i < MAX_NR_WAYS; i++) {
+		unsigned long expected = 0xFFFFFFFF;
+		clear_bit(i, &expected);
+		if (expected != unlocked[i]) {
+			WARN(1, "Unlock %2d: expected 0x%8x but got 0x%8x\n",
+					i, ((u32)expected), unlocked[i]);
+		}
+	}
+
+	printk("End lockdown test.\n");
+}
+
+#define LOCKREG_TEST_VAL 0x00000002
+#define PREFETCH_SUM_TEST_VAL 65024 /* = 0 + 8 + 16 + ... + 1016 */
+int litmus_test_prefetch(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	struct page *page;
+	void *vaddr;
+	u32 *data;
+	u32 sum = 0;
+	int i;
+
+	if (!write)
+		return 0;
+
+	page = alloc_page(GFP_HIGHUSER | __GFP_MOVABLE);
+	if (!page) {
+		printk(KERN_WARNING "No memory\n");
+		return -ENOMEM;
+	}
+
+	vaddr = phys_to_virt(page_to_phys(page));
+	data = (u32 *)vaddr;
+
+	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+		data[i] = i;
+
+	sum = read_in_page(UNLOCK_ALL, LOCKREG_TEST_VAL, vaddr, vaddr + PAGE_SIZE);
+	if (PREFETCH_SUM_TEST_VAL != sum) {
+		printk("%s: Expected sum %u but got %u!\n", __FUNCTION__,
+				PREFETCH_SUM_TEST_VAL, sum);
+	} else {
+		printk("%s: Prefetch test passed.\n", __FUNCTION__);
+	}
+
+	if (LOCKREG_TEST_VAL != readl_relaxed(lockreg_d)) {
+		printk("%s: Expected lockreg value 0x%8x but got 0x%8x!\n",
+				__FUNCTION__, LOCKREG_TEST_VAL,
+				readl_relaxed(lockreg_d));
+	} else {
+		printk("%s: Lockdown state after prefetch test passed.\n",
+				__FUNCTION__);
+	}
+
+	writel_relaxed(UNLOCK_ALL, lockreg_d);
+
+	__free_page(page);
+
+	return 0;
+}
+
+void litmus_setup_lockdown(void __iomem *base, u32 id)
+{
+	cache_base = base;
+	cache_id = id;
+	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
+	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
+
+	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
+		nr_lockregs = 8;
+	} else {
+		printk("Unknown cache ID!\n");
+		nr_lockregs = 1;
+	}
+
+	raw_spin_lock_init(&prefetch_lock);
+
+	test_lockdown(NULL);
+}
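
Note on exercising the patch: the self-test only runs on a write to the new prefetch_test sysctl entry (the handler returns immediately for reads), and its results are reported via printk. A minimal user-space trigger is sketched below; the /proc/sys path is an assumption based on where color_table appears to be registered and may differ on a given build.

/* Sketch of a user-space trigger for the prefetch self-test.
 * The sysctl path is an assumption; adjust it to wherever
 * color_table is actually mounted on your system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/sys/litmus/color/prefetch_test"; /* assumed path */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Any write triggers litmus_test_prefetch(); results land in the kernel log. */
	if (write(fd, "1\n", 2) < 0)
		perror("write");
	close(fd);
	return 0;
}

Writing to the same file with echo from a root shell has the same effect.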
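For reference, the two hard-coded constants in lockdown.c are easy to re-derive: unlocked[i] is just the 32-bit value ~(1 << i), i.e. every way locked except way i, and PREFETCH_SUM_TEST_VAL is the sum of the first u32 of every 32-byte cache line in a page whose words hold their own indices, 0 + 8 + 16 + ... + 1016 = 65024. The stand-alone sketch below repeats both checks on the host; the 4096-byte PAGE_SIZE is an assumption matching the ARM target.

/* Host-side sanity check (sketch) mirroring the checks in lockdown.c:
 * each unlocked[i] clears exactly bit i, and PREFETCH_SUM_TEST_VAL is
 * the sum of the first u32 of every 32-byte line in a page filled with
 * its own word indices. PAGE_SIZE of 4096 is assumed here.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NR_WAYS		16
#define PAGE_SIZE		4096
#define CACHE_LINE_SIZE		32
#define PREFETCH_SUM_TEST_VAL	65024

static const uint32_t unlocked[MAX_NR_WAYS] = {
	0xFFFFFFFE, 0xFFFFFFFD, 0xFFFFFFFB, 0xFFFFFFF7,
	0xFFFFFFEF, 0xFFFFFFDF, 0xFFFFFFBF, 0xFFFFFF7F,
	0xFFFFFEFF, 0xFFFFFDFF, 0xFFFFFBFF, 0xFFFFF7FF,
	0xFFFFEFFF, 0xFFFFDFFF, 0xFFFFBFFF, 0xFFFF7FFF,
};

int main(void)
{
	uint32_t sum = 0;
	unsigned int i;

	/* unlocked[i] must equal ~(1 << i): only way i open for allocation. */
	for (i = 0; i < MAX_NR_WAYS; i++)
		assert(unlocked[i] == (uint32_t)~(UINT32_C(1) << i));

	/* read_in_page() loads one u32 every 32 bytes: indices 0, 8, ..., 1016. */
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); i += CACHE_LINE_SIZE / sizeof(uint32_t))
		sum += i;

	assert(sum == PREFETCH_SUM_TEST_VAL);
	printf("unlocked[] masks and prefetch sum (%u) check out\n", (unsigned)sum);
	return 0;
}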