aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristopher Kenna <cjk@cs.unc.edu>2012-10-12 01:41:18 -0400
committerChristopher Kenna <cjk@cs.unc.edu>2012-10-12 15:11:38 -0400
commita15b72cc9e991b37b6929c5c6e0ee014fb542ecd (patch)
tree50b8a273c4a5dfe64be87ff27576f180bd5b22e0
parent12826089e20a61cc5afe2bf1108f561952ec7f9a (diff)
Add basic lockdown functionality, including a method that prefetches a page while holding the L2 lockdown register.
Conflicts: arch/arm/mm/cache-l2x0.c Signed-off-by: Christopher Kenna <cjk@cs.unc.edu>
-rw-r--r--arch/arm/mm/cache-l2x0.c28
-rw-r--r--include/litmus/color.h5
-rw-r--r--litmus/Makefile3
-rw-r--r--litmus/color_proc.c5
-rw-r--r--litmus/lockdown.c228
5 files changed, 256 insertions, 13 deletions
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 4f98e8e07835..7783b715f60c 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -31,6 +31,9 @@ static void __iomem *l2x0_base;
31static DEFINE_SPINLOCK(l2x0_lock); 31static DEFINE_SPINLOCK(l2x0_lock);
32static uint32_t l2x0_way_mask; /* Bitmask of active ways */ 32static uint32_t l2x0_way_mask; /* Bitmask of active ways */
33static uint32_t l2x0_size; 33static uint32_t l2x0_size;
34static u32 l2x0_cache_id;
35static unsigned int l2x0_sets;
36static unsigned int l2x0_ways;
34 37
35static inline void cache_wait_way(void __iomem *reg, unsigned long mask) 38static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
36{ 39{
@@ -301,47 +304,46 @@ static void __init l2x0_unlock(__u32 cache_id)
301void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) 304void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
302{ 305{
303 __u32 aux; 306 __u32 aux;
304 __u32 cache_id;
305 __u32 way_size = 0; 307 __u32 way_size = 0;
306 int ways;
307 const char *type; 308 const char *type;
308 309
309 l2x0_base = base; 310 l2x0_base = base;
310 311
311 cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); 312 l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
312 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 313 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
313 314
314 aux &= aux_mask; 315 aux &= aux_mask;
315 aux |= aux_val; 316 aux |= aux_val;
316 317
317 /* Determine the number of ways */ 318 /* Determine the number of ways */
318 switch (cache_id & L2X0_CACHE_ID_PART_MASK) { 319 switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
319 case L2X0_CACHE_ID_PART_L310: 320 case L2X0_CACHE_ID_PART_L310:
320 if (aux & (1 << 16)) 321 if (aux & (1 << 16))
321 ways = 16; 322 l2x0_ways = 16;
322 else 323 else
323 ways = 8; 324 l2x0_ways = 8;
324 type = "L310"; 325 type = "L310";
325 break; 326 break;
326 case L2X0_CACHE_ID_PART_L210: 327 case L2X0_CACHE_ID_PART_L210:
327 ways = (aux >> 13) & 0xf; 328 l2x0_ways = (aux >> 13) & 0xf;
328 type = "L210"; 329 type = "L210";
329 break; 330 break;
330 default: 331 default:
331 /* Assume unknown chips have 8 ways */ 332 /* Assume unknown chips have 8 ways */
332 ways = 8; 333 l2x0_ways = 8;
333 type = "L2x0 series"; 334 type = "L2x0 series";
334 break; 335 break;
335 } 336 }
336 337
337 l2x0_way_mask = (1 << ways) - 1; 338 l2x0_way_mask = (1 << l2x0_ways) - 1;
338 339
339 /* 340 /*
340 * L2 cache Size = Way size * Number of ways 341 * L2 cache Size = Way size * Number of ways
341 */ 342 */
342 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; 343 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
343 way_size = 1 << (way_size + 3); 344 way_size = SZ_1K << (way_size + 3);
344 l2x0_size = ways * way_size * SZ_1K; 345 l2x0_size = l2x0_ways * way_size;
346 l2x0_sets = way_size / CACHE_LINE_SIZE;
345 347
346 /* 348 /*
347 * Check if l2x0 controller is already enabled. 349 * Check if l2x0 controller is already enabled.
@@ -377,5 +379,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
377 379
378 printk(KERN_INFO "%s cache controller enabled\n", type); 380 printk(KERN_INFO "%s cache controller enabled\n", type);
379 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", 381 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
380 ways, cache_id, aux, l2x0_size); 382 l2x0_ways, l2x0_cache_id, aux, l2x0_size);
383
384 litmus_setup_lockdown(l2x0_base, l2x0_cache_id);
381} 385}
diff --git a/include/litmus/color.h b/include/litmus/color.h
index 615ebd80703c..5eef403a95c1 100644
--- a/include/litmus/color.h
+++ b/include/litmus/color.h
@@ -20,10 +20,15 @@ struct color_cache_info {
20 unsigned long nr_colors; 20 unsigned long nr_colors;
21}; 21};
22 22
23int litmus_test_prefetch(struct ctl_table *, int, void __user *,
24 size_t *, loff_t *);
25
23/* defined in litmus/color.c */ 26/* defined in litmus/color.c */
24extern struct color_cache_info color_cache_info; 27extern struct color_cache_info color_cache_info;
25extern unsigned long color_chunk; 28extern unsigned long color_chunk;
26 29
30void litmus_setup_lockdown(void __iomem*, u32);
31
27struct vm_area_struct; 32struct vm_area_struct;
28struct ctl_table; 33struct ctl_table;
29 34
diff --git a/litmus/Makefile b/litmus/Makefile
index 76a07e8531c6..be5af62431bd 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -28,7 +28,8 @@ obj-y = sched_plugin.o litmus.o \
28 sched_pfp.o \ 28 sched_pfp.o \
29 sched_psn_edf.o \ 29 sched_psn_edf.o \
30 srp.o \ 30 srp.o \
31 sync.o 31 sync.o \
32 lockdown.o
32 33
33obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o 34obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
34obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o 35obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/color_proc.c b/litmus/color_proc.c
index 8ee624860d71..90d89da0521b 100644
--- a/litmus/color_proc.c
+++ b/litmus/color_proc.c
@@ -104,6 +104,11 @@ static struct ctl_table color_table[] =
104 .mode = 0555, 104 .mode = 0555,
105 .child = cache_table, 105 .child = cache_table,
106 }, 106 },
107 {
108 .procname = "prefetch_test",
109 .mode = 0644,
110 .proc_handler = litmus_test_prefetch,
111 },
107 { } 112 { }
108}; 113};
109 114
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
new file mode 100644
index 000000000000..b8c25006bc4f
--- /dev/null
+++ b/litmus/lockdown.c
@@ -0,0 +1,228 @@
1#include <linux/init.h>
2#include <linux/types.h>
3#include <linux/kernel.h>
4#include <linux/io.h>
5#include <linux/spinlock.h>
6
7#include <linux/smp.h> /* smp_call_func */
8#include <asm/processor.h> /* cpu relax */
9
10#include <asm/hardware/cache-l2x0.h>
11#include <asm/cacheflush.h>
12
13#include <litmus/color.h>
14
15static void __iomem *cache_base;
16static void __iomem *lockreg_d;
17static void __iomem *lockreg_i;
18static raw_spinlock_t prefetch_lock;
19
20static u32 cache_id;
21static int nr_lockregs;
22
23#define ld_d_reg(cpu) ({ int __cpu = cpu; \
24 void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
25 __cpu * L2X0_LOCKDOWN_STRIDE; __v; })
26#define ld_i_reg(cpu) ({ int __cpu = cpu; \
27 void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
28 __cpu * L2X0_LOCKDOWN_STRIDE; __v; })
29
30#define MAX_NR_WAYS 16
31#define UNLOCK_ALL 0x00000000 /* allocation in any way */
32
33/*
34 * Prefetch by reading the first word of each cache line in a page.
35 *
36 * @lockdown_reg: address of the lockdown register to write
37 * @lock_val: value to be written to @lockdown_reg
38 * @unlock_val: will unlock the cache to this value
39 * @addr: start address to be prefetched
40 * @end_addr: end address to prefetch (exclusive)
41 *
42 * Assumes: addr < end_addr AND addr != end_addr
43 */
44static u32 read_in_page(u32 lock_val,
45 u32 unlock_val,
46 void *start,
47 void *end)
48{
49 unsigned long flags;
50 u32 v = 0, tmp = 0;
51
52 raw_spin_lock_irqsave(&prefetch_lock, flags);
53 __asm__ __volatile__ (
54" .align 5\n"
55" str %[lockval], [%[cachereg]]\n"
56"1: ldr %[tmp], [%[addr]], #32 @ 32 bytes = 1 cache line\n"
57" add %[val], %[val], %[tmp]\n"
58" cmp %[addr], %[end]\n"
59" bne 1b\n"
60" str %[unlockval], [%[cachereg]]\n"
61 : [addr] "+r" (start),
62 [val] "+r" (v),
63 [tmp] "+r" (tmp)
64 : [end] "r" (end),
65 [cachereg] "r" (lockreg_d),
66 [lockval] "r" (lock_val),
67 [unlockval] "r" (unlock_val)
68 : "cc");
69 raw_spin_unlock_irqrestore(&prefetch_lock, flags);
70
71 return v;
72}
73
74/*
75 * unlocked[i] : allocation can occur in way i
76 *
77 * 0 = allocation can occur in the corresponding way
78 * 1 = allocation cannot occur in the corresponding way
79 */
80static u32 unlocked[MAX_NR_WAYS] = {
81 0xFFFFFFFE, /* way 0 unlocked */
82 0xFFFFFFFD,
83 0xFFFFFFFB,
84 0xFFFFFFF7,
85 0xFFFFFFEF, /* way 4 unlocked */
86 0xFFFFFFDF,
87 0xFFFFFFBF,
88 0xFFFFFF7F,
89 0xFFFFFEFF, /* way 8 unlocked */
90 0xFFFFFDFF,
91 0xFFFFFBFF,
92 0xFFFFF7FF,
93 0xFFFFEFFF, /* way 12 unlocked */
94 0xFFFFDFFF,
95 0xFFFFBFFF,
96 0xFFFF7FFF,
97};
98
99static void dump_lockdown_registers(void)
100{
101 int i;
102
103 for (i = 0; i < nr_lockregs; i++) {
104 printk("Lockdown Data CPU %2d: 0x%8x\n",
105 i, readl_relaxed(ld_d_reg(i)));
106 printk("Lockdown Inst CPU %2d: 0x%8x\n",
107 i, readl_relaxed(ld_d_reg(i)));
108 }
109}
110
111static void test_lockdown(void *ignore)
112{
113 int i;
114
115 printk("Start lockdown test on CPU %d.\n", smp_processor_id());
116
117 for (i = 0; i < nr_lockregs; i++) {
118 printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i)); printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i)); }
119
120 printk("Lockdown initial state:\n");
121 dump_lockdown_registers();
122 printk("---\n");
123
124 for (i = 0; i < nr_lockregs; i++) {
125 writel_relaxed(1, ld_d_reg(i));
126 writel_relaxed(2, ld_i_reg(i));
127 }
128 printk("Lockdown all data=1 instr=2:\n");
129 dump_lockdown_registers();
130 printk("---\n");
131
132 for (i = 0; i < nr_lockregs; i++) {
133 writel_relaxed((1 << i), ld_d_reg(i));
134 writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
135 }
136 printk("Lockdown varies:\n");
137 dump_lockdown_registers();
138 printk("---\n");
139
140 for (i = 0; i < nr_lockregs; i++) {
141 writel_relaxed(0, ld_d_reg(i));
142 writel_relaxed(0, ld_i_reg(i));
143 }
144 printk("Lockdown all zero:\n");
145 dump_lockdown_registers();
146
147 /* Checks that the unlocked array is set up correctly. */
148 for (i = 0; i < MAX_NR_WAYS; i++) {
149 unsigned long expected = 0xFFFFFFFF;
150 clear_bit(i, &expected);
151 if (expected != unlocked[i]) {
152 WARN(1, "Unlock %2d: expected 0x%8x but got 0x%8x\n",
153 i, ((u32)expected), unlocked[i]);
154 }
155 }
156
157 printk("End lockdown test.\n");
158}
159
160#define LOCKREG_TEST_VAL 0x00000002
161#define PREFETCH_SUM_TEST_VAL 65024 /* = 0 + 8 + ... + 127 */
162int litmus_test_prefetch(struct ctl_table *table, int write, void __user *buffer,
163 size_t *lenp, loff_t *ppos)
164{
165 struct page *page;
166 void *vaddr;
167 u32 *data;
168 u32 sum = 0;
169 int i;
170
171 if (!write)
172 return 0;
173
174 page = alloc_page(GFP_HIGHUSER | __GFP_MOVABLE);
175 if (!page) {
176 printk(KERN_WARNING "No memory\n");
177 return -ENOMEM;
178 }
179
180 vaddr = phys_to_virt(page_to_phys(page));
181 data = (u32*)vaddr;
182
183 for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
184 data[i] = i;
185
186 sum = read_in_page(UNLOCK_ALL, LOCKREG_TEST_VAL, vaddr, vaddr + PAGE_SIZE);
187 if (PREFETCH_SUM_TEST_VAL != sum) {
188 printk("%s: Expected sum %u but got %u!\n", __FUNCTION__,
189 PREFETCH_SUM_TEST_VAL, sum);
190 } else {
191 printk("%s: Prefetch test passed.\n", __FUNCTION__);
192 }
193
194 if (LOCKREG_TEST_VAL != readl_relaxed(lockreg_d)) {
195 printk("%s: Expected lockreg value 0x%8x but got 0x%8x!\n",
196 __FUNCTION__, LOCKREG_TEST_VAL,
197 readl_relaxed(lockreg_d));
198 } else {
199 printk("%s: Lockdown state after prefetch test passed.\n",
200 __FUNCTION__);
201 }
202
203 writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
204
205 free_page(page_to_pfn(page));
206
207 return 0;
208}
209
210void litmus_setup_lockdown(void __iomem *base, u32 id)
211{
212 cache_base = base;
213 cache_id = id;
214 lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
215 lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
216
217
218 if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
219 nr_lockregs = 8;
220 } else {
221 printk("Unknown cache ID!\n");
222 nr_lockregs = 1;
223 }
224
225 raw_spin_lock_init(&prefetch_lock);
226
227 test_lockdown(NULL);
228}