 arch/arm/boot/compressed/Makefile |   1 +
 arch/arm/include/asm/unistd.h     |   2 +-
 arch/arm/kernel/calls.S           |   1 +
 arch/arm/mm/cache-l2x0.c          |  10 +-
 arch/x86/syscalls/syscall_32.tbl  |   1 +
 arch/x86/syscalls/syscall_64.tbl  |   1 +
 include/litmus/cache_proc.h       |  11 +
 include/litmus/unistd_32.h        |   3 +-
 include/litmus/unistd_64.h        |   4 +-
 litmus/Makefile                   |   3 +-
 litmus/cache_proc.c               | 965 ++++++++++++++++++++++++++++++++++++
 litmus/litmus.c                   |  39 ++
 12 files changed, 1036 insertions(+), 5 deletions(-)
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 6e1fb2b2ecc7..e2284fef1ce3 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -107,6 +107,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 7197bbe4dda1..3b7d36b921d3 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -19,7 +19,7 @@
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls  (388 + NR_litmus_syscalls + 3)
+#define __NR_syscalls  (388 + NR_litmus_syscalls + 2)
 
 
 /*
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 3e002969469a..a272b84a2fe7 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -410,6 +410,7 @@
 		CALL(sys_release_ts)
 		CALL(sys_null_call)
 /* 400 */	CALL(sys_get_current_budget)
+		CALL(sys_test_call)
 
 
 #ifndef syscalls_counted
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index e309c8f35af5..71c969a1d790 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -33,6 +33,8 @@
 #include "cache-tauros3.h"
 #include "cache-aurora-l2.h"
 
+#include <litmus/cache_proc.h>
+
 struct l2c_init_data {
 	const char *type;
 	unsigned way_size_0;
@@ -726,7 +728,6 @@ static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
 
 	if (n) {
 		unsigned i;
-
 		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
 		for (i = 0; i < n; i++)
 			pr_cont(" %s", errata[i]);
@@ -774,6 +775,11 @@ static const struct l2c_init_data l2c310_init_fns __initconst = {
 	},
 };
 
+void l2c310_flush_all(void)
+{
+	l2c210_flush_all();
+};
+
 static int __init __l2c_init(const struct l2c_init_data *data,
 			     u32 aux_val, u32 aux_mask, u32 cache_id)
 {
@@ -876,6 +882,8 @@ static int __init __l2c_init(const struct l2c_init_data *data,
 	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
 		data->type, cache_id, aux);
 
+	litmus_setup_lockdown(l2x0_base, cache_id);
+
 	return 0;
 }
 
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index a0ec8abc51f3..af5e3ccf8b31 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -378,3 +378,4 @@
 369	i386	release_ts		sys_release_ts
 370	i386	null_call		sys_null_call
 371	i386	get_current_budget	sys_get_current_budget
+372	i386	test_call		sys_test_call
\ No newline at end of file
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 04f5b7483db3..e87042d413e9 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -343,6 +343,7 @@
 361	common	release_ts		sys_release_ts
 362	common	null_call		sys_null_call
 363	common	get_current_budget	sys_get_current_budget
+364	common	test_call		sys_test_call
 
 
 #
diff --git a/include/litmus/cache_proc.h b/include/litmus/cache_proc.h
new file mode 100644
index 000000000000..586224118435
--- /dev/null
+++ b/include/litmus/cache_proc.h
@@ -0,0 +1,11 @@
+#ifndef LITMUS_CACHE_PROC_H
+#define LITMUS_CACHE_PROC_H
+
+#ifdef __KERNEL__
+
+void litmus_setup_lockdown(void __iomem*, u32);
+
+#endif
+
+#endif
+
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 570b1f54d534..43527ee2f6b6 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -18,5 +18,6 @@
 #define __NR_release_ts		__LSC(10)
 #define __NR_null_call		__LSC(11)
 #define __NR_get_current_budget	__LSC(12)
+#define __NR_test_call		__LSC(13)
 
-#define NR_litmus_syscalls	13
+#define NR_litmus_syscalls	14
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index 3096bf2f2798..1a741bd3e5da 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -31,5 +31,7 @@ __SYSCALL(__NR_release_ts, sys_release_ts)
 __SYSCALL(__NR_null_call, sys_null_call)
 #define __NR_get_current_budget	__LSC(12)
 __SYSCALL(____NR_get_current_budget, sys_get_current_budget)
+#define __NR_test_call		__LSC(13)
+__SYSCALL(__NR_test_call, sys_test_call)
 
-#define NR_litmus_syscalls	13
+#define NR_litmus_syscalls	14
diff --git a/litmus/Makefile b/litmus/Makefile
index 7970cd55e7fd..f80a3c0d05aa 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -21,7 +21,8 @@ obj-y = sched_plugin.o litmus.o \
 	   uncachedev.o \
 	   sched_gsn_edf.o \
 	   sched_psn_edf.o \
-	   sched_pfp.o
+	   sched_pfp.o \
+	   cache_proc.o
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
diff --git a/litmus/cache_proc.c b/litmus/cache_proc.c
new file mode 100644
index 000000000000..f5879f32232a
--- /dev/null
+++ b/litmus/cache_proc.c
@@ -0,0 +1,965 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysctl.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+#include <litmus/litmus_proc.h>
+#include <litmus/sched_trace.h>
+#include <litmus/cache_proc.h>
+
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/cacheflush.h>
+
+
+#define UNLOCK_ALL	0x00000000	/* allocation in any way */
+#define LOCK_ALL	(~UNLOCK_ALL)
+#define MAX_NR_WAYS	16
+#define MAX_NR_COLORS	16
+
+void mem_lock(u32 lock_val, int cpu);
+
+/*
+ * unlocked_way[i] : allocation can occur in way i
+ *
+ * 0 = allocation can occur in the corresponding way
+ * 1 = allocation cannot occur in the corresponding way
+ */
+u32 unlocked_way[MAX_NR_WAYS] = {
+	0xFFFFFFFE, /* way 0 unlocked */
+	0xFFFFFFFD,
+	0xFFFFFFFB,
+	0xFFFFFFF7,
+	0xFFFFFFEF, /* way 4 unlocked */
+	0xFFFFFFDF,
+	0xFFFFFFBF,
+	0xFFFFFF7F,
+	0xFFFFFEFF, /* way 8 unlocked */
+	0xFFFFFDFF,
+	0xFFFFFBFF,
+	0xFFFFF7FF,
+	0xFFFFEFFF, /* way 12 unlocked */
+	0xFFFFDFFF,
+	0xFFFFBFFF,
+	0xFFFF7FFF,
+};
+
+u32 nr_unlocked_way[MAX_NR_WAYS+1] = {
+	0x0000FFFF, /* all ways are locked. usable = 0*/
+	0x0000FFFE, /* way ~0 unlocked. usable = 1 */
+	0x0000FFFC,
+	0x0000FFF8,
+	0x0000FFF0,
+	0x0000FFE0,
+	0x0000FFC0,
+	0x0000FF80,
+	0x0000FF00,
+	0x0000FE00,
+	0x0000FC00,
+	0x0000F800,
+	0x0000F000,
+	0x0000E000,
+	0x0000C000,
+	0x00008000,
+	0x00000000, /* way ~15 unlocked. usable = 16 */
+};
+
+u32 way_partition[4] = {
+	0xfffffff0, /* cpu0 */
+	0xffffff0f, /* cpu1 */
+	0xfffff0ff, /* cpu2 */
+	0xffff0fff, /* cpu3 */
+};
+
+u32 way_partitions[9] = {
+	0xffff0003, /* cpu0 A */
+	0xffff0003, /* cpu0 B */
+	0xffff000C, /* cpu1 A */
+	0xffff000C, /* cpu1 B */
+	0xffff0030, /* cpu2 A */
+	0xffff0030, /* cpu2 B */
+	0xffff00C0, /* cpu3 A */
+	0xffff00C0, /* cpu3 B */
+	0xffffff00, /* lv C */
+};
+
+u32 prev_lockdown_d_reg[5] = {
+	0x0000FF00,
+	0x0000FF00,
+	0x0000FF00,
+	0x0000FF00,
+	0x000000FF, /* share with level-C */
+};
+
+u32 prev_lockdown_i_reg[5] = {
+	0x0000FF00,
+	0x0000FF00,
+	0x0000FF00,
+	0x0000FF00,
+	0x000000FF, /* share with level-C */
+};
+
+u32 prev_lbm_i_reg[8] = {
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+u32 prev_lbm_d_reg[8] = {
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+static void __iomem *cache_base;
+static void __iomem *lockreg_d;
+static void __iomem *lockreg_i;
+
+static u32 cache_id;
+
+struct mutex actlr_mutex;
+struct mutex l2x0_prefetch_mutex;
+struct mutex lockdown_proc;
+static u32 way_partition_min;
+static u32 way_partition_max;
+
+static int zero = 0;
+static int one = 1;
+
+static int l1_prefetch_proc;
+static int l2_prefetch_hint_proc;
+static int l2_double_linefill_proc;
+static int l2_data_prefetch_proc;
+
+u32 lockdown_reg[9] = {
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+	0x00000000,
+};
+
+
+#define ld_d_reg(cpu) ({ int __cpu = cpu; \
+			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_D_BASE + \
+			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
+#define ld_i_reg(cpu) ({ int __cpu = cpu; \
+			void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
+			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
+
+int lock_all;
+int nr_lockregs;
+static raw_spinlock_t cache_lock;
+static raw_spinlock_t prefetch_lock;
+
+extern void l2c310_flush_all(void);
+
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
+{
+	/* wait for cache operation by line or way to complete */
+	while (readl_relaxed(reg) & mask)
+		cpu_relax();
+}
+
+#ifdef CONFIG_CACHE_L2X0
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
+static inline void cache_sync(void)
+{
+	void __iomem *base = cache_base;
+
+	writel_relaxed(0, base + L2X0_CACHE_SYNC);
+	cache_wait(base + L2X0_CACHE_SYNC, 1);
+}
+
+static void print_lockdown_registers(int cpu)
+{
+	int i;
+	//for (i = 0; i < nr_lockregs; i++) {
+	for (i = 0; i < 4; i++) {
+		printk("P%d Lockdown Data CPU %2d: 0x%04x\n", cpu,
+				i, readl_relaxed(ld_d_reg(i)));
+		printk("P%d Lockdown Inst CPU %2d: 0x%04x\n", cpu,
+				i, readl_relaxed(ld_i_reg(i)));
+	}
+}
+
+static void test_lockdown(void *ignore)
+{
+	int i, cpu;
+
+	cpu = smp_processor_id();
+	printk("Start lockdown test on CPU %d.\n", cpu);
+
+	for (i = 0; i < nr_lockregs; i++) {
+		printk("CPU %2d data reg: 0x%8p\n", i, ld_d_reg(i));
+		printk("CPU %2d inst reg: 0x%8p\n", i, ld_i_reg(i));
+	}
+
+	printk("Lockdown initial state:\n");
+	print_lockdown_registers(cpu);
+	printk("---\n");
+
+	for (i = 0; i < nr_lockregs; i++) {
+		writel_relaxed(1, ld_d_reg(i));
+		writel_relaxed(2, ld_i_reg(i));
+	}
+	printk("Lockdown all data=1 instr=2:\n");
+	print_lockdown_registers(cpu);
+	printk("---\n");
+
+	for (i = 0; i < nr_lockregs; i++) {
+		writel_relaxed((1 << i), ld_d_reg(i));
+		writel_relaxed(((1 << 8) >> i), ld_i_reg(i));
+	}
+	printk("Lockdown varies:\n");
+	print_lockdown_registers(cpu);
+	printk("---\n");
+
+	for (i = 0; i < nr_lockregs; i++) {
+		writel_relaxed(UNLOCK_ALL, ld_d_reg(i));
+		writel_relaxed(UNLOCK_ALL, ld_i_reg(i));
+	}
+	printk("Lockdown all zero:\n");
+	print_lockdown_registers(cpu);
+
+	printk("End lockdown test.\n");
+}
+
+void litmus_setup_lockdown(void __iomem *base, u32 id)
+{
+	cache_base = base;
+	cache_id = id;
+	lockreg_d = cache_base + L2X0_LOCKDOWN_WAY_D_BASE;
+	lockreg_i = cache_base + L2X0_LOCKDOWN_WAY_I_BASE;
+
+	if (L2X0_CACHE_ID_PART_L310 == (cache_id & L2X0_CACHE_ID_PART_MASK)) {
+		nr_lockregs = 8;
+	} else {
+		printk("Unknown cache ID!\n");
+		nr_lockregs = 1;
+	}
+
+	mutex_init(&actlr_mutex);
+	mutex_init(&l2x0_prefetch_mutex);
+	mutex_init(&lockdown_proc);
+	raw_spin_lock_init(&cache_lock);
+	raw_spin_lock_init(&prefetch_lock);
+
+	test_lockdown(NULL);
+}
+
+int way_partition_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0, i;
+	unsigned long flags;
+
+	mutex_lock(&lockdown_proc);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+
+	if (write) {
+		printk("Way-partition settings:\n");
+		for (i = 0; i < 9; i++) {
+			printk("0x%08X\n", way_partitions[i]);
+		}
+		for (i = 0; i < 4; i++) {
+			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+			writel_relaxed(~way_partitions[i*2], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+		}
+	}
+
+	local_irq_save(flags);
+	print_lockdown_registers(smp_processor_id());
+	l2c310_flush_all();
+	local_irq_restore(flags);
+out:
+	mutex_unlock(&lockdown_proc);
+	return ret;
+}
+
+int lock_all_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0, i;
+	unsigned long flags;
+
+	mutex_lock(&lockdown_proc);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+
+	if (write && lock_all == 1) {
+		for (i = 0; i < nr_lockregs; i++) {
+			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+			writel_relaxed(0xFFFF, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+		}
+/*
+		for (i = 0; i < nr_lockregs; i++) {
+			barrier();
+			mem_lock(LOCK_ALL, i);
+			barrier();
+			//writel_relaxed(nr_unlocked_way[0], ld_d_reg(i));
+			//writel_relaxed(nr_unlocked_way[0], ld_i_reg(i));
+		}
+*/
+	}
+	if (write && lock_all == 0) {
+		for (i = 0; i < nr_lockregs; i++) {
+			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+			writel_relaxed(0x0, cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+		}
+/*
+		for (i = 0; i < nr_lockregs; i++) {
+			barrier();
+			mem_lock(UNLOCK_ALL, i);
+			barrier();
+			//writel_relaxed(nr_unlocked_way[16], ld_d_reg(i));
+			//writel_relaxed(nr_unlocked_way[16], ld_i_reg(i));
+		}
+*/
+	}
+	printk("LOCK_ALL HANDLER\n");
+	local_irq_save(flags);
+	print_lockdown_registers(smp_processor_id());
+	l2c310_flush_all();
+	local_irq_restore(flags);
+out:
+	mutex_unlock(&lockdown_proc);
+	return ret;
+}
+
+void cache_lockdown(u32 lock_val, int cpu)
+{
+	//unsigned long flags;
+	//raw_spin_lock_irqsave(&cache_lock, flags);
+
+	__asm__ __volatile__ (
+"	str	%[lockval], [%[dcachereg]]\n"
+"	str	%[lockval], [%[icachereg]]\n"
+	:
+	: [dcachereg] "r" (ld_d_reg(cpu)),
+	  [icachereg] "r" (ld_i_reg(cpu)),
+	  [lockval] "r" (lock_val)
+	: "cc");
+
+	//raw_spin_unlock_irqrestore(&cache_lock, flags);
+}
+
+int lockdown_reg_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0, i;
+
+	mutex_lock(&lockdown_proc);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+
+	if (write) {
+		for (i = 0; i < nr_lockregs; i++) {
+			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+			writel_relaxed(lockdown_reg[i], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+		}
+	}
+
+out:
+	mutex_unlock(&lockdown_proc);
+	return ret;
+}
+
+int lockdown_global_handler(struct ctl_table *table, int write, void __user *buffer,
+		size_t *lenp, loff_t *ppos)
+{
+	int ret = 0, i;
+
+	mutex_lock(&lockdown_proc);
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret)
+		goto out;
+
+	if (write) {
+		for (i = 0; i < nr_lockregs; i++) {
+			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_D_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+			writel_relaxed(lockdown_reg[8], cache_base + L2X0_LOCKDOWN_WAY_I_BASE +
+				       i * L2X0_LOCKDOWN_STRIDE);
+		}
+	}
+
+out:
+	mutex_unlock(&lockdown_proc);
+	return ret;
+}
+
+/* Operate on the Cortex-A9's ACTLR register */
+#define ACTLR_L2_PREFETCH_HINT	(1 << 1)
+#define ACTLR_L1_PREFETCH	(1 << 2)
+
+/*
+ * Change the ACTLR.
+ * @mode	- If 1 (0), set (clear) the bit given in @mask in the ACTLR.
+ * @mask	- A mask in which one bit is set to operate on the ACTLR.
+ */
+static void actlr_change(int mode, int mask)
+{
+	u32 orig_value, new_value, reread_value;
+
+	if (0 != mode && 1 != mode) {
+		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
+				__FUNCTION__);
+		return;
+	}
+
+	/* get the original value */
+	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (orig_value));
+
+	if (0 == mode)
+		new_value = orig_value & ~(mask);
+	else
+		new_value = orig_value | mask;
+
+	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (new_value));
+	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (reread_value));
+
+	printk("ACTLR: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
+			orig_value, new_value, reread_value);
+}
+
+int litmus_l1_prefetch_proc_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret, mode;
+
+	mutex_lock(&actlr_mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+	if (!ret && write) {
+		mode = *((int*)table->data);
+		actlr_change(mode, ACTLR_L1_PREFETCH);
+	}
+	mutex_unlock(&actlr_mutex);
+
+	return ret;
+}
+
+int litmus_l2_prefetch_hint_proc_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret, mode;
+
+	mutex_lock(&actlr_mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (!ret && write) {
+		mode = *((int*)table->data);
+		actlr_change(mode, ACTLR_L2_PREFETCH_HINT);
+	}
+	mutex_unlock(&actlr_mutex);
+
+	return ret;
+}
+
+
+/* Operate on the PL-310's Prefetch Control Register, L310_PREFETCH_CTRL */
+#define L2X0_PREFETCH_DOUBLE_LINEFILL	(1 << 30)
+#define L2X0_PREFETCH_INST_PREFETCH	(1 << 29)
+#define L2X0_PREFETCH_DATA_PREFETCH	(1 << 28)
+static void l2x0_prefetch_change(int mode, int mask)
+{
+	u32 orig_value, new_value, reread_value;
+
+	if (0 != mode && 1 != mode) {
+		printk(KERN_WARNING "Called %s with mode != 0 and mode != 1.\n",
+				__FUNCTION__);
+		return;
+	}
+
+	orig_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
+
+	if (0 == mode)
+		new_value = orig_value & ~(mask);
+	else
+		new_value = orig_value | mask;
+
+	writel_relaxed(new_value, cache_base + L310_PREFETCH_CTRL);
+	reread_value = readl_relaxed(cache_base + L310_PREFETCH_CTRL);
+
+	printk("l2x0 prefetch: orig: 0x%8x  wanted: 0x%8x  new: 0x%8x\n",
+			orig_value, new_value, reread_value);
+}
+
+int litmus_l2_double_linefill_proc_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret, mode;
+
+	mutex_lock(&l2x0_prefetch_mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (!ret && write) {
+		mode = *((int*)table->data);
+		l2x0_prefetch_change(mode, L2X0_PREFETCH_DOUBLE_LINEFILL);
+	}
+	mutex_unlock(&l2x0_prefetch_mutex);
+
+	return ret;
+}
+
+int litmus_l2_data_prefetch_proc_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret, mode;
+
+	mutex_lock(&l2x0_prefetch_mutex);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (!ret && write) {
+		mode = *((int*)table->data);
+		l2x0_prefetch_change(mode, L2X0_PREFETCH_DATA_PREFETCH|L2X0_PREFETCH_INST_PREFETCH);
+	}
+	mutex_unlock(&l2x0_prefetch_mutex);
+
+	return ret;
+}
+
+int do_perf_test_proc_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos);
+
+int setup_flusher_proc_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos);
+
+static struct ctl_table cache_table[] =
+{
+	{
+		.procname	= "C0_LA_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[0],
+		.maxlen		= sizeof(way_partitions[0]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "C0_LB_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[1],
+		.maxlen		= sizeof(way_partitions[1]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "C1_LA_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[2],
+		.maxlen		= sizeof(way_partitions[2]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "C1_LB_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[3],
+		.maxlen		= sizeof(way_partitions[3]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "C2_LA_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[4],
+		.maxlen		= sizeof(way_partitions[4]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "C2_LB_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[5],
+		.maxlen		= sizeof(way_partitions[5]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "C3_LA_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[6],
+		.maxlen		= sizeof(way_partitions[6]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "C3_LB_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[7],
+		.maxlen		= sizeof(way_partitions[7]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "Call_LC_way",
+		.mode		= 0666,
+		.proc_handler	= way_partition_handler,
+		.data		= &way_partitions[8],
+		.maxlen		= sizeof(way_partitions[8]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "lock_all",
+		.mode		= 0666,
+		.proc_handler	= lock_all_handler,
+		.data		= &lock_all,
+		.maxlen		= sizeof(lock_all),
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname	= "l1_prefetch",
+		.mode		= 0644,
+		.proc_handler	= litmus_l1_prefetch_proc_handler,
+		.data		= &l1_prefetch_proc,
+		.maxlen		= sizeof(l1_prefetch_proc),
+	},
+	{
+		.procname	= "l2_prefetch_hint",
+		.mode		= 0644,
+		.proc_handler	= litmus_l2_prefetch_hint_proc_handler,
+		.data		= &l2_prefetch_hint_proc,
+		.maxlen		= sizeof(l2_prefetch_hint_proc),
+	},
+	{
+		.procname	= "l2_double_linefill",
+		.mode		= 0644,
+		.proc_handler	= litmus_l2_double_linefill_proc_handler,
+		.data		= &l2_double_linefill_proc,
+		.maxlen		= sizeof(l2_double_linefill_proc),
+	},
+	{
+		.procname	= "l2_data_prefetch",
+		.mode		= 0644,
+		.proc_handler	= litmus_l2_data_prefetch_proc_handler,
+		.data		= &l2_data_prefetch_proc,
+		.maxlen		= sizeof(l2_data_prefetch_proc),
+	},
+	{
+		.procname	= "do_perf_test",
+		.mode		= 0644,
+		.proc_handler	= do_perf_test_proc_handler,
+	},
+	{
+		.procname	= "lockdown_reg_0",
+		.mode		= 0644,
+		.proc_handler	= lockdown_reg_handler,
+		.data		= &lockdown_reg[0],
+		.maxlen		= sizeof(lockdown_reg[0]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "lockdown_reg_1",
+		.mode		= 0644,
+		.proc_handler	= lockdown_reg_handler,
+		.data		= &lockdown_reg[1],
+		.maxlen		= sizeof(lockdown_reg[1]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "lockdown_reg_2",
+		.mode		= 0644,
+		.proc_handler	= lockdown_reg_handler,
+		.data		= &lockdown_reg[2],
+		.maxlen		= sizeof(lockdown_reg[2]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "lockdown_reg_3",
+		.mode		= 0644,
+		.proc_handler	= lockdown_reg_handler,
+		.data		= &lockdown_reg[3],
+		.maxlen		= sizeof(lockdown_reg[3]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{
+		.procname	= "lockdown_regs",
+		.mode		= 0644,
+		.proc_handler	= lockdown_global_handler,
+		.data		= &lockdown_reg[8],
+		.maxlen		= sizeof(lockdown_reg[8]),
+		.extra1		= &way_partition_min,
+		.extra2		= &way_partition_max,
+	},
+	{ }
+};
+
+static struct ctl_table litmus_dir_table[] = {
+	{
+		.procname	= "litmus",
+		.mode		= 0555,
+		.child		= cache_table,
+	},
+	{ }
+};
+
+u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
+{
+	u32 v = 0;
+
+	__asm__ __volatile__ (
+"	.align 5\n"
+"	str	%[lockval], [%[cachereg]]\n"
+"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
+"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
+"	bgt	1b\n				@ read more, if necessary\n"
+	: [addr] "+r" (start),
+	  [val] "+r" (v)
+	: [end] "r" (end),
+#ifdef CONFIG_CACHE_L2X0
+	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
+#else
+	  [cachereg] "r" (lockreg_d),
+#endif
+	  [lockval] "r" (lock_val)
+	: "cc");
+
+	return v;
+}
+
+
+/*
+ * Prefetch by reading the first word of each cache line in a page.
+ *
+ * @lockdown_reg: address of the lockdown register to write
+ * @lock_val: value to be written to @lockdown_reg
+ * @unlock_val: will unlock the cache to this value
+ * @addr: start address to be prefetched
+ * @end_addr: end address to prefetch (exclusive)
+ *
+ * Assumes: addr < end_addr AND addr != end_addr
+ */
+u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
+{
+#ifndef CONFIG_CACHE_L2X0
+	unsigned long flags;
+#endif
+	u32 v = 0;
+
+#ifndef CONFIG_CACHE_L2X0
+	raw_spin_lock_irqsave(&prefetch_lock, flags);
+#endif
+
+	__asm__ __volatile__ (
+"	.align 5\n"
+"	str	%[lockval], [%[cachereg]]\n"
+"1:	ldr	%[val], [%[addr]], #32		@ 32 bytes = 1 cache line\n"
+"	cmp	%[end], %[addr]			@ subtracts addr from end\n"
+"	bgt	1b\n				@ read more, if necessary\n"
+"	str	%[unlockval], [%[cachereg]]\n"
+	: [addr] "+r" (start),
+	  [val] "+r" (v)
+	: [end] "r" (end),
+#ifdef CONFIG_CACHE_L2X0
+	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
+#else
+	  [cachereg] "r" (lockreg_d),
+#endif
+	  [lockval] "r" (lock_val),
+	  [unlockval] "r" (unlock_val)
+	: "cc");
+
+#ifndef CONFIG_CACHE_L2X0
+	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
+#endif
+
+	return v;
+}
+
+static long update_timeval(struct timespec lhs, struct timespec rhs)
+{
+	long val;
+	struct timespec ts;
+
+	ts = timespec_sub(rhs, lhs);
+	val = ts.tv_sec*NSEC_PER_SEC + ts.tv_nsec;
+
+	return val;
+}
+
+extern void v7_flush_kern_dcache_area(void *, size_t);
+extern void v7_flush_kern_cache_all(void);
+/*
+ * Ensure that this page is not in the L1 or L2 cache.
+ * Since the L1 cache is VIPT and the L2 cache is PIPT, we can use either the
+ * kernel or user vaddr.
+ */
+void color_flush_page(void *vaddr, size_t size)
+{
+	//v7_flush_kern_dcache_area(vaddr, size);
+	v7_flush_kern_cache_all();
+}
+
+#define TRIALS 1000
+
+static int perf_test(void) {
+	struct timespec before, after;
+	struct page *page;
+	void *vaddr;
+	u32 *data;
+	long time, flush_time;
+	int i, num_pages = 1;
+	unsigned int order = 4;
+
+	for (i = 0; i < order; i++) {
+		num_pages = num_pages*2;
+	}
+
+	printk("Number of pages: %d\n", num_pages);
+	//page = alloc_page(__GFP_MOVABLE);
+	page = alloc_pages(__GFP_MOVABLE, order);
+	if (!page) {
+		printk(KERN_WARNING "No memory\n");
+		return -ENOMEM;
+	}
+
+	vaddr = page_address(page);
+	if (!vaddr)
+		printk(KERN_WARNING "%s: vaddr is null\n", __FUNCTION__);
+	data = (u32*) vaddr;
+
+	getnstimeofday(&before);
+	barrier();
+	for (i = 0; i < TRIALS; i++) {
+		color_flush_page(vaddr, PAGE_SIZE*num_pages);
+	}
+	barrier();
+	getnstimeofday(&after);
+	time = update_timeval(before, after);
+	printk("Average for flushes without re-reading: %ld\n", time / TRIALS);
+	flush_time = time / TRIALS;
+
+	color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
+
+	barrier();
+	getnstimeofday(&before);
+	barrier();
+	for (i = 0; i < TRIALS; i++) {
+		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
+	}
+	barrier();
+	getnstimeofday(&after);
+	time = update_timeval(before, after);
+	printk("Average for read from cache: %ld\n", time / TRIALS);
+
+	getnstimeofday(&before);
+	barrier();
+	for (i = 0; i < TRIALS; i++) {
+		color_read_in_mem(nr_unlocked_way[2], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
+		color_flush_page(vaddr, PAGE_SIZE*num_pages);
+	}
+	barrier();
+	getnstimeofday(&after);
+	time = update_timeval(before, after);
+	printk("Average for read from mem: %ld (%ld)\n", time / TRIALS - flush_time, time / TRIALS);
+
+	// write in locked way
+	color_read_in_mem_lock(nr_unlocked_way[2], LOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
+	for (i = 0; i < PAGE_SIZE*num_pages/sizeof(u32); i++) {
+		data[i] = i%63353;
+	}
+	// read
+	barrier();
+	getnstimeofday(&before);
+	barrier();
+	for (i = 0; i < TRIALS; i++) {
+		color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE*num_pages);
+	}
+	barrier();
+	getnstimeofday(&after);
+	time = update_timeval(before, after);
+	printk("Average for read in after write: %ld\n", time / TRIALS);
+
+
+	//free_page((unsigned long)vaddr);
+	free_pages((unsigned long)vaddr, order);
+
+	return 0;
+}
+
+int do_perf_test_proc_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret = 0;
+
+	if (write) {
+		ret = perf_test();
+	}
+
+	return ret;
+}
+
+static struct ctl_table_header *litmus_sysctls;
+
+static int __init litmus_sysctl_init(void)
+{
+	int ret = 0;
+
+	printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n");
+	litmus_sysctls = register_sysctl_table(litmus_dir_table);
+	if (!litmus_sysctls) {
+		printk(KERN_WARNING "Could not register LITMUS^RT sysctl.\n");
+		ret = -EFAULT;
+		goto out;
+	}
+
+	way_partition_min = 0x00000000;
+	way_partition_max = 0x0000FFFF;
+
+out:
+	return ret;
+}
+
+module_init(litmus_sysctl_init);
diff --git a/litmus/litmus.c b/litmus/litmus.c
index db5ce0e9c76e..27efb22d1d2f 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -31,6 +31,8 @@
 #include <trace/events/litmus.h>
 #endif
 
+extern void l2c310_flush_all(void);
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 
@@ -314,6 +316,43 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
 	return ret;
 }
 
+/* sys_test_call() is a test system call for developing */
+asmlinkage long sys_test_call(unsigned int param)
+{
+	long ret = 0;
+	unsigned long flags;
+	struct vm_area_struct *vma_itr = NULL;
+
+	TRACE_CUR("test_call param = %d\n", param);
+
+	down_read(&current->mm->mmap_sem);
+	vma_itr = current->mm->mmap;
+	while (vma_itr != NULL) {
+		printk(KERN_INFO "vm_start : %lx\n", vma_itr->vm_start);
+		printk(KERN_INFO "vm_end   : %lx\n", vma_itr->vm_end);
+		printk(KERN_INFO "vm_flags : %lx\n", vma_itr->vm_flags);
+		printk(KERN_INFO "vm_prot  : %x\n", pgprot_val(vma_itr->vm_page_prot));
+		printk(KERN_INFO "VM_SHARED? %ld\n", vma_itr->vm_flags & VM_SHARED);
+		if (vma_itr->vm_file) {
+			struct file *fp = vma_itr->vm_file;
+			unsigned long fcount = atomic_long_read(&(fp->f_count));
+			printk(KERN_INFO "f_count : %ld\n", fcount);
+			if (fcount > 1) {
+				vma_itr->vm_page_prot = pgprot_noncached(vma_itr->vm_page_prot);
+			}
+		}
+		printk(KERN_INFO "vm_prot2 : %x\n", pgprot_val(vma_itr->vm_page_prot));
+		vma_itr = vma_itr->vm_next;
+	}
+	up_read(&current->mm->mmap_sem);
+
+	local_irq_save(flags);
+	l2c310_flush_all();
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /* p is a real-time task. Re-init its state as a best-effort task. */
 static void reinit_litmus_state(struct task_struct* p, int restore)
 {
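
Usage sketch (editor's addition, not part of the commit): the cache_table entries above are registered under the "litmus" sysctl directory, so they should appear as /proc/sys/litmus/C0_LA_way, /proc/sys/litmus/lock_all, and so on, and test_call should be syscall 364 on x86-64 per syscall_64.tbl (on ARM, the slot after /* 400 */ in calls.S, i.e. 401). A hypothetical userspace probe under those assumptions:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Write a decimal value to one of the proc files created by cache_table. */
static int write_proc(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* way_partitions[0] = 3: CPU 0 may allocate only in ways 0-1
	 * (way_partition_handler writes ~mask to the lockdown registers). */
	write_proc("/proc/sys/litmus/C0_LA_way", "3");

	/* Lock every way on every CPU, then release the lockdown again. */
	write_proc("/proc/sys/litmus/lock_all", "1");
	write_proc("/proc/sys/litmus/lock_all", "0");

	/* 364 = test_call on x86-64 in this patch; it dumps the caller's
	 * VMAs to the kernel log and flushes the L2. */
	return (int)syscall(364, 0);
}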