author    Christopher Kenna <cjk@cs.unc.edu> 2012-10-18 02:31:12 -0400
committer Christopher Kenna <cjk@cs.unc.edu> 2012-10-18 13:08:55 -0400
commit    00691f091b810f1da96f4e18a5cf0f9875e8e942 (patch)
tree      64e18171f9d11f95556051e916e0817bea609695 /litmus
parent    582d750a1bd79087e510d1b38e4c2457aa3d8b41 (diff)

A queue for reading in pages.

This is not tested and probably won't work.

Signed-off-by: Christopher Kenna <cjk@cs.unc.edu>
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Makefile        1
-rw-r--r--  litmus/color.c         4
-rw-r--r--  litmus/color_dev.c    55
-rw-r--r--  litmus/color_queue.c 361
-rw-r--r--  litmus/litmus.c        2
-rw-r--r--  litmus/lockdown.c     41
6 files changed, 447 insertions, 17 deletions
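
For orientation, here is a minimal sketch of how a scheduler plugin might drive
the two new entry points. The caller and its helper names are hypothetical and
not part of this patch; the sketch only assumes the preconditions documented in
color_queue.c (run on the task's own CPU, and set @way on each color_page_info
before enqueueing).

	/* Hypothetical caller sketch; not part of this patch. */
	static void example_prepare_job(struct task_struct *ts, int way)
	{
		struct color_page_info *info;

		/* Must be called on the CPU that will execute ts. */
		list_for_each_entry(info, &tsk_rt(ts)->color_page_info_list, list)
			info->way = way;	/* choose a way before enqueueing */

		/* Chunks each page into requests and reads them in; returns
		 * once this CPU's share of the work is complete. */
		color_queue_enqueue_read(ts);
	}

	static void example_finish_job(struct task_struct *ts)
	{
		/* Evicts via the flusher pages for the ways recorded at read-in. */
		color_queue_enqueue_flush(ts);
	}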
diff --git a/litmus/Makefile b/litmus/Makefile
index fab72b14eac8..d63a8eb51280 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -10,6 +10,7 @@ obj-y = sched_plugin.o litmus.o \
 	color.o \
 	color_dev.o \
 	color_proc.o \
+	color_queue.o \
 	ctrldev.o \
 	dgl.o \
 	domain.o \
diff --git a/litmus/color.c b/litmus/color.c
index 96ccf3efd228..9888c3f1599c 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -15,6 +15,7 @@
 #include <litmus/color.h>
 #include <litmus/litmus.h> /* for in_list(...) */
 #include <litmus/clock.h>
+#include <litmus/color_queue.h>
 
 #define PAGES_PER_COLOR 1024
 
@@ -146,6 +147,9 @@ static int do_add_pages(void)
 		list_del(&page->lru);
 		__free_page(page);
 	}
+
+	/* setup the color queue stuff */
+	ret = setup_flusher_array();
 out:
 	return ret;
 }
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index df43c9f1e37d..278c8829c8fa 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -5,12 +5,14 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/slab.h>
 #include <asm/io.h> /* page_to_phys on SPARC */
 
 /* #include <asm/glue-cache.h> */
 
 #include <litmus/litmus.h>
 #include <litmus/color.h>
+#include <litmus/color_queue.h>
 
 #define ALLOC_NAME "litmus/color_alloc"
 #define CTRL_NAME "litmus/color_ctrl"
@@ -185,6 +187,42 @@ out:
  * Allocation device
 ***********************************************************/
 
+static int create_color_page_info(int color, struct page *page)
+{
+	struct color_page_info *info;
+	void *vaddr;
+	int i, err = 0;
+
+	vaddr = page_address(page);
+	if (!vaddr) {
+		TRACE_CUR("Could not get page address.\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info) {
+		TRACE_CUR("Could not kmalloc\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&info->list);
+	info->color = color;
+	info->vaddr = vaddr;
+
+	for (i = 0; i < COLOR_REQUESTS_PER_PAGE; i++) {
+		struct color_queue_request *req = &info->requests[i];
+		INIT_LIST_HEAD(&req->list);
+		req->color_page_info = info;
+		req->request_type = COLOR_QUEUE_IDLE;
+	}
+
+	list_add_tail(&info->list, &tsk_rt(current)->color_page_info_list);
+out:
+	return err;
+}
+
 #define vma_nr_pages(vma) \
 	({unsigned long v = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); v;})
 
@@ -213,6 +251,12 @@ static int do_map_colored_page_set(struct vm_area_struct *vma,
 		/* TODO For some reason this causes problems on ARM. */
 		/* clear_user_highpage(page, addr); */
 
+		err = create_color_page_info(color_no, page);
+		if (err) {
+			/* TODO cleanup? */
+			goto out;
+		}
+
 		TRACE_CUR("inserting page (pa: 0x%"FMT_PA") at vaddr: 0x%10lx "
 				"flags: 0x%10lx prot: 0x%10lx\n",
 				page_to_phys(page), addr,
@@ -304,12 +348,23 @@ out:
 
 static void litmus_color_alloc_vm_close(struct vm_area_struct *vma)
 {
+	struct color_page_info *cur, *tmp;
+
 	TRACE_CUR("flags=0x%lx prot=0x%lx\n",
 			vma->vm_flags, pgprot_val(vma->vm_page_prot));
 
 	TRACE_CUR("%p:%p vma:%p vma->vm_private_data:%p closed.\n",
 			(void*) vma->vm_start, (void*) vma->vm_end, vma,
 			vma->vm_private_data);
+
+	/* clean up the color_page_infos */
+	list_for_each_entry_safe(cur, tmp,
+			&tsk_rt(current)->color_page_info_list,
+			list)
+	{
+		kfree(cur);
+	}
+
 	reclaim_pages(vma);
 }
 
diff --git a/litmus/color_queue.c b/litmus/color_queue.c
new file mode 100644
index 000000000000..a5a702332da8
--- /dev/null
+++ b/litmus/color_queue.c
@@ -0,0 +1,361 @@
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include <litmus/color_queue.h>
+#include <litmus/color.h>
+#include <litmus/lockdown.h>
+#include <litmus/color.h>
+#include <litmus/litmus.h>
+
+struct cpu_entry {
+	int phase;
+	/* what phase a CPU is in */
+
+	struct list_head enqueue;
+	/* used as a PENDING list of work to enqueue in the color queue */
+
+	int nr_work;
+	/* outstanding work elements this CPU has */
+
+	raw_spinlock_t lock;
+	/* guards nr_work when the CPU is "inside" the queue (its work is added) */
+};
+
+DEFINE_PER_CPU(struct cpu_entry, cpu_entries);
+/* Per-CPU state. */
+
+extern u32 unlocked_way[];
+/* Index i means way i is unlocked. Comes from litmus/lockdown.c */
+
+struct color_queue color_queue;
+/* The queue. */
+
+static u32 lockdown_value;
+/* Cached lockdown value so that we don't need to look it up each time.
+ * Should agree with color_queue.way.
+ */
+
+static u32 lockdown_value_idle = UNLOCK_ALL;
+/* Lockdown state when the queue is inactive. Lets us lock down some ways
+ * if we choose to.
+ */
+
+static void ***flusher_pages;
+/* One page per [way, color] pair, used for flushing. */
+
+/*
+ * Add work to the cpu_entry's PENDING list of work for the color queue.
+ */
+static void cpu_add_work(struct color_queue_request *req)
+{
+	struct cpu_entry *entry = &__get_cpu_var(cpu_entries);
+	struct list_head *new = &req->list;
+	list_add_tail(new, &entry->enqueue);
+	entry->nr_work++;
+}
+
+/*
+ * Add a pending read-in of a page, chunked into work units.
+ *
+ * The caller must have already set @way (in @color_page_info) by the time
+ * this is called!
+ *
+ * @vaddr_start differs depending on whether this is a read-in or a "flush".
+ */
+static void color_page_info_add_work(struct color_page_info *info,
+		void *vaddr_start)
+{
+	int i;
+
+	for (i = 0; i < COLOR_REQUESTS_PER_PAGE; i++) {
+		struct color_queue_request *req = &info->requests[i];
+		void *work_vaddr = vaddr_start + i * COLOR_QUEUE_REQ_SIZE;
+
+		WARN(req->request_type != COLOR_QUEUE_IDLE,
+				"request was not idle!\n");
+
+		req->request_type = COLOR_QUEUE_READ;
+		req->cpu = smp_processor_id();
+		req->request_data.read.vaddr = work_vaddr;
+		cpu_add_work(req);
+	}
+}
+
+static void color_queue_submit_work(void);
+
+/*
+ * Assumes this is called on the CPU that @ts ran on and that this CPU is the
+ * one requesting the work. Do not try to schedule work for someone else!
+ *
+ * Also assumes that @way is set for each color_page_info before calling!
+ */
+void color_queue_enqueue_read(struct task_struct *ts)
+{
+	struct color_page_info *cur_info;
+
+	list_for_each_entry(cur_info,
+			&tsk_rt(ts)->color_page_info_list,
+			list)
+	{
+		void *vaddr_start = cur_info->vaddr;
+		/* since this is a read, use the page's own address */
+
+		color_page_info_add_work(cur_info, vaddr_start);
+	}
+
+	color_queue_submit_work();
+}
+
+/*
+ * Assumes this is called on the CPU that @ts ran on and that this CPU is the
+ * one requesting the work. Do not try to schedule work for someone else!
+ *
+ * Also assumes that @way is still set to the way each page was read into, so
+ * that the proper address to flush can be found.
+ */
+void color_queue_enqueue_flush(struct task_struct *ts)
+{
+	struct color_page_info *cur_info;
+
+	list_for_each_entry(cur_info,
+			&tsk_rt(ts)->color_page_info_list,
+			list)
+	{
+		void *vaddr_start;
+		vaddr_start = flusher_pages[cur_info->way][cur_info->color];
+		/* now we use the corresponding flusher page */
+		color_page_info_add_work(cur_info, vaddr_start);
+	}
+
+	color_queue_submit_work();
+}
+
+static void do_work_read(struct color_queue_request *request)
+{
+	void *vaddr = request->request_data.read.vaddr;
+
+	/* We don't know which CPU is first to do work in a given phase, so we
+	 * have all CPUs set the lockdown register to the same value. The
+	 * "unlock_val" is the same as the lock value, since we don't know if
+	 * other CPUs are still reading. We could take the unlock out of the
+	 * read-in function, but it's one store operation and probably takes
+	 * a few nanoseconds...
+	 */
+	color_read_in_mem(lockdown_value, lockdown_value,
+			vaddr, vaddr + COLOR_QUEUE_REQ_SIZE);
+}
+
+/*
+ * Perform a single unit of work from the color queue.
+ */
+static void do_work_son(struct color_queue_request *request)
+{
+	struct cpu_entry *entry;
+
+	switch (request->request_type) {
+	case COLOR_QUEUE_IDLE:
+		WARN(1, "Idle work in the queue!\n");
+		break;
+	case COLOR_QUEUE_READ:
+		do_work_read(request);
+		break;
+	}
+
+	/* Tell the (possibly remote) owner CPU that one of its work units is done. */
+	entry = &per_cpu(cpu_entries, request->cpu);
+	raw_spin_lock(&entry->lock);
+	entry->nr_work--;
+	raw_spin_unlock(&entry->lock);
+
+	/* work is done, set it idle */
+	request->request_type = COLOR_QUEUE_IDLE;
+}
+
+static void wait_next_phase(void)
+{
+	struct cpu_entry *entry = &__get_cpu_var(cpu_entries);
+	struct color_queue_request *request;
+
+	for (;;) {
+		raw_spin_lock(&color_queue.lock);
+		if (entry->phase < color_queue.phase) {
+			/* Move on to the next phase (or later). Another CPU
+			 * already set up the lockdown value and updated the
+			 * queue phase. */
+			entry->phase = color_queue.phase;
+			raw_spin_unlock(&color_queue.lock);
+			return;
+		}
+
+		if (color_queue.nr_cpus == color_queue.at_barrier) {
+			int next_way;
+
+			/* Ready to start the next phase. */
+			if (unlikely(list_empty(&color_queue.queue))) {
+				/* This should not happen! Will loop forever? */
+				WARN(1, "color queue list was empty!\n");
+				raw_spin_unlock(&color_queue.lock);
+				continue;
+			}
+			request = list_first_entry(&color_queue.queue,
+					struct color_queue_request, list);
+			next_way = request->color_page_info->way;
+			lockdown_value = unlocked_way[next_way];
+			color_queue.way = next_way;
+			color_queue.at_barrier = 0;
+			color_queue.phase++;
+			raw_spin_unlock(&color_queue.lock);
+			return;
+		} else {
+			/* Wait for work from the last phase to complete. */
+			raw_spin_unlock(&color_queue.lock);
+			cpu_relax();
+		}
+	}
+}
+
+static void color_queue_loop(void)
+{
+	struct cpu_entry *entry = &__get_cpu_var(cpu_entries);
+	struct color_queue_request *request;
+	int nr_work;
+
+	for (;;) {
+		raw_spin_lock(&entry->lock);
+		nr_work = entry->nr_work;
+		raw_spin_unlock(&entry->lock);
+
+		if (0 == nr_work) {
+			/* All the work is done for this CPU. We can leave. */
+			raw_spin_lock(&color_queue.lock);
+			color_queue.nr_cpus--;
+			if (0 == color_queue.nr_cpus) {
+				/* Queue is going idle. Restore lockdown state. */
+				set_lockdown(lockdown_value_idle);
+			}
+			raw_spin_unlock(&color_queue.lock);
+			return;
+		}
+
+		/* Our work is not done, so continue processing more work. */
+
+		raw_spin_lock(&color_queue.lock);
+		if (unlikely(list_empty(&color_queue.queue))) {
+			/* can this happen? */
+			WARN(1, "color queue list was empty...\n");
+			raw_spin_unlock(&color_queue.lock);
+			continue;
+		}
+		request = list_first_entry(&color_queue.queue,
+				struct color_queue_request, list);
+		if (color_queue.way == request->color_page_info->way) {
+			/* we're going to do this work */
+			list_del(&request->list);
+			raw_spin_unlock(&color_queue.lock);
+			do_work_son(request);
+		} else {
+			/* we need to wait for the next phase */
+			color_queue.at_barrier++;
+			raw_spin_unlock(&color_queue.lock);
+			wait_next_phase();
+		}
+	}
+}
+
+/*
+ * Actually enqueues the work on the color queue and enters the work loop.
+ */
+static void color_queue_submit_work(void)
+{
+	struct cpu_entry *entry = &__get_cpu_var(cpu_entries);
+
+	raw_spin_lock(&color_queue.lock);
+	entry->phase = color_queue.phase;
+	color_queue.nr_cpus++;
+	list_splice_tail_init(&entry->enqueue, &color_queue.queue);
+	raw_spin_unlock(&color_queue.lock);
+	color_queue_loop();
+}
+
+/* Called when the user invokes the add_pages proc handler. */
+int setup_flusher_array(void)
+{
+	int color, way, ret = 0;
+	struct page *page;
+
+	if (0 == color_cache_info.ways || 0 == color_cache_info.nr_colors) {
+		WARN(1, "Cache information not initialized!\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	flusher_pages = kzalloc(color_cache_info.ways
+			* sizeof(*flusher_pages), GFP_KERNEL);
+	if (!flusher_pages) {
+		printk(KERN_WARNING "No memory for flusher array!\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (way = 0; way < color_cache_info.ways; way++) {
+		void **flusher_color_arr;
+		flusher_color_arr = kmalloc(sizeof(**flusher_pages)
+				* color_cache_info.nr_colors, GFP_KERNEL);
+		if (!flusher_color_arr) {
+			printk(KERN_WARNING "No memory for flusher array!\n");
+			ret = -ENOMEM;
+			goto out_free;
+		}
+
+		flusher_pages[way] = flusher_color_arr;
+
+		for (color = 0; color < color_cache_info.nr_colors; color++) {
+			page = get_colored_page(color);
+			if (!page) {
+				printk(KERN_WARNING "no more colored pages\n");
+				ret = -EINVAL;
+				goto out_free;
+			}
+			flusher_pages[way][color] = page_address(page);
+			if (!flusher_pages[way][color]) {
+				printk(KERN_WARNING "bad page address\n");
+				ret = -EINVAL;
+				goto out_free;
+			}
+		}
+	}
+out:
+	return ret;
+out_free:
+	for (way = 0; way < color_cache_info.ways; way++) {
+		for (color = 0; color < color_cache_info.nr_colors; color++) {
+			/* not bothering to try and give back colored pages */
+		}
+		kfree(flusher_pages[way]);
+	}
+	kfree(flusher_pages);
+	return ret;
+}
+
+static int __init init_color_queue(void)
+{
+	struct cpu_entry *cpu_entry;
+	int cpu;
+
+	BUILD_BUG_ON((PAGE_SIZE % COLOR_QUEUE_REQ_SIZE) != 0);
+
+	for_each_online_cpu(cpu) {
+		cpu_entry = &per_cpu(cpu_entries, cpu);
+		INIT_LIST_HEAD(&cpu_entry->enqueue);
+		cpu_entry->nr_work = 0;
+		raw_spin_lock_init(&cpu_entry->lock);
+	}
+
+	raw_spin_lock_init(&color_queue.lock);
+	INIT_LIST_HEAD(&color_queue.queue);
+
+	return 0;
+}
+
+module_init(init_color_queue);
diff --git a/litmus/litmus.c b/litmus/litmus.c
index ba6397a461d5..74e798992fa0 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -466,6 +466,8 @@ long litmus_admit_task(struct task_struct* tsk)
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
 	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
 
+	INIT_LIST_HEAD(&tsk_rt(tsk)->color_page_info_list);
+
 	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
 		printk(KERN_WARNING "litmus: no more heap node memory!?\n");
 
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
index ac7b9a24a512..f1da7b4145d5 100644
--- a/litmus/lockdown.c
+++ b/litmus/lockdown.c
@@ -17,6 +17,7 @@
 
 #include <litmus/color.h>
 #include <litmus/debug_trace.h>
+#include <litmus/lockdown.h>
 
 static void __iomem *cache_base;
 static void __iomem *lockreg_d;
@@ -38,7 +39,11 @@ struct mutex lockdown_proc;
 		__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
 
 #define MAX_NR_WAYS 16
-#define UNLOCK_ALL 0x00000000 /* allocation in any way */
+
+void set_lockdown(u32 lockdown_state)
+{
+	writel_relaxed(lockdown_state, lockreg_d);
+}
 
 /*
  * Prefetch by reading the first word of each cache line in a page.
@@ -51,10 +56,7 @@ struct mutex lockdown_proc;
  *
  * Assumes: addr < end_addr AND addr != end_addr
  */
-static u32 read_in_page(u32 lock_val,
-		u32 unlock_val,
-		void *start,
-		void *end)
+u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
 {
 	unsigned long flags;
 	u32 v = 0;
@@ -90,12 +92,12 @@ void color_flush_page(void *vaddr)
 }
 
 /*
- * unlocked[i] : allocation can occur in way i
+ * unlocked_way[i] : allocation can occur in way i
  *
  * 0 = allocation can occur in the corresponding way
  * 1 = allocation cannot occur in the corresponding way
  */
-static u32 unlocked[MAX_NR_WAYS] = {
+u32 unlocked_way[MAX_NR_WAYS] = {
 	0xFFFFFFFE,	/* way 0 unlocked */
 	0xFFFFFFFD,
 	0xFFFFFFFB,
@@ -334,7 +336,7 @@ static void thrash(void *vaddr)
 	v7_flush_kern_dcache_area(vaddr, CTRL_PAGES * PAGE_SIZE);
 
 	/* thrash. don't lock down, we want to fill the dcache with these */
-	read_in_page(UNLOCK_ALL, UNLOCK_ALL, thrash_pages,
+	color_read_in_mem(UNLOCK_ALL, UNLOCK_ALL, thrash_pages,
 			thrash_pages + THRASH_PAGES * PAGE_SIZE);
 }
 
@@ -386,7 +388,7 @@ static int test_read_in(void)
 	preempt_disable();
 	local_irq_save(flags);
 	start = litmus_get_cycles();
-	read_in_page(unlocked[WAY_OFFSET], UNLOCK_ALL,
+	color_read_in_mem(unlocked_way[WAY_OFFSET], UNLOCK_ALL,
 			remapped, remapped + i);
 	stop = litmus_get_cycles();
 	local_irq_restore(flags);
@@ -421,7 +423,7 @@ static int test_read_in(void)
 			vaddr_start = remapped + PAGE_SIZE * read_start;
 			vaddr_end = remapped + PAGE_SIZE * read_end;
 
-			read_in_page(unlocked[way], UNLOCK_ALL,
+			color_read_in_mem(unlocked_way[way], UNLOCK_ALL,
 					vaddr_start, vaddr_end);
 
 			READ_TRACE("i:%d j:%d read_start:%d read_end:%d way:%d\n",
@@ -500,9 +502,9 @@ static void test_lockdown(void *ignore)
 	for (i = 0; i < MAX_NR_WAYS; i++) {
 		unsigned long expected = 0xFFFFFFFF;
 		clear_bit(i, &expected);
-		if (expected != unlocked[i]) {
+		if (expected != unlocked_way[i]) {
 			WARN(1, "Unlock %2d: expected 0x%8x but got 0x%8x\n",
-					i, ((u32)expected), unlocked[i]);
+					i, ((u32)expected), unlocked_way[i]);
 		}
 	}
 
@@ -538,12 +540,12 @@ static int perf_test(void) {
 	time = update_timeval(before, after);
 	printk("Average for flushes without re-reading: %ld\n", time / TRIALS);
 
-	read_in_page(unlocked[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE);
+	color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE);
 	barrier();
 	getnstimeofday(&before);
 	barrier();
 	for (i = 0; i < TRIALS; i++) {
-		read_in_page(unlocked[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE);
+		color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE);
 	}
 	barrier();
 	getnstimeofday(&after);
@@ -553,7 +555,7 @@ static int perf_test(void) {
 	getnstimeofday(&before);
 	barrier();
 	for (i = 0; i < TRIALS; i++) {
-		read_in_page(unlocked[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE);
+		color_read_in_mem(unlocked_way[0], UNLOCK_ALL, vaddr, vaddr + PAGE_SIZE);
 		color_flush_page(vaddr);
 	}
 	barrier();
@@ -574,8 +576,10 @@ int litmus_test_prefetch_proc_handler(struct ctl_table *table, int write,
 	u32 *data;
 	int i;
 
-	if (!write)
+	if (!write) {
+		*lenp = 0;
 		return 0;
+	}
 
 	page = alloc_page(__GFP_MOVABLE);
 	if (!page) {
@@ -591,7 +595,7 @@ int litmus_test_prefetch_proc_handler(struct ctl_table *table, int write,
 	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
 		data[i] = i;
 
-	read_in_page(UNLOCK_ALL, LOCKREG_TEST_VAL, vaddr, vaddr + PAGE_SIZE);
+	color_read_in_mem(UNLOCK_ALL, LOCKREG_TEST_VAL, vaddr, vaddr + PAGE_SIZE);
 
 	if (LOCKREG_TEST_VAL != readl_relaxed(lockreg_d)) {
 		printk("%s: Expected lockreg value 0x%8x but got 0x%8x!\n",
@@ -632,5 +636,8 @@ void litmus_setup_lockdown(void __iomem *base, u32 id)
 	mutex_init(&l2x0_prefetch_mutex);
 	mutex_init(&lockdown_proc);
 
+	WARN(MAX_NR_WAYS < color_cache_info.ways,
+			"Statically defined way maximum too small.\n");
+
 	test_lockdown(NULL);
 }