author     Christopher Kenna <cjk@cs.unc.edu>  2012-10-19 00:29:15 -0400
committer  Christopher Kenna <cjk@cs.unc.edu>  2012-10-19 00:29:15 -0400
commit     2326b2b05adee72ad7fa811e3a766355c732cfda (patch)
tree       20abd3f4f192b29739ea11f8aa1d243839430cbf
parent     3b286b3c7d25f2abd71ccb7853d1398430a65552 (diff)

Try and debug the queue. (wip-mc)
-rw-r--r--  litmus/Makefile        |  4
-rw-r--r--  litmus/color_dev.c     |  4
-rw-r--r--  litmus/color_queue.c   | 15
-rw-r--r--  litmus/litmus.c        |  4
-rw-r--r--  litmus/lockdown.c      | 83
-rw-r--r--  litmus/sched_psn_edf.c | 10

6 files changed, 85 insertions, 35 deletions
diff --git a/litmus/Makefile b/litmus/Makefile
index d63a8eb5128..9ad6260f7ad 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -31,10 +31,10 @@ obj-y = sched_plugin.o litmus.o \
 	server.o \
 	srp.o \
 	sync.o \
-	way_tracker.o
+	way_tracker.o \
+	lockdown.o
 
 obj-$(CONFIG_EXYNOS_MCT) += clock.o
-obj-$(CONFIG_CPU_V7) += lockdown.o
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 # obj-$(CONFIG_PLUGIN_COLOR) += sched_color.o
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index 96e0edc54b2..9edaf0ec7e4 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -93,7 +93,9 @@ static int create_color_page_info(int color, struct page *page)
 	}
 
 	list_add_tail(&info->list, &tsk_rt(current)->color_page_info_list);
-	TRACE_CUR("Added color_page_info %p to list.\n", info);
+	TRACE_CUR("Added color_page_info %p to list. prev: 0x%p next: 0x%p\n",
+			info, tsk_rt(current)->color_page_info_list.prev,
+			tsk_rt(current)->color_page_info_list.next);
 
 out:
 	return err;
diff --git a/litmus/color_queue.c b/litmus/color_queue.c
index 6543811fd06..a1030f5dc19 100644
--- a/litmus/color_queue.c
+++ b/litmus/color_queue.c
@@ -99,6 +99,8 @@ static void color_page_info_add_work(struct color_page_info *info, void *vaddr_s
 {
 	int i;
 
+	TRACE_CUR("adding work for color_page_info: 0x%p\n", info);
+
 	for (i = 0; i < COLOR_REQUESTS_PER_PAGE; i++) {
 		struct color_queue_request *req = &info->requests[i];
 		void *work_vaddr = vaddr_start + i * COLOR_QUEUE_REQ_SIZE;
@@ -125,6 +127,10 @@ void color_queue_enqueue_read(struct task_struct *ts)
 {
 	struct color_page_info *cur_info;
 
+	TRACE_CUR("enqueue read prev: %p next: %p\n",
+			tsk_rt(ts)->color_page_info_list.prev,
+			tsk_rt(ts)->color_page_info_list.next);
+
 	list_for_each_entry(cur_info,
 			&tsk_rt(ts)->color_page_info_list,
 			list)
@@ -218,7 +224,9 @@ static void wait_next_phase(void)
 		 * set up the lockdown value and updated the queue phase.
 		 */
 		entry->phase = color_queue.phase;
-		QTRACE(color_queue, "moving on to next phase\n");
+		QTRACE(color_queue,
+				"cpu->phase: %d advances to a higher phase\n",
+				entry->phase);
 		raw_spin_unlock(&color_queue.lock);
 		return;
 	}
@@ -243,9 +251,10 @@ static void wait_next_phase(void)
 		color_queue.way = next_way;
 		color_queue.at_barrier = 0;
 		color_queue.phase++;
-		QTRACE(color_queue, "moving on to start on the next way\n");
+		QTRACE(color_queue, "bumped the phase and way\n");
 		raw_spin_unlock(&color_queue.lock);
-		return;
+	} if (color_queue.nr_cpus < color_queue.at_barrier) {
+		BUG();
 	} else {
 		/* Wait for work from the last phase to complete. */
 		QTRACE(color_queue, "still waiting for others\n");
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 070ecefaeac..654815a124b 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -397,6 +397,8 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 	void * ctrl_page_orig = NULL;
 #endif
 
+	TRACE_CUR("restore: %d\n", restore);
+
 	if (restore) {
 		/* Safe user-space provided configuration data.
 		 * and allocated page. */
@@ -469,8 +471,6 @@ long litmus_admit_task(struct task_struct* tsk)
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
 	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
 
-	INIT_LIST_HEAD(&tsk_rt(tsk)->color_page_info_list);
-
 	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
 		printk(KERN_WARNING "litmus: no more heap node memory!?\n");
 
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
index f1da7b4145d..bd24b5839bc 100644
--- a/litmus/lockdown.c
+++ b/litmus/lockdown.c
@@ -9,16 +9,70 @@
 #include <linux/math64.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
+#include <litmus/litmus.h>
 #include <litmus/clock.h>
 
+#ifdef CONFIG_CPU_V7
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/cacheflush.h>
+#endif
 
 #include <litmus/color.h>
 #include <litmus/debug_trace.h>
 #include <litmus/lockdown.h>
 
+
+#define MAX_NR_WAYS 16
+
+/*
+ * unlocked_way[i] : allocation can occur in way i
+ *
+ * 0 = allocation can occur in the corresponding way
+ * 1 = allocation cannot occur in the corresponding way
+ */
+u32 unlocked_way[MAX_NR_WAYS] = {
+	0xFFFFFFFE, /* way 0 unlocked */
+	0xFFFFFFFD,
+	0xFFFFFFFB,
+	0xFFFFFFF7,
+	0xFFFFFFEF, /* way 4 unlocked */
+	0xFFFFFFDF,
+	0xFFFFFFBF,
+	0xFFFFFF7F,
+	0xFFFFFEFF, /* way 8 unlocked */
+	0xFFFFFDFF,
+	0xFFFFFBFF,
+	0xFFFFF7FF,
+	0xFFFFEFFF, /* way 12 unlocked */
+	0xFFFFDFFF,
+	0xFFFFBFFF,
+	0xFFFF7FFF,
+};
+
+#ifndef CONFIG_CPU_V7
+u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
+{
+	TRACE_CUR("Dummy read_in_mem: lock_val: 0x%x unlock_val: 0x%x "
+			"start: 0x%p end: 0x%p\n", lock_val, unlock_val,
+			start, end);
+	return 0;
+}
+
+void set_lockdown(u32 lockdown_state)
+{
+	TRACE_CUR("Dummy set_lockdown function lockdown_state: 0x%x\n",
+			lockdown_state);
+}
+
+void litmus_setup_lockdown(void __iomem *base, u32 id)
+{
+	printk("LITMUS^RT Dummy Lockdown\n");
+}
+
+#else
+
 static void __iomem *cache_base;
 static void __iomem *lockreg_d;
 static void __iomem *lockreg_i;
@@ -38,8 +92,6 @@ struct mutex lockdown_proc;
 		void __iomem *__v = cache_base + L2X0_LOCKDOWN_WAY_I_BASE + \
 			__cpu * L2X0_LOCKDOWN_STRIDE; __v; })
 
-#define MAX_NR_WAYS 16
-
 void set_lockdown(u32 lockdown_state)
 {
 	writel_relaxed(lockdown_state, lockreg_d);
@@ -91,31 +143,6 @@ void color_flush_page(void *vaddr)
 	v7_flush_kern_dcache_area(vaddr, PAGE_SIZE);
 }
 
-/*
- * unlocked_way[i] : allocation can occur in way i
- *
- * 0 = allocation can occur in the corresponding way
- * 1 = allocation cannot occur in the corresponding way
- */
-u32 unlocked_way[MAX_NR_WAYS] = {
-	0xFFFFFFFE, /* way 0 unlocked */
-	0xFFFFFFFD,
-	0xFFFFFFFB,
-	0xFFFFFFF7,
-	0xFFFFFFEF, /* way 4 unlocked */
-	0xFFFFFFDF,
-	0xFFFFFFBF,
-	0xFFFFFF7F,
-	0xFFFFFEFF, /* way 8 unlocked */
-	0xFFFFFDFF,
-	0xFFFFFBFF,
-	0xFFFFF7FF,
-	0xFFFFEFFF, /* way 12 unlocked */
-	0xFFFFDFFF,
-	0xFFFFBFFF,
-	0xFFFF7FFF,
-};
-
 static void print_lockdown_registers(void)
 {
 	int i;
@@ -641,3 +668,5 @@ void litmus_setup_lockdown(void __iomem *base, u32 id)
 
 	test_lockdown(NULL);
 }
+
+#endif
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 4e117be9546..b26a5ff439c 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -23,6 +23,8 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+#include <litmus/color.h>
+
 typedef struct {
 	rt_domain_t domain;
 	int cpu;
@@ -271,6 +273,14 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 		TRACE("becoming idle at %llu\n", litmus_clock());
 	}
 
+	if (prev && next != prev && is_realtime(prev) && get_rt_job(prev) > 1) {
+		color_sched_out_task(prev);
+	}
+
+	if (next && next != prev && is_realtime(next)) {
+		color_sched_in_task(next);
+	}
+
 	pedf->scheduled = next;
 	sched_state_task_picked();
 	raw_spin_unlock(&pedf->slock);
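For reference, a standalone sketch (not part of the patch): each entry of the unlocked_way[] table added to litmus/lockdown.c is an all-ones mask with only the corresponding way's bit cleared, i.e. ~(1u << way), so way i is the only way open for allocation when that value is written to the lockdown register. The small user-space program below (an assumption-free check, not kernel code) verifies that equivalence.

/* Standalone user-space check of the unlocked_way[] mask pattern. */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NR_WAYS 16

static const uint32_t unlocked_way[MAX_NR_WAYS] = {
	0xFFFFFFFE, 0xFFFFFFFD, 0xFFFFFFFB, 0xFFFFFFF7,
	0xFFFFFFEF, 0xFFFFFFDF, 0xFFFFFFBF, 0xFFFFFF7F,
	0xFFFFFEFF, 0xFFFFFDFF, 0xFFFFFBFF, 0xFFFFF7FF,
	0xFFFFEFFF, 0xFFFFDFFF, 0xFFFFBFFF, 0xFFFF7FFF,
};

int main(void)
{
	int way;

	for (way = 0; way < MAX_NR_WAYS; way++) {
		/* A 0 bit means allocation may occur in that way; only bit `way` is 0. */
		assert(unlocked_way[way] == (uint32_t)~(UINT32_C(1) << way));
		printf("way %2d unlocked: 0x%08" PRIX32 "\n", way, unlocked_way[way]);
	}
	return 0;
}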