 include/litmus/clock.h       |  1
 include/litmus/color_queue.h |  3
 include/litmus/debug_trace.h | 18
 include/litmus/lockdown.h    |  2
 include/litmus/sched_trace.h | 79
 litmus/Kconfig               |  2
 litmus/color.c               | 13
 litmus/color_queue.c         | 34
 litmus/dgl.c                 | 88
 litmus/lockdown.c            | 34
 litmus/sched_mc.c            |  7
 litmus/sched_trace.c         | 13
 litmus/way_tracker.c         |  3
 13 files changed, 212 insertions(+), 85 deletions(-)
diff --git a/include/litmus/clock.h b/include/litmus/clock.h
index 9b285db7f60c..bb493e73d502 100644
--- a/include/litmus/clock.h
+++ b/include/litmus/clock.h
@@ -2,6 +2,7 @@
 #define _LITMUS_CLOCK_H_
 
 #if defined(CONFIG_EXYNOS_MCT)
+
 /*
  * Only used if we are using the EXYNOS MCT clock.
  */
diff --git a/include/litmus/color_queue.h b/include/litmus/color_queue.h
index 95dd395c0b9c..7a49afedf929 100644
--- a/include/litmus/color_queue.h
+++ b/include/litmus/color_queue.h
@@ -60,4 +60,7 @@ int color_queue_enqueue_flush(struct task_struct *ts);
 
 void cleanup_color_page_infos(struct list_head *head);
 
+int color_cache_task(struct task_struct *t);
+int color_uncache_task(struct task_struct *t);
+
 #endif
diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h
index ee4a7b4844c8..2808bf66f55a 100644
--- a/include/litmus/debug_trace.h
+++ b/include/litmus/debug_trace.h
@@ -17,10 +17,22 @@ extern atomic_t __log_seq_no;
 #define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
 	raw_smp_processor_id(), \
 	__FUNCTION__, __FILE__, __LINE__
+#define STRACE(fmt, args...) \
+	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \
+			TRACE_ARGS, ## args)
+#define STRACE2(fmt, args...) \
+	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \
+			TRACE_ARGS, ## args)
 #else
 #define TRACE_PREFIX "%d P%d: "
 #define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
 	raw_smp_processor_id()
+#define STRACE(fmt, args...) \
+	sched_trace_log_message("%d P%d : " fmt, \
+			TRACE_ARGS, ## args)
+#define STRACE2(fmt, args...) \
+	sched_trace_log_message("%d P%d : " fmt, \
+			TRACE_ARGS, ## args)
 #endif
 
 #define TRACE(fmt, args...) \
@@ -33,12 +45,6 @@ extern atomic_t __log_seq_no;
 
 #define TRACE_CUR(fmt, args...) \
 	TRACE_TASK(current, fmt, ## args)
-#define STRACE(fmt, args...) \
-	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \
-			TRACE_ARGS, ## args)
-#define STRACE2(fmt, args...) \
-	sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \
-			TRACE_ARGS, ## args)
 
 
 
diff --git a/include/litmus/lockdown.h b/include/litmus/lockdown.h
index 78cd9c856571..ddd50accd2c9 100644
--- a/include/litmus/lockdown.h
+++ b/include/litmus/lockdown.h
@@ -4,8 +4,10 @@
 #include <linux/types.h>
 
 #define UNLOCK_ALL	0x00000000	/* allocation in any way */
+#define LOCK_ALL	(~UNLOCK_ALL)
 
 u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end);
+u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end);
 
 void set_lockdown(u32 lockdown_state);
 
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index cb8758f8aebf..0580340d0170 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -51,6 +51,7 @@ struct st_switch_away_data { /* A process was switched away from on a given CPU.
 	u64 exec_time;
 };
 
+/* changed: like everything */
 struct st_completion_data { /* A job completed. */
 	u32 exec;
 	u16 flush_work;
@@ -76,8 +77,8 @@ struct st_resume_data { /* A task resumes. */
 
 struct st_action_data {
 	u64 when;
-	u32 action;
-	u8  __unused[4];
+	u8  action;
+	u8  __unused[7];
 };
 
 struct st_sys_release_data {
@@ -85,11 +86,13 @@ struct st_sys_release_data {
 	u64 release;
 };
 
+/* changed: easy enough to remove */
 struct st_task_exit_data {
 	u64 avg_exec_time;
 	u64 max_exec_time;
 };
 
+/* changed: calculate yoself */
 struct st_task_tardy_data {
 	u64 total_tardy;
 	u32 max_tardy;
@@ -232,26 +235,26 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id,
 		trace_litmus_task_param(t); \
 	} while (0)
 
-#define sched_trace_task_release(t)/* \ */
-/* 	do { \ */
-/* 		SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \ */
-/* 			do_sched_trace_task_release, t); \ */
-/* 		trace_litmus_task_release(t); \ */
-/* 	} while (0) */
+#define sched_trace_task_release(t) \
+	do { \
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, \
+			do_sched_trace_task_release, t); \
+		trace_litmus_task_release(t); \
+	} while (0)
 
-#define sched_trace_task_switch_to(t)/* \ */
-/* 	do { \ */
-/* 		SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \ */
-/* 			do_sched_trace_task_switch_to, t); \ */
-/* 		trace_litmus_switch_to(t); \ */
-/* 	} while (0) */
+#define sched_trace_task_switch_to(t) \
+	do { \
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, \
+			do_sched_trace_task_switch_to, t); \
+		trace_litmus_switch_to(t); \
+	} while (0)
 
-#define sched_trace_task_switch_away(t)/* \ */
-/* 	do { \ */
-/* 		SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \ */
-/* 			do_sched_trace_task_switch_away, t); \ */
-/* 		trace_litmus_switch_away(t); \ */
-/* 	} while (0) */
+#define sched_trace_task_switch_away(t) \
+	do { \
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, \
+			do_sched_trace_task_switch_away, t); \
+		trace_litmus_switch_away(t); \
+	} while (0)
 
 #define sched_trace_task_completion(t, forced) \
 	do { \
@@ -261,30 +264,30 @@ feather_callback void do_sched_trace_task_tardy(unsigned long id,
 		trace_litmus_task_completion(t, forced); \
 	} while (0)
 
-#define sched_trace_task_block_on(t, i)/* \ */
-/* 	do { \ */
-/* 		SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \ */
-/* 			do_sched_trace_task_block, t); \ */
-/* 		trace_litmus_task_block(t, i); \ */
-/* 	} while (0) */
+#define sched_trace_task_block_on(t, i) \
+	do { \
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, \
+			do_sched_trace_task_block, t); \
+		trace_litmus_task_block(t, i); \
+	} while (0)
 
 #define sched_trace_task_block(t) \
 	sched_trace_task_block_on(t, 0)
 
-#define sched_trace_task_resume_on(t, i)/* \ */
-/* 	do { \ */
-/* 		SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \ */
-/* 			do_sched_trace_task_resume, t); \ */
-/* 		trace_litmus_task_resume(t, i); \ */
-/* 	} while (0) */
+#define sched_trace_task_resume_on(t, i) \
+	do { \
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, \
+			do_sched_trace_task_resume, t); \
+		trace_litmus_task_resume(t, i); \
+	} while (0)
 
 #define sched_trace_task_resume(t) \
 	sched_trace_task_resume_on(t, 0)
 
-#define sched_trace_resource_acquire(t, i)/* \ */
-/* 	do { \ */
-/* 		trace_litmus_resource_acquire(t, i); \ */
-/* 	} while (0) */
+#define sched_trace_resource_acquire(t, i) \
+	do { \
+		trace_litmus_resource_acquire(t, i); \
+	} while (0)
 
 #define sched_trace_resource_released(t, i) \
 	do { \
diff --git a/litmus/Kconfig b/litmus/Kconfig
index e71f7a332f21..1ba3a1368f68 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -39,7 +39,7 @@ config PLUGIN_PFAIR
 config MERGE_TIMERS
 	bool "Timer-merging Support"
 	depends on HIGH_RES_TIMERS
-	default y
+	default n
 	help
 	  Include support for merging timers.
 
diff --git a/litmus/color.c b/litmus/color.c
index 26f800e785ae..c9826dd48ec2 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -16,7 +16,9 @@
 #include <litmus/color.h>
 #include <litmus/litmus.h> /* for in_list(...) */
 #include <litmus/clock.h>
+#ifndef CONFIG_CACHE_PL310
 #include <litmus/color_queue.h>
+#endif
 #include <litmus/way_tracker.h>
 #include <litmus/trace.h>
 
@@ -222,7 +224,13 @@ void reclaim_pages(struct vm_area_struct *vma)
 int color_sched_in_task(struct task_struct *t)
 {
 	color_page_info_take_ways(t);
+
+#ifdef CONFIG_CACHE_PL310
+	return color_cache_task(t);
+#else
 	return color_queue_enqueue_read(t);
+#endif
+
 }
 
 /*
@@ -231,7 +239,12 @@ int color_sched_in_task(struct task_struct *t)
 int color_sched_out_task(struct task_struct *t)
 {
 	color_page_info_release_ways(t);
+
+#ifdef CONFIG_CACHE_PL310
+	return color_uncache_task(t);
+#else
 	return color_queue_enqueue_flush(t);
+#endif
 }
 
 asmlinkage long sys_set_color_page_info(struct color_ctrl_page __user *user_color_ctrl)
diff --git a/litmus/color_queue.c b/litmus/color_queue.c
index 0b87217a931a..913b2f317c16 100644
--- a/litmus/color_queue.c
+++ b/litmus/color_queue.c
@@ -456,6 +456,40 @@ out_free:
 	return ret;
 }
 
+int color_cache_task(struct task_struct *t)
+{
+	struct color_page_info *info;
+	int npages = 0;
+
+	list_for_each_entry(info, &tsk_rt(t)->color_page_info_list, list) {
+		u32 lvalue = unlocked_way[info->way];
+		color_read_in_mem_lock(lvalue, LOCK_ALL,
+				info->vaddr, info->vaddr + PAGE_SIZE);
+		++npages;
+	}
+
+	return npages;
+}
+
+int color_uncache_task(struct task_struct *t)
+{
+	struct color_page_info *info;
+	int npages = 0;
+
+
+	list_for_each_entry(info, &tsk_rt(t)->color_page_info_list, list) {
+		u32 lvalue = unlocked_way[info->way];
+		void *vaddr = flusher_pages[info->way][info->color];
+
+		color_read_in_mem_lock(lvalue, LOCK_ALL,
+				vaddr, vaddr + PAGE_SIZE);
+
+		++npages;
+	}
+
+	return npages;
+}
+
 static int __init init_color_queue(void)
 {
 	struct cpu_entry *cpu_entry;
diff --git a/litmus/dgl.c b/litmus/dgl.c
index f9cede7ef546..3d6d3201928e 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -9,12 +9,10 @@
 
 #ifdef DEBUG_DGL
 #define TRACE(fmt, args...) STRACE(fmt, ## args)
-#define STRACE2(fmt, args...) STRACE(fmt, ## args)
 #define TRACE_GREQ(greq, fmt, args...) \
 	TRACE("(greq-%s/%llu) " fmt, (greq->task ? greq->task->comm : "greq"), \
 	      (greq->task ? greq->task->pid : (unsigned long long)greq), ## args)
 #else
-#define STRACE2(fmt, args...) printk(KERN_ERR fmt, ## args)
 #define TRACE(fmt, args...)
 #define TRACE_GREQ(greq, fmt, args...)
 #endif
@@ -72,53 +70,62 @@ static int arr_to_bool(struct dgl *dgl, unsigned long *arr)
 	return (ret != 0);
 }
 
-static void print_queue(struct dgl *dgl, struct list_head *list)
+#ifdef DEBUG_DGL
+#define DEBUG_BUF_LEN 10000
+DEFINE_PER_CPU(char *, debug_buf);
+
+static char* print_queue(char *buf, struct dgl *dgl, struct list_head *list)
 {
 	struct dgl_req *pos;
 	struct dgl_group_req *greq;
 
 	list_for_each_entry(pos, list, list) {
 		greq = pos->greq;
-		sched_trace_log_message("(%s-%d:r%d-p%x-b%x)->", greq->task->comm,
-				greq->task->pid, pos->replicas,
-				greq->need_prio[0],
-				greq->blocked[0]);
+		buf += sprintf(buf, "(%s/%d:r%d-p%x-b%x)->", greq->task->comm,
+			       greq->task->pid, pos->replicas,
+			       greq->need_prio[0], greq->blocked[0]);
 	}
-	sched_trace_log_message("\n");
+	buf += sprintf(buf, "\n");
+
+	return buf;
 }
 
-static void print_resource(struct dgl *dgl, struct dgl_resource *resource)
+static char* print_resource(char *buf, struct dgl *dgl,
+			    struct dgl_resource *resource)
 {
-	STRACE2("\tResource %d, real_free: %d, goal_free: %d\n",
-		resource_id(dgl, resource),
-		resource->real_free,
-		resource->goal_free);
-	STRACE2("\t acquired: ");
-	print_queue(dgl, &resource->acquired);
-	STRACE2("\t will_acquire:");
-	print_queue(dgl, &resource->will_acquire);
-	STRACE2("\t waiting:");
-	print_queue(dgl, &resource->waiting);
-	STRACE2("\t will_wait:");
-	print_queue(dgl, &resource->will_wait);
+	buf += sprintf(buf, "\tResource %d, real_free: %d, goal_free: %d\n",
+		       resource_id(dgl, resource),
+		       resource->real_free,
+		       resource->goal_free);
+	buf += sprintf(buf, "\t acquired:");
+	buf = print_queue(buf, dgl, &resource->acquired);
+	buf += sprintf(buf, "\t will_acquire:");
+	buf = print_queue(buf, dgl, &resource->will_acquire);
+	buf += sprintf(buf, "\t waiting:");
+	buf = print_queue(buf, dgl, &resource->waiting);
+	buf += sprintf(buf, "\t will_wait:");
+	buf = print_queue(buf, dgl, &resource->will_wait);
+	return buf;
 }
 
+
 /*
  * Print stats and queues of every resource to the trace log.
  */
 static void print_state(struct dgl *dgl)
 {
 	int i;
+	char *buf, *start;
 	struct dgl_resource *resource;
 
-	sched_trace_log_message("\n");
-	STRACE2("\t\tDGL: requests: %d\n", dgl->requests);
+	start = __get_cpu_var(debug_buf);
+	buf = start + sprintf(start, "\n\t\tDGL: requests: %d\n", dgl->requests);
 
 	for (i = 0; i < dgl->num_resources; ++i) {
 		resource = &dgl->resources[i];
 
 		if (!resource) {
-			STRACE2("\tResource %d is null!\n", i);
+			buf += sprintf(buf, "\tResource %d is null!\n", i);
 		}
 
 		if (!list_empty(&resource->waiting) ||
@@ -126,14 +133,16 @@ static void print_state(struct dgl *dgl)
 		    !list_empty(&resource->acquired) ||
 		    !list_empty(&resource->will_acquire)) {
 
-			print_resource(dgl, resource);
+			buf = print_resource(buf, dgl, resource);
 		}
 	}
-	STRACE2("Dump complete\n");
-	sched_trace_log_message("\n");
+
+	buf += sprintf(buf, "Dump complete\n\n");
+
+	BUG_ON(buf - start > DEBUG_BUF_LEN);
+	sched_trace_log_message(start);
 }
 
-#ifdef DEBUG_DGL
 #define BUG_DUMP(dgl, cond) \
 	do { \
 		if (cond) { \
@@ -142,10 +151,9 @@ static void print_state(struct dgl *dgl)
 			BUG(); \
 	}} while(0)
 #else
-#define BUG_DUMP(dgl, cond)
+#define BUG_DUMP(dgl, cond) BUG_ON(cond)
 #endif
 
-
 static int higher_prio(struct dgl_group_req *a, struct dgl_group_req *b)
 {
 	return (a->priority < b->priority ||
@@ -223,14 +231,14 @@ static int get_lp_replicas(struct dgl *dgl, struct dgl_group_req *greq,
  * Add @req to @list in priority order.
  * @reverse Reverse priority
  */
-static void add_request(struct list_head *list, struct dgl_req *req,
-			int reverse)
+static void add_request(struct dgl *dgl, struct list_head *list,
+			struct dgl_req *req, int reverse)
 {
 	struct list_head *last;
 	struct dgl_req *acquired;
 	struct dgl_group_req *greqa, *greqb;
 
-	BUG_ON(in_list(&req->list));
+	BUG_DUMP(dgl, in_list(&req->list));
 
 	last = list;
 	list_for_each_entry(acquired, list, list) {
@@ -255,7 +263,7 @@ static void add_request(struct list_head *list, struct dgl_req *req,
 static void add_waiting(struct dgl *dgl, struct list_head *list, struct dgl_req *req)
 {
 	BUG_DUMP(dgl, !arr_to_bool(dgl, req->greq->need_prio));
-	add_request(list, req, 0);
+	add_request(dgl, list, req, 0);
 }
 
 /*
@@ -264,7 +272,7 @@ static void add_waiting(struct dgl *dgl, struct list_head *list, struct dgl_req
 static void add_acquired(struct dgl *dgl, struct list_head *list, struct dgl_req *req)
 {
 	BUG_DUMP(dgl, arr_to_bool(dgl, req->greq->need_prio));
-	add_request(list, req, 1);
+	add_request(dgl, list, req, 1);
 }
 
 /*
@@ -888,3 +896,21 @@ void dgl_group_req_free(struct dgl_group_req *greq)
 	kfree(greq->need_prio);
 	kfree(greq->requests);
 }
+
+#ifdef DEBUG_DGL
+static int __init init_dgl_debug(void)
+{
+	int cpu;
+	char **buf;
+
+	for_each_online_cpu(cpu) {
+		buf = &per_cpu(debug_buf, cpu);
+		*buf = kmalloc(DEBUG_BUF_LEN * sizeof(char), GFP_ATOMIC);
+	}
+
+	return 0;
+}
+
+module_init(init_dgl_debug);
+
+#endif
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
index bb93690c00c9..09712554c5b9 100644
--- a/litmus/lockdown.c
+++ b/litmus/lockdown.c
@@ -77,6 +77,7 @@ void litmus_setup_lockdown(void __iomem *base, u32 id)
 #else
 
 static void __iomem *cache_base;
+
 static void __iomem *lockreg_d;
 static void __iomem *lockreg_i;
 static raw_spinlock_t prefetch_lock;
@@ -113,7 +114,11 @@ u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
 	: [addr] "+r" (start),
 	  [val] "+r" (v)
 	: [end] "r" (end),
+#ifdef CONFIG_CACHE_PL310
+	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
+#else
 	  [cachereg] "r" (lockreg_d),
+#endif
 	  [lockval] "r" (lock_val)
 	: "cc");
 
@@ -137,7 +142,10 @@ u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
 	unsigned long flags;
 	u32 v = 0;
 
+#ifndef CONFIG_CACHE_PL310
 	raw_spin_lock_irqsave(&prefetch_lock, flags);
+#endif
+
 	__asm__ __volatile__ (
 "	.align 5\n"
 "	str	%[lockval], [%[cachereg]]\n"
@@ -148,11 +156,18 @@ u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
 	: [addr] "+r" (start),
 	  [val] "+r" (v)
 	: [end] "r" (end),
+#ifdef CONFIG_CACHE_PL310
+	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
+#else
 	  [cachereg] "r" (lockreg_d),
+#endif
 	  [lockval] "r" (lock_val),
 	  [unlockval] "r" (unlock_val)
 	: "cc");
+
+#ifndef CONFIG_CACHE_PL310
 	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
+#endif
 
 	return v;
 }
@@ -330,9 +345,16 @@ int litmus_lockdown_proc_handler(struct ctl_table *table, int write,
 
 #define TRIALS 1000
 
+static void sleep_ns(int ns)
+{
+	int i;
+	lt_t start = litmus_clock();
+	for (i = 0; litmus_clock() - start < ns;i++);
+}
+
 static int test_get_cycles_overhead(void)
 {
-	u64 sum = 0, min = (u64)-1, max = 0;
+	u64 avg = 0, min = (u64)-1, max = 0;
 	unsigned long flags;
 	cycles_t a, b;
 	int i;
@@ -342,6 +364,7 @@ static int test_get_cycles_overhead(void)
 		local_irq_save(flags);
 		preempt_disable();
 		a = litmus_get_cycles();
+		sleep_ns(15000);
 		b = litmus_get_cycles();
 		preempt_enable();
 		local_irq_restore(flags);
@@ -350,10 +373,11 @@ static int test_get_cycles_overhead(void)
 			max = diff;
 		if (diff < min)
 			min = diff;
-		sum += diff;
+		avg += div64_u64(diff, TRIALS);
 	}
-	printk("cycle test: avg: %llu min: %llu max: %llu\n",
-	       div64_u64(sum, TRIALS), min, max);
+	printk("cycle test 15us: avg: %llu min: %llu max: %llu\n",
+	       avg, min, max);
+
 	return 0;
 }
 
@@ -661,7 +685,7 @@ int litmus_test_prefetch_proc_handler(struct ctl_table *table, int write,
 
 	free_page((unsigned long)vaddr);
 
-	//test_get_cycles_overhead();
+	test_get_cycles_overhead();
 	test_read_in();
 
 	return 0;
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 64de4ef9c466..6edf86935a29 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -30,7 +30,7 @@
 #include <litmus/dgl.h>
 #include <litmus/color.h>
 #include <litmus/way_tracker.h>
-
+#warning "MUST ADD CHECK FOR MAX WAYS"
 struct mc_signal {
 	int update:1;
 	int preempt:1;
@@ -58,6 +58,10 @@ struct cpu_entry {
 #endif
 };
 
+#ifndef CONFIG_NP_SECTION
+#error "Need NP section for coloring!"
+#endif
+
 static struct dgl group_lock;
 static raw_spinlock_t dgl_lock;
 
@@ -97,7 +101,6 @@ static int acquire_resources(struct task_struct *t)
 
 	BUG_ON(tsk_rt(t)->linked_on == NO_CPU);
 
-
 	raw_spin_lock(&dgl_lock);
 
 	cpu = tsk_rt(t)->linked_on;
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
index f4171fddbbb1..5acb4a48fb6b 100644
--- a/litmus/sched_trace.c
+++ b/litmus/sched_trace.c
@@ -26,7 +26,7 @@
 
 /* Max length for one write --- by TRACE() --- to the buffer. This is used to
  * allocate a per-cpu buffer for printf() formatting. */
-#define MSG_SIZE 255
+#define MSG_SIZE 10000
 
 
 static DEFINE_MUTEX(reader_mutex);
@@ -35,7 +35,7 @@ static DEFINE_KFIFO(debug_buffer, char, LITMUS_TRACE_BUF_SIZE);
 
 
 static DEFINE_RAW_SPINLOCK(log_buffer_lock);
-static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer);
+static DEFINE_PER_CPU(char*, fmt_buffer);
 
 /*
  * sched_trace_log_message - Write to the trace buffer (log_buffer)
@@ -230,8 +230,17 @@ static struct sysrq_key_op sysrq_dump_trace_buffer_op = {
 
 static int __init init_sched_trace(void)
 {
+	int cpu;
+	char **buf;
+
+
 	printk("Initializing TRACE() device\n");
 
+	for_each_online_cpu(cpu) {
+		buf = &per_cpu(fmt_buffer, cpu);
+		*buf = kmalloc(MSG_SIZE * sizeof(char), GFP_ATOMIC);
+	}
+
 #ifdef CONFIG_MAGIC_SYSRQ
 	/* offer some debugging help */
 	if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op))
diff --git a/litmus/way_tracker.c b/litmus/way_tracker.c
index a9e8ef62f37e..ff392ab09c4d 100644
--- a/litmus/way_tracker.c
+++ b/litmus/way_tracker.c
@@ -6,6 +6,7 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/list.h>
+#include <linux/kgdb.h>
 
 #include <litmus/litmus.h>
 #include <litmus/color.h>
@@ -38,7 +39,9 @@ static int take_next_way(unsigned int color)
 		clear_bit(idx, &ways[color]);
 		TRACE("Took, now %lu free of color %d\n", hweight_long(ways[color]), color);
 	} else {
+		printk(KERN_WARNING "Vury bad\n");
 		/* Seriously bad. */
+		kgdb_breakpoint();
 		BUG();
 	}
 	raw_spin_unlock(&lock);