aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristopher Kenna <cjk@cs.unc.edu>2012-10-22 16:17:48 -0400
committerChristopher Kenna <cjk@cs.unc.edu>2012-10-22 16:19:29 -0400
commit2b4b7e1362910eda268ea25124c83064119a522c (patch)
tree64927e655e3d6121fbfd9c8449c1f489a54ce7e0
parentf47cc33e99cec21a323d751abc4a38afb7ef5da2 (diff)
Bugfixes for ARM systems.
Compile with CONFIG_NP_SECTION=y. Signed-off-by: Christopher Kenna <cjk@cs.unc.edu>
-rw-r--r--include/litmus/clock.h2
-rw-r--r--litmus/color.c1
-rw-r--r--litmus/dgl.c13
-rw-r--r--litmus/lockdown.c3
-rw-r--r--litmus/sched_mc.c32
5 files changed, 28 insertions, 23 deletions
diff --git a/include/litmus/clock.h b/include/litmus/clock.h
index d6f1cfd2ca60..9b285db7f60c 100644
--- a/include/litmus/clock.h
+++ b/include/litmus/clock.h
@@ -24,6 +24,8 @@ static inline s64 litmus_cycles_to_ns(cycles_t cycles)
24 24
25#elif defined(CONFIG_CPU_V7) && !defined(CONFIG_HW_PERF_EVENTS) 25#elif defined(CONFIG_CPU_V7) && !defined(CONFIG_HW_PERF_EVENTS)
26 26
27#include <asm/timex.h>
28
27static inline cycles_t v7_get_cycles (void) 29static inline cycles_t v7_get_cycles (void)
28{ 30{
29 u32 value; 31 u32 value;
diff --git a/litmus/color.c b/litmus/color.c
index 8b160258e81d..2ba67f1dc06f 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -18,6 +18,7 @@
18#include <litmus/clock.h> 18#include <litmus/clock.h>
19#include <litmus/color_queue.h> 19#include <litmus/color_queue.h>
20#include <litmus/way_tracker.h> 20#include <litmus/way_tracker.h>
21#include <litmus/trace.h>
21 22
22#define PAGES_PER_COLOR 1024 23#define PAGES_PER_COLOR 1024
23 24
diff --git a/litmus/dgl.c b/litmus/dgl.c
index 85250676572d..f6580a9234dc 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -114,9 +114,13 @@ static void print_state(struct dgl *dgl)
114 sched_trace_log_message("\n"); 114 sched_trace_log_message("\n");
115 STRACE2("\t\tDGL: requests: %d\n", dgl->requests); 115 STRACE2("\t\tDGL: requests: %d\n", dgl->requests);
116 116
117 for (i = 0; i < dgl->num_resources; i++) { 117 for (i = 0; i < dgl->num_resources; ++i) {
118 resource = &dgl->resources[i]; 118 resource = &dgl->resources[i];
119 119
120 if (!resource) {
121 STRACE2("\tResource %d is null!\n", i);
122 }
123
120 if (!list_empty(&resource->waiting) || 124 if (!list_empty(&resource->waiting) ||
121 !list_empty(&resource->will_wait) || 125 !list_empty(&resource->will_wait) ||
122 !list_empty(&resource->acquired) || 126 !list_empty(&resource->acquired) ||
@@ -125,6 +129,7 @@ static void print_state(struct dgl *dgl)
125 print_resource(dgl, resource); 129 print_resource(dgl, resource);
126 } 130 }
127 } 131 }
132 STRACE2("Dump complete\n");
128 sched_trace_log_message("\n"); 133 sched_trace_log_message("\n");
129} 134}
130 135
@@ -132,7 +137,7 @@ static void print_state(struct dgl *dgl)
132#define BUG_DUMP(dgl, cond) \ 137#define BUG_DUMP(dgl, cond) \
133 do { \ 138 do { \
134 if (cond) { \ 139 if (cond) { \
135 TRACE("BAD: %s", #cond); \ 140 STRACE2("BAD: %s", #cond); \
136 print_state(dgl); \ 141 print_state(dgl); \
137 BUG(); \ 142 BUG(); \
138 }} while(0) 143 }} while(0)
@@ -694,8 +699,6 @@ void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu)
694 } 699 }
695 } 700 }
696 } 701 }
697
698 print_state(dgl);
699} 702}
700 703
701/** 704/**
@@ -715,7 +718,6 @@ void update_group_req(struct dgl *dgl, struct dgl_group_req *greq)
715 718
716 BUG_DUMP(dgl, arr_to_bool(dgl, greq->blocked) && 719 BUG_DUMP(dgl, arr_to_bool(dgl, greq->blocked) &&
717 dgl->acquired[greq->cpu] == greq); 720 dgl->acquired[greq->cpu] == greq);
718 print_state(dgl);
719} 721}
720 722
721/** 723/**
@@ -773,7 +775,6 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq)
773 } 775 }
774 776
775 --dgl->requests; 777 --dgl->requests;
776 print_state(dgl);
777} 778}
778 779
779/** 780/**
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
index bd24b5839bc9..d0dee3ae2d83 100644
--- a/litmus/lockdown.c
+++ b/litmus/lockdown.c
@@ -23,6 +23,9 @@
23#include <litmus/debug_trace.h> 23#include <litmus/debug_trace.h>
24#include <litmus/lockdown.h> 24#include <litmus/lockdown.h>
25 25
26#ifndef litmus_cycles_to_ns
27#define litmus_cycles_to_ns(x) 0
28#endif
26 29
27#define MAX_NR_WAYS 16 30#define MAX_NR_WAYS 16
28 31
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index c8677f688f7c..2d0e8da92377 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -62,11 +62,7 @@ static raw_spinlock_t dgl_lock;
62 62
63DEFINE_PER_CPU(struct cpu_entry, cpus); 63DEFINE_PER_CPU(struct cpu_entry, cpus);
64static int interrupt_cpu; 64static int interrupt_cpu;
65#ifdef CONFIG_NP_SECTION
66#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) 65#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c])
67#else
68#define has_resources(t, c) (1)
69#endif
70 66
71#define domain_data(dom) (container_of(dom, struct domain_data, domain)) 67#define domain_data(dom) (container_of(dom, struct domain_data, domain))
72#define is_global(dom) (domain_data(dom)->heap) 68#define is_global(dom) (domain_data(dom)->heap)
@@ -211,7 +207,7 @@ static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
211 * Update crit entry position in a global heap. Caller must hold 207 * Update crit entry position in a global heap. Caller must hold
212 * @ce's domain lock. 208 * @ce's domain lock.
213 */ 209 */
214static inline void update_crit_position(struct crit_entry *ce) 210static void update_crit_position(struct crit_entry *ce)
215{ 211{
216 struct bheap *heap; 212 struct bheap *heap;
217 if (is_global(ce->domain)) { 213 if (is_global(ce->domain)) {
@@ -240,7 +236,7 @@ static void fix_crit_position(struct crit_entry *ce)
240 * Return next CPU which should preempted or NULL if the domain has no 236 * Return next CPU which should preempted or NULL if the domain has no
241 * preemptable CPUs. Caller must hold the @dom lock. 237 * preemptable CPUs. Caller must hold the @dom lock.
242 */ 238 */
243static inline struct crit_entry* lowest_prio_cpu(struct domain *dom) 239static struct crit_entry* lowest_prio_cpu(struct domain *dom)
244{ 240{
245 struct bheap *heap = domain_data(dom)->heap; 241 struct bheap *heap = domain_data(dom)->heap;
246 struct bheap_node* hn; 242 struct bheap_node* hn;
@@ -1003,6 +999,8 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
1003 dgl_group_req_init(&group_lock, req); 999 dgl_group_req_init(&group_lock, req);
1004 for (i = 0; ccp->pages[i]; ++i) 1000 for (i = 0; ccp->pages[i]; ++i)
1005 set_req(&group_lock, req, ccp->colors[i], ccp->pages[i]); 1001 set_req(&group_lock, req, ccp->colors[i], ccp->pages[i]);
1002 } else {
1003 BUG_ON(CRIT_LEVEL_B == tsk_mc_crit(t));
1006 } 1004 }
1007 1005
1008 /* Setup job params */ 1006 /* Setup job params */
@@ -1108,12 +1106,12 @@ static long mc_admit_task(struct task_struct* task)
1108 ret = -EINVAL; 1106 ret = -EINVAL;
1109 goto out; 1107 goto out;
1110 } 1108 }
1111 if (crit < CRIT_LEVEL_C && get_partition(task) == interrupt_cpu) { 1109 /* if (crit < CRIT_LEVEL_C && get_partition(task) == interrupt_cpu) { */
1112 printk(KERN_WARNING "Tried to admit partitioned task on " 1110 /* printk(KERN_WARNING "Tried to admit partitioned task on " */
1113 "the interrupt master\n"); 1111 /* "the interrupt master\n"); */
1114 ret = -EINVAL; 1112 /* ret = -EINVAL; */
1115 goto out; 1113 /* goto out; */
1116 } 1114 /* } */
1117 if (crit == CRIT_LEVEL_A) { 1115 if (crit == CRIT_LEVEL_A) {
1118 ret = mc_ce_admit_task_common(task); 1116 ret = mc_ce_admit_task_common(task);
1119 if (ret) 1117 if (ret)
@@ -1529,8 +1527,8 @@ static void mc_release_ts(lt_t time)
1529 level = CRIT_LEVEL_A; 1527 level = CRIT_LEVEL_A;
1530 strcpy(name, "LVL-A"); 1528 strcpy(name, "LVL-A");
1531 for_each_online_cpu(cpu) { 1529 for_each_online_cpu(cpu) {
1532 if (cpu == interrupt_cpu) 1530 /* if (cpu == interrupt_cpu) */
1533 continue; 1531 /* continue; */
1534 entry = &per_cpu(cpus, cpu); 1532 entry = &per_cpu(cpus, cpu);
1535 sched_trace_container_param(++cont_id, (const char*)&name); 1533 sched_trace_container_param(++cont_id, (const char*)&name);
1536 ce = &entry->crit_entries[level]; 1534 ce = &entry->crit_entries[level];
@@ -1541,8 +1539,8 @@ static void mc_release_ts(lt_t time)
1541 level = CRIT_LEVEL_B; 1539 level = CRIT_LEVEL_B;
1542 strcpy(name, "LVL-B"); 1540 strcpy(name, "LVL-B");
1543 for_each_online_cpu(cpu) { 1541 for_each_online_cpu(cpu) {
1544 if (cpu == interrupt_cpu) 1542 /* if (cpu == interrupt_cpu) */
1545 continue; 1543 /* continue; */
1546 entry = &per_cpu(cpus, cpu); 1544 entry = &per_cpu(cpus, cpu);
1547 sched_trace_container_param(++cont_id, (const char*)&name); 1545 sched_trace_container_param(++cont_id, (const char*)&name);
1548 ce = &entry->crit_entries[level]; 1546 ce = &entry->crit_entries[level];
@@ -1631,7 +1629,7 @@ static void init_global_domain(struct domain_data *dom_data, enum crit_level lev
1631 } 1629 }
1632} 1630}
1633 1631
1634static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt, 1632static void init_edf_domain(struct domain *dom, rt_domain_t *rt,
1635 enum crit_level prio, int is_partitioned, int cpu) 1633 enum crit_level prio, int is_partitioned, int cpu)
1636{ 1634{
1637 pd_domain_init(dom, rt, edf_ready_order, NULL, 1635 pd_domain_init(dom, rt, edf_ready_order, NULL,