about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJonathan Herman <hermanjl@cs.unc.edu>2012-05-13 16:15:33 -0400
committerJonathan Herman <hermanjl@cs.unc.edu>2012-05-13 16:15:33 -0400
commit2d8f8176515f2516b9a0b85642a7b842eb53552b (patch)
tree0a434e9f3e5af613802db75f6811dd6fb1122f95
parent1fde4dd4de048d7fbfe3e1418f4a76c62423ad95 (diff)
Can disable nonpreemptivity
-rw-r--r--include/litmus/litmus.h18
-rw-r--r--litmus/Kconfig2
-rw-r--r--litmus/dgl.c4
-rw-r--r--litmus/sched_color.c22
4 files changed, 37 insertions, 9 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index f0ddb89e68dd..9fcf1f45820f 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -169,7 +169,7 @@ static inline void request_exit_np(struct task_struct *t)
169 169
170static inline void make_np(struct task_struct *t) 170static inline void make_np(struct task_struct *t)
171{ 171{
172 tsk_rt(t)->kernel_np++; 172 tsk_rt(t)->kernel_np = 1;
173} 173}
174 174
175/* Caller should check if preemption is necessary when 175/* Caller should check if preemption is necessary when
@@ -177,7 +177,7 @@ static inline void make_np(struct task_struct *t)
177 */ 177 */
178static inline int take_np(struct task_struct *t) 178static inline int take_np(struct task_struct *t)
179{ 179{
180 return --tsk_rt(t)->kernel_np; 180 return tsk_rt(t)->kernel_np = 0;
181} 181}
182 182
183/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */ 183/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
@@ -210,6 +210,20 @@ static inline int request_exit_np_atomic(struct task_struct *t)
210 210
211#else 211#else
212 212
213
214static inline void make_np(struct task_struct *t)
215{
216
217}
218
219/* Caller should check if preemption is necessary when
220 * the function return 0.
221 */
222static inline int take_np(struct task_struct *t)
223{
224 return 0;
225}
226
213static inline int is_kernel_np(struct task_struct* t) 227static inline int is_kernel_np(struct task_struct* t)
214{ 228{
215 return 0; 229 return 0;
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 272b64c30ba3..f0c48a6a3efb 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -14,7 +14,6 @@ config PLUGIN_CEDF
14 14
15config PLUGIN_COLOR 15config PLUGIN_COLOR
16 bool "Scheduling with Colors" 16 bool "Scheduling with Colors"
17 depends on NP_SECTION
18 default y 17 default y
19 help 18 help
20 Include the scheduling with colors scheduler. 19 Include the scheduling with colors scheduler.
@@ -65,7 +64,6 @@ config NP_SECTION
65 64
66config LITMUS_LOCKING 65config LITMUS_LOCKING
67 bool "Support for real-time locking protocols" 66 bool "Support for real-time locking protocols"
68 depends on NP_SECTION
69 default n 67 default n
70 help 68 help
71 Enable LITMUS^RT's deterministic multiprocessor real-time 69 Enable LITMUS^RT's deterministic multiprocessor real-time
diff --git a/litmus/dgl.c b/litmus/dgl.c
index 2df27b48fcdf..0c1ce73868e3 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -126,6 +126,10 @@ void set_req(struct dgl *dgl, struct dgl_group_req *greq,
126 126
127 BUG_ON(replicas > dgl->num_replicas); 127 BUG_ON(replicas > dgl->num_replicas);
128 128
129#ifndef CONFIG_NP_SECTION
130 BUG_ON(1);
131#endif
132
129 mask_idx(resource, &word, &bit); 133 mask_idx(resource, &word, &bit);
130 __set_bit(bit, &greq->requested[word]); 134 __set_bit(bit, &greq->requested[word]);
131 135
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index 29b2be97da52..8554fde49c0b 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -57,11 +57,19 @@ static raw_spinlock_t dgl_lock;
57#define task_fserver(task) (&task_entry(task)->fifo_server.server) 57#define task_fserver(task) (&task_entry(task)->fifo_server.server)
58#define entry_lock(entry) (&entry->rm_domain.ready_lock) 58#define entry_lock(entry) (&entry->rm_domain.ready_lock)
59 59
60#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) 60
61#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->rm_domain) 61#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->rm_domain)
62#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry)) 62#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry))
63#define is_fifo_server(s) (s->sid > num_online_cpus()) 63#define is_fifo_server(s) (s->sid > num_online_cpus())
64 64
65#ifdef CONFIG_NP_SECTION
66#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c])
67#else
68#define has_resources(t, c) (1)
69#endif
70
71
72
65/* 73/*
66 * Requeue onto domain's release or ready queue based on task state. 74 * Requeue onto domain's release or ready queue based on task state.
67 */ 75 */
@@ -90,6 +98,7 @@ static void requeue(rt_domain_t *dom, struct task_struct* t)
90static void release_resources(struct task_struct *t) 98static void release_resources(struct task_struct *t)
91{ 99{
92 struct task_struct *sched; 100 struct task_struct *sched;
101#ifdef CONFIG_NP_SECTION
93 102
94 TRACE_TASK(t, "Releasing resources\n"); 103 TRACE_TASK(t, "Releasing resources\n");
95 104
@@ -99,7 +108,8 @@ static void release_resources(struct task_struct *t)
99 release_resources(sched); 108 release_resources(sched);
100 } else if (is_kernel_np(t)) 109 } else if (is_kernel_np(t))
101 remove_group_req(&group_lock, tsk_rt(t)->req); 110 remove_group_req(&group_lock, tsk_rt(t)->req);
102 tsk_rt(t)->kernel_np = 0; 111 take_np(t);
112#endif
103} 113}
104 114
105/* 115/*
@@ -112,6 +122,7 @@ static void acquire_resources(struct task_struct *t)
112 struct rt_server *server; 122 struct rt_server *server;
113 struct task_struct *sched; 123 struct task_struct *sched;
114 124
125#ifdef CONFIG_NP_SECTION
115 /* Can't acquire resources if t is not running */ 126 /* Can't acquire resources if t is not running */
116 BUG_ON(!get_task_server(t)); 127 BUG_ON(!get_task_server(t));
117 128
@@ -134,17 +145,18 @@ static void acquire_resources(struct task_struct *t)
134 /* Become np if there is a running task */ 145 /* Become np if there is a running task */
135 if (sched && has_resources(sched, cpu)) { 146 if (sched && has_resources(sched, cpu)) {
136 TRACE_TASK(t, "Running task with resource\n"); 147 TRACE_TASK(t, "Running task with resource\n");
137 tsk_rt(t)->kernel_np = 1; 148 make_np(t);
138 } else { 149 } else {
139 TRACE_TASK(t, "Running no resources\n"); 150 TRACE_TASK(t, "Running no resources\n");
140 tsk_rt(t)->kernel_np = 0; 151 take_np(t);
141 } 152 }
142 } else { 153 } else {
143 TRACE_TASK(t, "Acquiring resources\n"); 154 TRACE_TASK(t, "Acquiring resources\n");
144 if (!has_resources(t, cpu)) 155 if (!has_resources(t, cpu))
145 add_group_req(&group_lock, tsk_rt(t)->req, cpu); 156 add_group_req(&group_lock, tsk_rt(t)->req, cpu);
146 tsk_rt(t)->kernel_np = 1; 157 make_np(t);
147 } 158 }
159#endif
148} 160}
149 161
150/* 162/*