author    Andrea Bastoni <bastoni@cs.unc.edu>  2009-12-17 21:23:36 -0500
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-29 17:05:45 -0400
commit    4b38febbd59fd33542a343991262119eb9860f5e (patch)
tree      1af88a0d354abe344c2c2869631f76a1806d75c3 /litmus
parent    22763c5cf3690a681551162c15d34d935308c8d7 (diff)
[ported from 2008.3] Core LITMUS^RT infrastructure
Port the 2008.3 core LITMUS^RT infrastructure to Linux 2.6.32.

litmus_sched_class implements 4 new methods:
- prio_changed: void
- switched_to: void
- get_rr_interval: return infinity (i.e., 0)
- select_task_rq: return current cpu
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/Kconfig        |  50
-rw-r--r--  litmus/Makefile       |  12
-rw-r--r--  litmus/ft_event.c     |  43
-rw-r--r--  litmus/heap.c         | 314
-rw-r--r--  litmus/jobs.c         |  43
-rw-r--r--  litmus/litmus.c       | 654
-rw-r--r--  litmus/sched_litmus.c | 275
-rw-r--r--  litmus/sched_plugin.c | 199
8 files changed, 1590 insertions(+), 0 deletions(-)
diff --git a/litmus/Kconfig b/litmus/Kconfig
new file mode 100644
index 000000000000..f8c642658a2f
--- /dev/null
+++ b/litmus/Kconfig
@@ -0,0 +1,50 @@
1menu "LITMUS^RT"
2
3menu "Tracing"
4
5config FEATHER_TRACE
6 bool "Feather-Trace Infrastructure"
7 default y
8 help
9 Feather-Trace basic tracing infrastructure. Includes device file
10 driver and instrumentation point support.
11
12
13config SCHED_TASK_TRACE
14 bool "Trace real-time tasks"
15 depends on FEATHER_TRACE
16 default y
17 help
18 Include support for the sched_trace_XXX() tracing functions. This
19 allows the collection of real-time task events such as job
20 completions, job releases, early completions, etc. This results in a
21 small overhead in the scheduling code. Disable if the overhead is not
22 acceptable (e.g., benchmarking).
23
24 Say Yes for debugging.
25 Say No for overhead tracing.
26
27config SCHED_OVERHEAD_TRACE
28 bool "Record timestamps for overhead measurements"
29 depends on FEATHER_TRACE
30 default n
31 help
32 Export event stream for overhead tracing.
33 Say Yes for overhead tracing.
34
35config SCHED_DEBUG_TRACE
36 bool "TRACE() debugging"
37 default y
38 help
39 Include support for sched_trace_log_message(), which is used to
40 implement TRACE(). If disabled, no TRACE() messages will be included
41 in the kernel, and no overheads due to debugging statements will be
42 incurred by the scheduler. Disable if the overhead is not acceptable
43 (e.g. benchmarking).
44
45 Say Yes for debugging.
46 Say No for overhead tracing.
47
48endmenu
49
50endmenu
diff --git a/litmus/Makefile b/litmus/Makefile
new file mode 100644
index 000000000000..f4c2d564cd0b
--- /dev/null
+++ b/litmus/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for LITMUS^RT
3#
4
5obj-y = sched_plugin.o litmus.o \
6 jobs.o \
7 heap.o
8
9obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
10obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
11obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
12obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
diff --git a/litmus/ft_event.c b/litmus/ft_event.c
new file mode 100644
index 000000000000..6084b6d6b364
--- /dev/null
+++ b/litmus/ft_event.c
@@ -0,0 +1,43 @@
1#include <linux/types.h>
2
3#include <litmus/feather_trace.h>
4
5#ifndef __ARCH_HAS_FEATHER_TRACE
6/* provide dummy implementation */
7
8int ft_events[MAX_EVENTS];
9
10int ft_enable_event(unsigned long id)
11{
12 if (id < MAX_EVENTS) {
13 ft_events[id]++;
14 return 1;
15 } else
16 return 0;
17}
18
19int ft_disable_event(unsigned long id)
20{
21 if (id < MAX_EVENTS && ft_events[id]) {
22 ft_events[id]--;
23 return 1;
24 } else
25 return 0;
26}
27
28int ft_disable_all_events(void)
29{
30 int i;
31
32 for (i = 0; i < MAX_EVENTS; i++)
33 ft_events[i] = 0;
34
35 return MAX_EVENTS;
36}
37
38int ft_is_event_enabled(unsigned long id)
39{
40 return id < MAX_EVENTS && ft_events[id];
41}
42
43#endif
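As context for the fallback above: without architecture support, Feather-Trace events degrade to plain per-event reference counters. A minimal sketch of how a caller might exercise this API follows; the event ID 42 and the smoke-test function are invented for illustration, only the ft_*() calls come from ft_event.c above.

#include <linux/kernel.h>
#include <litmus/feather_trace.h>

/* Illustrative only: enable a hypothetical event, query it, disable it again. */
static void ft_event_smoke_test(void)
{
	unsigned long id = 42;	/* hypothetical event ID, must be < MAX_EVENTS */

	if (ft_enable_event(id))
		printk(KERN_INFO "event %lu enabled: %d\n",
		       id, ft_is_event_enabled(id));

	ft_disable_event(id);	/* drops the reference count again */
}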
diff --git a/litmus/heap.c b/litmus/heap.c
new file mode 100644
index 000000000000..112d14da46c3
--- /dev/null
+++ b/litmus/heap.c
@@ -0,0 +1,314 @@
1#include <linux/kernel.h>
2#include <litmus/heap.h>
3
4void heap_init(struct heap* heap)
5{
6 heap->head = NULL;
7 heap->min = NULL;
8}
9
10void heap_node_init(struct heap_node** _h, void* value)
11{
12 struct heap_node* h = *_h;
13 h->parent = NULL;
14 h->next = NULL;
15 h->child = NULL;
16 h->degree = NOT_IN_HEAP;
17 h->value = value;
18 h->ref = _h;
19}
20
21
22/* make child a subtree of root */
23static void __heap_link(struct heap_node* root,
24 struct heap_node* child)
25{
26 child->parent = root;
27 child->next = root->child;
28 root->child = child;
29 root->degree++;
30}
31
32/* merge root lists */
33static struct heap_node* __heap_merge(struct heap_node* a,
34 struct heap_node* b)
35{
36 struct heap_node* head = NULL;
37 struct heap_node** pos = &head;
38
39 while (a && b) {
40 if (a->degree < b->degree) {
41 *pos = a;
42 a = a->next;
43 } else {
44 *pos = b;
45 b = b->next;
46 }
47 pos = &(*pos)->next;
48 }
49 if (a)
50 *pos = a;
51 else
52 *pos = b;
53 return head;
54}
55
56/* reverse a linked list of nodes. also clears parent pointer */
57static struct heap_node* __heap_reverse(struct heap_node* h)
58{
59 struct heap_node* tail = NULL;
60 struct heap_node* next;
61
62 if (!h)
63 return h;
64
65 h->parent = NULL;
66 while (h->next) {
67 next = h->next;
68 h->next = tail;
69 tail = h;
70 h = next;
71 h->parent = NULL;
72 }
73 h->next = tail;
74 return h;
75}
76
77static void __heap_min(heap_prio_t higher_prio, struct heap* heap,
78 struct heap_node** prev, struct heap_node** node)
79{
80 struct heap_node *_prev, *cur;
81 *prev = NULL;
82
83 if (!heap->head) {
84 *node = NULL;
85 return;
86 }
87
88 *node = heap->head;
89 _prev = heap->head;
90 cur = heap->head->next;
91 while (cur) {
92 if (higher_prio(cur, *node)) {
93 *node = cur;
94 *prev = _prev;
95 }
96 _prev = cur;
97 cur = cur->next;
98 }
99}
100
101static void __heap_union(heap_prio_t higher_prio, struct heap* heap,
102 struct heap_node* h2)
103{
104 struct heap_node* h1;
105 struct heap_node *prev, *x, *next;
106 if (!h2)
107 return;
108 h1 = heap->head;
109 if (!h1) {
110 heap->head = h2;
111 return;
112 }
113 h1 = __heap_merge(h1, h2);
114 prev = NULL;
115 x = h1;
116 next = x->next;
117 while (next) {
118 if (x->degree != next->degree ||
119 (next->next && next->next->degree == x->degree)) {
120 /* nothing to do, advance */
121 prev = x;
122 x = next;
123 } else if (higher_prio(x, next)) {
124 /* x becomes the root of next */
125 x->next = next->next;
126 __heap_link(x, next);
127 } else {
128 /* next becomes the root of x */
129 if (prev)
130 prev->next = next;
131 else
132 h1 = next;
133 __heap_link(next, x);
134 x = next;
135 }
136 next = x->next;
137 }
138 heap->head = h1;
139}
140
141static struct heap_node* __heap_extract_min(heap_prio_t higher_prio,
142 struct heap* heap)
143{
144 struct heap_node *prev, *node;
145 __heap_min(higher_prio, heap, &prev, &node);
146 if (!node)
147 return NULL;
148 if (prev)
149 prev->next = node->next;
150 else
151 heap->head = node->next;
152 __heap_union(higher_prio, heap, __heap_reverse(node->child));
153 return node;
154}
155
156/* insert (and reinitialize) a node into the heap */
157void heap_insert(heap_prio_t higher_prio, struct heap* heap,
158 struct heap_node* node)
159{
160 struct heap_node *min;
161 node->child = NULL;
162 node->parent = NULL;
163 node->next = NULL;
164 node->degree = 0;
165 if (heap->min && higher_prio(node, heap->min)) {
166 /* swap min cache */
167 min = heap->min;
168 min->child = NULL;
169 min->parent = NULL;
170 min->next = NULL;
171 min->degree = 0;
172 __heap_union(higher_prio, heap, min);
173 heap->min = node;
174 } else
175 __heap_union(higher_prio, heap, node);
176}
177
178void heap_uncache_min(heap_prio_t higher_prio, struct heap* heap)
179{
180 struct heap_node* min;
181 if (heap->min) {
182 min = heap->min;
183 heap->min = NULL;
184 heap_insert(higher_prio, heap, min);
185 }
186}
187
188/* merge addition into target */
189void heap_union(heap_prio_t higher_prio,
190 struct heap* target, struct heap* addition)
191{
192 /* first insert any cached minima, if necessary */
193 heap_uncache_min(higher_prio, target);
194 heap_uncache_min(higher_prio, addition);
195 __heap_union(higher_prio, target, addition->head);
196 /* this is a destructive merge */
197 addition->head = NULL;
198}
199
200struct heap_node* heap_peek(heap_prio_t higher_prio,
201 struct heap* heap)
202{
203 if (!heap->min)
204 heap->min = __heap_extract_min(higher_prio, heap);
205 return heap->min;
206}
207
208struct heap_node* heap_take(heap_prio_t higher_prio,
209 struct heap* heap)
210{
211 struct heap_node *node;
212 if (!heap->min)
213 heap->min = __heap_extract_min(higher_prio, heap);
214 node = heap->min;
215 heap->min = NULL;
216 if (node)
217 node->degree = NOT_IN_HEAP;
218 return node;
219}
220
221int heap_decrease(heap_prio_t higher_prio, struct heap_node* node)
222{
223 struct heap_node *parent;
224 struct heap_node** tmp_ref;
225 void* tmp;
226
227 /* bubble up */
228 parent = node->parent;
229 while (parent && higher_prio(node, parent)) {
230 /* swap parent and node */
231 tmp = parent->value;
232 parent->value = node->value;
233 node->value = tmp;
234 /* swap references */
235 *(parent->ref) = node;
236 *(node->ref) = parent;
237 tmp_ref = parent->ref;
238 parent->ref = node->ref;
239 node->ref = tmp_ref;
240 /* step up */
241 node = parent;
242 parent = node->parent;
243 }
244
245 return parent != NULL;
246}
247
248void heap_delete(heap_prio_t higher_prio, struct heap* heap,
249 struct heap_node* node)
250{
251 struct heap_node *parent, *prev, *pos;
252 struct heap_node** tmp_ref;
253 void* tmp;
254
255 if (heap->min != node) {
256 /* bubble up */
257 parent = node->parent;
258 while (parent) {
259 /* swap parent and node */
260 tmp = parent->value;
261 parent->value = node->value;
262 node->value = tmp;
263 /* swap references */
264 *(parent->ref) = node;
265 *(node->ref) = parent;
266 tmp_ref = parent->ref;
267 parent->ref = node->ref;
268 node->ref = tmp_ref;
269 /* step up */
270 node = parent;
271 parent = node->parent;
272 }
273 /* now delete:
274 * first find prev */
275 prev = NULL;
276 pos = heap->head;
277 while (pos != node) {
278 prev = pos;
279 pos = pos->next;
280 }
281 /* we have prev, now remove node */
282 if (prev)
283 prev->next = node->next;
284 else
285 heap->head = node->next;
286 __heap_union(higher_prio, heap, __heap_reverse(node->child));
287 } else
288 heap->min = NULL;
289 node->degree = NOT_IN_HEAP;
290}
291
292/* allocate a heap node for value and insert into the heap */
293int heap_add(heap_prio_t higher_prio, struct heap* heap,
294 void* value, int gfp_flags)
295{
296 struct heap_node* hn = heap_node_alloc(gfp_flags);
297 if (likely(hn)) {
298 heap_node_init(&hn, value);
299 heap_insert(higher_prio, heap, hn);
300 }
301 return hn != NULL;
302}
303
304void* heap_take_del(heap_prio_t higher_prio,
305 struct heap* heap)
306{
307 struct heap_node* hn = heap_take(higher_prio, heap);
308 void* ret = NULL;
309 if (hn) {
310 ret = hn->value;
311 heap_node_free(hn);
312 }
313 return ret;
314}
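To illustrate how the binomial heap above is meant to be driven: the comparator type heap_prio_t lives in litmus/heap.h, which is not part of this diff, so its exact signature below is inferred from the higher_prio() call sites in heap.c; the deadline values and function names are invented for illustration.

#include <linux/types.h>
#include <linux/slab.h>
#include <litmus/heap.h>

/* Inferred comparator shape: nonzero if 'a' has higher priority than 'b'.
 * Here each heap_node's value is assumed to point at a u64 deadline. */
static int earlier_deadline(struct heap_node *a, struct heap_node *b)
{
	return *(u64 *) a->value < *(u64 *) b->value;
}

static u64 d1 = 10, d2 = 5;

static void heap_usage_sketch(void)
{
	struct heap h;
	u64 *min;

	heap_init(&h);
	/* heap_add() allocates a heap_node for the value and inserts it */
	heap_add(earlier_deadline, &h, &d1, GFP_ATOMIC);
	heap_add(earlier_deadline, &h, &d2, GFP_ATOMIC);

	/* heap_take_del() removes the minimum and frees its node */
	min = heap_take_del(earlier_deadline, &h);	/* yields &d2 (deadline 5) */
	(void) min;
}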
diff --git a/litmus/jobs.c b/litmus/jobs.c
new file mode 100644
index 000000000000..36e314625d86
--- /dev/null
+++ b/litmus/jobs.c
@@ -0,0 +1,43 @@
1/* litmus/jobs.c - common job control code
2 */
3
4#include <linux/sched.h>
5
6#include <litmus/litmus.h>
7#include <litmus/jobs.h>
8
9void prepare_for_next_period(struct task_struct *t)
10{
11 BUG_ON(!t);
12 /* prepare next release */
13 t->rt_param.job_params.release = t->rt_param.job_params.deadline;
14 t->rt_param.job_params.deadline += get_rt_period(t);
15 t->rt_param.job_params.exec_time = 0;
16 /* update job sequence number */
17 t->rt_param.job_params.job_no++;
18
19 /* don't confuse Linux */
20 t->rt.time_slice = 1;
21}
22
23void release_at(struct task_struct *t, lt_t start)
24{
25 t->rt_param.job_params.deadline = start;
26 prepare_for_next_period(t);
27 set_rt_flags(t, RT_F_RUNNING);
28}
29
30
31/*
32 * Deactivate current task until the beginning of the next period.
33 */
34long complete_job(void)
35{
36 /* Mark that we do not execute anymore */
37 set_rt_flags(current, RT_F_SLEEP);
38 /* call schedule; this will return when a new job arrives.
39 * It also takes care of preparing for the next release
40 */
41 schedule();
42 return 0;
43}
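The helpers above are intended to be combined by scheduler plugins when a job finishes. A hedged sketch of such a plugin-side completion path follows; the function name job_completion() is illustrative only, while the calls it makes all appear in this patch.

#include <linux/sched.h>
#include <litmus/litmus.h>
#include <litmus/jobs.h>

/* Illustrative plugin-side completion path: mark the task as sleeping until
 * its next release and advance its job parameters. A real plugin would also
 * call sched_trace_task_completion() here (compare litmus.c in this patch). */
static void job_completion(struct task_struct *t)
{
	set_rt_flags(t, RT_F_SLEEP);
	prepare_for_next_period(t);
}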
diff --git a/litmus/litmus.c b/litmus/litmus.c
new file mode 100644
index 000000000000..eb0d17e298d7
--- /dev/null
+++ b/litmus/litmus.c
@@ -0,0 +1,654 @@
1/* litmus.c -- Implementation of the LITMUS syscalls, the LITMUS initialization code,
2 * and the procfs interface.
3 */
4#include <asm/uaccess.h>
5#include <linux/uaccess.h>
6#include <linux/sysrq.h>
7
8#include <linux/module.h>
9#include <linux/proc_fs.h>
10#include <linux/slab.h>
11
12#include <litmus/litmus.h>
13#include <linux/sched.h>
14#include <litmus/sched_plugin.h>
15
16#include <litmus/heap.h>
17
18#include <litmus/trace.h>
19
20/* Number of RT tasks that exist in the system */
21atomic_t rt_task_count = ATOMIC_INIT(0);
22static DEFINE_SPINLOCK(task_transition_lock);
23
24/* Give log messages sequential IDs. */
25atomic_t __log_seq_no = ATOMIC_INIT(0);
26
27/* current master CPU for handling timer IRQs */
28atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
29
30static struct kmem_cache * heap_node_cache;
31
32struct heap_node* heap_node_alloc(int gfp_flags)
33{
34 return kmem_cache_alloc(heap_node_cache, gfp_flags);
35}
36
37void heap_node_free(struct heap_node* hn)
38{
39 kmem_cache_free(heap_node_cache, hn);
40}
41
42/*
43 * sys_set_rt_task_param
44 * @pid: Pid of the task whose scheduling parameters must be changed
45 * @param: New real-time extension parameters such as the execution cost and
46 * period
47 * Syscall for manipulating a task's RT extension parameters
48 * Returns EFAULT if param is NULL.
49 * ESRCH if pid does not correspond
50 * to a valid task.
51 * EINVAL if either period or execution cost is <=0
52 * EBUSY if pid is already a real-time task
53 * 0 if success
54 *
55 * Only non-real-time tasks may be configured with this system call
56 * to avoid races with the scheduler. In practice, this means that a
57 * task's parameters must be set _before_ calling sys_prepare_rt_task()
58 *
59 * find_task_by_vpid() assumes that we are in the same namespace of the
60 * target.
61 */
62asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
63{
64 struct rt_task tp;
65 struct task_struct *target;
66 int retval = -EINVAL;
67
68 printk("Setting up rt task parameters for process %d.\n", pid);
69
70 if (pid < 0 || param == 0) {
71 goto out;
72 }
73 if (copy_from_user(&tp, param, sizeof(tp))) {
74 retval = -EFAULT;
75 goto out;
76 }
77
78 /* Task search and manipulation must be protected */
79 read_lock_irq(&tasklist_lock);
80 if (!(target = find_task_by_vpid(pid))) {
81 retval = -ESRCH;
82 goto out_unlock;
83 }
84
85 if (is_realtime(target)) {
86 /* The task is already a real-time task.
87 * We cannot allow parameter changes at this point.
88 */
89 retval = -EBUSY;
90 goto out_unlock;
91 }
92
93 if (tp.exec_cost <= 0)
94 goto out_unlock;
95 if (tp.period <= 0)
96 goto out_unlock;
97 if (!cpu_online(tp.cpu))
98 goto out_unlock;
99 if (tp.period < tp.exec_cost)
100 {
101 printk(KERN_INFO "litmus: real-time task %d rejected "
102 "because wcet > period\n", pid);
103 goto out_unlock;
104 }
105
106 target->rt_param.task_params = tp;
107
108 retval = 0;
109 out_unlock:
110 read_unlock_irq(&tasklist_lock);
111 out:
112 return retval;
113}
114
115/*
116 * Getter of task's RT params
117 * returns EINVAL if param is NULL or pid is invalid
118 * returns ESRCH if pid does not correspond to a valid task
119 * returns EFAULT if copying of parameters has failed.
120 *
121 * find_task_by_vpid() assumes that we are in the same namespace of the
122 * target.
123 */
124asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param)
125{
126 int retval = -EINVAL;
127 struct task_struct *source;
128 struct rt_task lp;
129 if (param == 0 || pid < 0)
130 goto out;
131 read_lock(&tasklist_lock);
132 if (!(source = find_task_by_vpid(pid))) {
133 retval = -ESRCH;
134 goto out_unlock;
135 }
136 lp = source->rt_param.task_params;
137 read_unlock(&tasklist_lock);
138 /* Do copying outside the lock */
139 retval =
140 copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0;
141 return retval;
142 out_unlock:
143 read_unlock(&tasklist_lock);
144 out:
145 return retval;
146
147}
148
149/*
150 * This is the crucial function for the periodic task implementation.
151 * It checks whether the task is periodic and whether such a sleep is
152 * permitted, and then calls the plugin-specific sleep, which puts the
153 * task into a wait array.
154 * returns 0 on successful wakeup
155 * returns EPERM if current conditions do not permit such sleep
156 * returns EINVAL if current task is not able to go to sleep
157 */
158asmlinkage long sys_complete_job(void)
159{
160 int retval = -EPERM;
161 if (!is_realtime(current)) {
162 retval = -EINVAL;
163 goto out;
164 }
165 /* Task with negative or zero period cannot sleep */
166 if (get_rt_period(current) <= 0) {
167 retval = -EINVAL;
168 goto out;
169 }
170 /* The plugin has to put the task into an
171 * appropriate queue and call schedule
172 */
173 retval = litmus->complete_job();
174 out:
175 return retval;
176}
177
178/* This is an "improved" version of sys_complete_job that
179 * addresses the problem of unintentionally missing a job after
180 * an overrun.
181 *
182 * returns 0 on successful wakeup
183 * returns EPERM if current conditions do not permit such sleep
184 * returns EINVAL if current task is not able to go to sleep
185 */
186asmlinkage long sys_wait_for_job_release(unsigned int job)
187{
188 int retval = -EPERM;
189 if (!is_realtime(current)) {
190 retval = -EINVAL;
191 goto out;
192 }
193
194 /* Task with negative or zero period cannot sleep */
195 if (get_rt_period(current) <= 0) {
196 retval = -EINVAL;
197 goto out;
198 }
199
200 retval = 0;
201
202 /* first wait until we have "reached" the desired job
203 *
204 * This implementation has at least two problems:
205 *
206 * 1) It doesn't gracefully handle the wrap around of
207 * job_no. Since LITMUS is a prototype, this is not much
208 * of a problem right now.
209 *
210 * 2) It is theoretically racy if a job release occurs
211 * between checking job_no and calling sleep_next_period().
212 * A proper solution would require adding another callback
213 * in the plugin structure and testing the condition with
214 * interrupts disabled.
215 *
216 * FIXME: At least problem 2 should be taken care of eventually.
217 */
218 while (!retval && job > current->rt_param.job_params.job_no)
219 /* If the last job overran then job <= job_no and we
220 * don't send the task to sleep.
221 */
222 retval = litmus->complete_job();
223 out:
224 return retval;
225}
226
227/* This is a helper syscall to query the current job sequence number.
228 *
229 * returns 0 on successful query
230 * returns EPERM if task is not a real-time task.
231 * returns EFAULT if &job is not a valid pointer.
232 */
233asmlinkage long sys_query_job_no(unsigned int __user *job)
234{
235 int retval = -EPERM;
236 if (is_realtime(current))
237 retval = put_user(current->rt_param.job_params.job_no, job);
238
239 return retval;
240}
241
242/* sys_null_call() is only used for determining raw system call
243 * overheads (kernel entry, kernel exit). It has no useful side effects.
244 * If ts is non-NULL, then the current Feather-Trace time is recorded.
245 */
246asmlinkage long sys_null_call(cycles_t __user *ts)
247{
248 long ret = 0;
249 cycles_t now;
250
251 if (ts) {
252 now = get_cycles();
253 ret = put_user(now, ts);
254 }
255
256 return ret;
257}
258
259/* p is a real-time task. Re-init its state as a best-effort task. */
260static void reinit_litmus_state(struct task_struct* p, int restore)
261{
262 struct rt_task user_config = {};
263 __user short *np_flag = NULL;
264
265 if (restore) {
266 /* Save user-space provided configuration data. */
267 user_config = p->rt_param.task_params;
268 np_flag = p->rt_param.np_flag;
269 }
270
271 /* We probably should not be inheriting any task's priority
272 * at this point in time.
273 */
274 WARN_ON(p->rt_param.inh_task);
275
276 /* We need to restore the priority of the task. */
277// __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio);
278
279 /* Cleanup everything else. */
280 memset(&p->rt_param, 0, sizeof(p->rt_param));
281
282 /* Restore preserved fields. */
283 if (restore) {
284 p->rt_param.task_params = user_config;
285 p->rt_param.np_flag = np_flag;
286 }
287}
288
289long litmus_admit_task(struct task_struct* tsk)
290{
291 long retval = 0;
292 unsigned long flags;
293
294 BUG_ON(is_realtime(tsk));
295
296 if (get_rt_period(tsk) == 0 ||
297 get_exec_cost(tsk) > get_rt_period(tsk)) {
298 TRACE_TASK(tsk, "litmus admit: invalid task parameters "
299 "(%lu, %lu)\n",
300 get_exec_cost(tsk), get_rt_period(tsk));
301 return -EINVAL;
302 }
303
304 if (!cpu_online(get_partition(tsk)))
305 {
306 TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n",
307 get_partition(tsk));
308 return -EINVAL;
309 }
310
311 INIT_LIST_HEAD(&tsk_rt(tsk)->list);
312
313 /* avoid scheduler plugin changing underneath us */
314 spin_lock_irqsave(&task_transition_lock, flags);
315
316 /* allocate heap node for this task */
317 tsk_rt(tsk)->heap_node = heap_node_alloc(GFP_ATOMIC);
318 if (!tsk_rt(tsk)->heap_node ||
319 !tsk_rt(tsk)->rel_heap) {
320 printk(KERN_WARNING "litmus: no more heap node memory!?\n");
321 retval = -ENOMEM;
322 heap_node_free(tsk_rt(tsk)->heap_node);
323 } else
324 heap_node_init(&tsk_rt(tsk)->heap_node, tsk);
325
326 if (!retval)
327 retval = litmus->admit_task(tsk);
328
329 if (!retval) {
330 sched_trace_task_name(tsk);
331 sched_trace_task_param(tsk);
332 atomic_inc(&rt_task_count);
333 }
334
335 spin_unlock_irqrestore(&task_transition_lock, flags);
336
337 return retval;
338}
339
340void litmus_exit_task(struct task_struct* tsk)
341{
342 if (is_realtime(tsk)) {
343 sched_trace_task_completion(tsk, 1);
344 litmus->task_exit(tsk);
345 BUG_ON(heap_node_in_heap(tsk_rt(tsk)->heap_node));
346 heap_node_free(tsk_rt(tsk)->heap_node);
347 atomic_dec(&rt_task_count);
348 reinit_litmus_state(tsk, 1);
349 }
350}
351
352/* Switching a plugin in use is tricky.
353 * We must watch out that no real-time tasks exist
354 * (and that none is created in parallel) and that the plugin is not
355 * currently in use on any processor (in theory).
356 *
357 * For now, we don't enforce the second part since it is unlikely to cause
358 * any trouble by itself as long as we don't unload modules.
359 */
360int switch_sched_plugin(struct sched_plugin* plugin)
361{
362 unsigned long flags;
363 int ret = 0;
364
365 BUG_ON(!plugin);
366
367 /* stop task transitions */
368 spin_lock_irqsave(&task_transition_lock, flags);
369
370 /* don't switch if there are active real-time tasks */
371 if (atomic_read(&rt_task_count) == 0) {
372 ret = litmus->deactivate_plugin();
373 if (0 != ret)
374 goto out;
375 ret = plugin->activate_plugin();
376 if (0 != ret) {
377 printk(KERN_INFO "Can't activate %s (%d).\n",
378 plugin->plugin_name, ret);
379 plugin = &linux_sched_plugin;
380 }
381 printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name);
382 litmus = plugin;
383 } else
384 ret = -EBUSY;
385out:
386 spin_unlock_irqrestore(&task_transition_lock, flags);
387 return ret;
388}
389
390/* Called upon fork.
391 * p is the newly forked task.
392 */
393void litmus_fork(struct task_struct* p)
394{
395 if (is_realtime(p))
396 /* clean out any litmus-related state, don't preserve anything */
397 reinit_litmus_state(p, 0);
398}
399
400/* Called upon execve().
401 * current is doing the exec.
402 * Don't let address space specific stuff leak.
403 */
404void litmus_exec(void)
405{
406 struct task_struct* p = current;
407
408 if (is_realtime(p)) {
409 WARN_ON(p->rt_param.inh_task);
410 p->rt_param.np_flag = NULL;
411 }
412}
413
414void exit_litmus(struct task_struct *dead_tsk)
415{
416 if (is_realtime(dead_tsk))
417 litmus_exit_task(dead_tsk);
418}
419
420
421#ifdef CONFIG_MAGIC_SYSRQ
422int sys_kill(int pid, int sig);
423
424static void sysrq_handle_kill_rt_tasks(int key, struct tty_struct *tty)
425{
426 struct task_struct *t;
427 read_lock(&tasklist_lock);
428 for_each_process(t) {
429 if (is_realtime(t)) {
430 sys_kill(t->pid, SIGKILL);
431 }
432 }
433 read_unlock(&tasklist_lock);
434}
435
436static struct sysrq_key_op sysrq_kill_rt_tasks_op = {
437 .handler = sysrq_handle_kill_rt_tasks,
438 .help_msg = "quit-rt-tasks(X)",
439 .action_msg = "sent SIGKILL to all LITMUS^RT real-time tasks",
440};
441
442
443#endif
444
445
446static int proc_read_stats(char *page, char **start,
447 off_t off, int count,
448 int *eof, void *data)
449{
450 int len;
451
452 len = snprintf(page, PAGE_SIZE,
453 "real-time tasks = %d\n"
454 "ready for release = %d\n",
455 atomic_read(&rt_task_count),
456 0);
457 return len;
458}
459
460static int proc_read_plugins(char *page, char **start,
461 off_t off, int count,
462 int *eof, void *data)
463{
464 int len;
465
466 len = print_sched_plugins(page, PAGE_SIZE);
467 return len;
468}
469
470static int proc_read_curr(char *page, char **start,
471 off_t off, int count,
472 int *eof, void *data)
473{
474 int len;
475
476 len = snprintf(page, PAGE_SIZE, "%s\n", litmus->plugin_name);
477 return len;
478}
479
480static int proc_write_curr(struct file *file,
481 const char *buffer,
482 unsigned long count,
483 void *data)
484{
485 int len, ret;
486 char name[65];
487 struct sched_plugin* found;
488
489 if(count > 64)
490 len = 64;
491 else
492 len = count;
493
494 if(copy_from_user(name, buffer, len))
495 return -EFAULT;
496
497 name[len] = '\0';
498 /* chomp name */
499 if (len > 1 && name[len - 1] == '\n')
500 name[len - 1] = '\0';
501
502 found = find_sched_plugin(name);
503
504 if (found) {
505 ret = switch_sched_plugin(found);
506 if (ret != 0)
507 printk(KERN_INFO "Could not switch plugin: %d\n", ret);
508 } else
509 printk(KERN_INFO "Plugin '%s' is unknown.\n", name);
510
511 return len;
512}
513
514
515static int proc_read_release_master(char *page, char **start,
516 off_t off, int count,
517 int *eof, void *data)
518{
519 int len, master;
520 master = atomic_read(&release_master_cpu);
521 if (master == NO_CPU)
522 len = snprintf(page, PAGE_SIZE, "NO_CPU\n");
523 else
524 len = snprintf(page, PAGE_SIZE, "%d\n", master);
525 return len;
526}
527
528static int proc_write_release_master(struct file *file,
529 const char *buffer,
530 unsigned long count,
531 void *data)
532{
533 int cpu, err, online = 0;
534 char msg[64];
535
536 if (count > 63)
537 return -EINVAL;
538
539 if (copy_from_user(msg, buffer, count))
540 return -EFAULT;
541
542 /* terminate */
543 msg[count] = '\0';
544 /* chomp */
545 if (count > 1 && msg[count - 1] == '\n')
546 msg[count - 1] = '\0';
547
548 if (strcmp(msg, "NO_CPU") == 0) {
549 atomic_set(&release_master_cpu, NO_CPU);
550 return count;
551 } else {
552 err = sscanf(msg, "%d", &cpu);
553 if (err == 1 && cpu >= 0 && (online = cpu_online(cpu))) {
554 atomic_set(&release_master_cpu, cpu);
555 return count;
556 } else {
557 TRACE("invalid release master: '%s' "
558 "(err:%d cpu:%d online:%d)\n",
559 msg, err, cpu, online);
560 return -EINVAL;
561 }
562 }
563}
564
565static struct proc_dir_entry *litmus_dir = NULL,
566 *curr_file = NULL,
567 *stat_file = NULL,
568 *plugs_file = NULL,
569 *release_master_file = NULL;
570
571static int __init init_litmus_proc(void)
572{
573 litmus_dir = proc_mkdir("litmus", NULL);
574 if (!litmus_dir) {
575 printk(KERN_ERR "Could not allocate LITMUS^RT procfs entry.\n");
576 return -ENOMEM;
577 }
578
579 curr_file = create_proc_entry("active_plugin",
580 0644, litmus_dir);
581 if (!curr_file) {
582 printk(KERN_ERR "Could not allocate active_plugin "
583 "procfs entry.\n");
584 return -ENOMEM;
585 }
586 curr_file->read_proc = proc_read_curr;
587 curr_file->write_proc = proc_write_curr;
588
589 release_master_file = create_proc_entry("release_master",
590 0644, litmus_dir);
591 if (!release_master_file) {
592 printk(KERN_ERR "Could not allocate release_master "
593 "procfs entry.\n");
594 return -ENOMEM;
595 }
596 release_master_file->read_proc = proc_read_release_master;
597 release_master_file->write_proc = proc_write_release_master;
598
599 stat_file = create_proc_read_entry("stats", 0444, litmus_dir,
600 proc_read_stats, NULL);
601
602 plugs_file = create_proc_read_entry("plugins", 0444, litmus_dir,
603 proc_read_plugins, NULL);
604
605 return 0;
606}
607
608static void exit_litmus_proc(void)
609{
610 if (plugs_file)
611 remove_proc_entry("plugins", litmus_dir);
612 if (stat_file)
613 remove_proc_entry("stats", litmus_dir);
614 if (curr_file)
615 remove_proc_entry("active_plugin", litmus_dir);
616 if (litmus_dir)
617 remove_proc_entry("litmus", NULL);
618}
619
620extern struct sched_plugin linux_sched_plugin;
621
622static int __init _init_litmus(void)
623{
624 /* Common initializers,
625 * mode change lock is used to enforce single mode change
626 * operation.
627 */
628 printk("Starting LITMUS^RT kernel\n");
629
630 register_sched_plugin(&linux_sched_plugin);
631
632 heap_node_cache = KMEM_CACHE(heap_node, SLAB_PANIC);
633
634#ifdef CONFIG_MAGIC_SYSRQ
635 /* offer some debugging help */
636 if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op))
637 printk("Registered kill rt tasks magic sysrq.\n");
638 else
639 printk("Could not register kill rt tasks magic sysrq.\n");
640#endif
641
642 init_litmus_proc();
643
644 return 0;
645}
646
647static void _exit_litmus(void)
648{
649 exit_litmus_proc();
650 kmem_cache_destroy(heap_node_cache);
651}
652
653module_init(_init_litmus);
654module_exit(_exit_litmus);
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
new file mode 100644
index 000000000000..ccedd3670ac5
--- /dev/null
+++ b/litmus/sched_litmus.c
@@ -0,0 +1,275 @@
1/* This file is included from kernel/sched.c */
2
3#include <litmus/litmus.h>
4#include <litmus/sched_plugin.h>
5
6static void update_time_litmus(struct rq *rq, struct task_struct *p)
7{
8 u64 delta = rq->clock - p->se.exec_start;
9 if (unlikely((s64)delta < 0))
10 delta = 0;
11 /* per job counter */
12 p->rt_param.job_params.exec_time += delta;
13 /* task counter */
14 p->se.sum_exec_runtime += delta;
15 /* sched_clock() */
16 p->se.exec_start = rq->clock;
17 cpuacct_charge(p, delta);
18}
19
20static void double_rq_lock(struct rq *rq1, struct rq *rq2);
21static void double_rq_unlock(struct rq *rq1, struct rq *rq2);
22
23static void litmus_tick(struct rq *rq, struct task_struct *p)
24{
25 if (is_realtime(p))
26 update_time_litmus(rq, p);
27 litmus->tick(p);
28}
29
30static void litmus_schedule(struct rq *rq, struct task_struct *prev)
31{
32 struct rq* other_rq;
33 long was_running;
34 lt_t _maybe_deadlock = 0;
35 /* WARNING: rq is _not_ locked! */
36 if (is_realtime(prev)) {
37 update_time_litmus(rq, prev);
38 if (!is_running(prev))
39 tsk_rt(prev)->present = 0;
40 }
41
42 /* let the plugin schedule */
43 rq->litmus_next = litmus->schedule(prev);
44
45 /* check if a global plugin pulled a task from a different RQ */
46 if (rq->litmus_next && task_rq(rq->litmus_next) != rq) {
47 /* we need to migrate the task */
48 other_rq = task_rq(rq->litmus_next);
49 TRACE_TASK(rq->litmus_next, "migrate from %d\n", other_rq->cpu);
50
51 /* while we drop the lock, the prev task could change its
52 * state
53 */
54 was_running = is_running(prev);
55 mb();
56 spin_unlock(&rq->lock);
57
58 /* Don't race with a concurrent switch. This could deadlock in
59 * the case of cross or circular migrations. It's the job of
60 * the plugin to make sure that doesn't happen.
61 */
62 TRACE_TASK(rq->litmus_next, "stack_in_use=%d\n",
63 rq->litmus_next->rt_param.stack_in_use);
64 if (rq->litmus_next->rt_param.stack_in_use != NO_CPU) {
65 TRACE_TASK(rq->litmus_next, "waiting to deschedule\n");
66 _maybe_deadlock = litmus_clock();
67 }
68 while (rq->litmus_next->rt_param.stack_in_use != NO_CPU) {
69 cpu_relax();
70 mb();
71 if (rq->litmus_next->rt_param.stack_in_use == NO_CPU)
72 TRACE_TASK(rq->litmus_next,
73 "descheduled. Proceeding.\n");
74 if (lt_before(_maybe_deadlock + 10000000,
75 litmus_clock())) {
76 /* We've been spinning for 10ms.
77 * Something can't be right!
78 * Let's abandon the task and bail out; at least
79 * we will have debug info instead of a hard
80 * deadlock.
81 */
82 TRACE_TASK(rq->litmus_next,
83 "stack too long in use. "
84 "Deadlock?\n");
85 rq->litmus_next = NULL;
86
87 /* bail out */
88 spin_lock(&rq->lock);
89 return;
90 }
91 }
92#ifdef __ARCH_WANT_UNLOCKED_CTXSW
93 if (rq->litmus_next->oncpu)
94 TRACE_TASK(rq->litmus_next, "waiting for !oncpu");
95 while (rq->litmus_next->oncpu) {
96 cpu_relax();
97 mb();
98 }
99#endif
100 double_rq_lock(rq, other_rq);
101 mb();
102 if (is_realtime(prev) && is_running(prev) != was_running) {
103 TRACE_TASK(prev,
104 "state changed while we dropped"
105 " the lock: is_running=%d, was_running=%d\n",
106 is_running(prev), was_running);
107 if (is_running(prev) && !was_running) {
108 /* prev task became unblocked
109 * we need to simulate the normal sequence of events
110 * for the scheduler plugins.
111 */
112 litmus->task_block(prev);
113 litmus->task_wake_up(prev);
114 }
115 }
116
117 set_task_cpu(rq->litmus_next, smp_processor_id());
118
119 /* DEBUG: now that we have the lock we need to make sure a
120 * couple of things still hold:
121 * - it is still a real-time task
122 * - it is still runnable (could have been stopped)
123 * If either is violated, then the active plugin is
124 * doing something wrong.
125 */
126 if (!is_realtime(rq->litmus_next) ||
127 !is_running(rq->litmus_next)) {
128 /* BAD BAD BAD */
129 TRACE_TASK(rq->litmus_next,
130 "BAD: migration invariant FAILED: "
131 "rt=%d running=%d\n",
132 is_realtime(rq->litmus_next),
133 is_running(rq->litmus_next));
134 /* drop the task */
135 rq->litmus_next = NULL;
136 }
137 /* release the other CPU's runqueue, but keep ours */
138 spin_unlock(&other_rq->lock);
139 }
140 if (rq->litmus_next)
141 rq->litmus_next->rt_param.stack_in_use = rq->cpu;
142}
143
144static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
145 int wakeup)
146{
147 if (wakeup) {
148 sched_trace_task_resume(p);
149 tsk_rt(p)->present = 1;
150 litmus->task_wake_up(p);
151 } else
152 TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n");
153}
154
155static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, int sleep)
156{
157 if (sleep) {
158 litmus->task_block(p);
159 tsk_rt(p)->present = 0;
160 sched_trace_task_block(p);
161 } else
162 TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n");
163}
164
165static void yield_task_litmus(struct rq *rq)
166{
167 BUG_ON(rq->curr != current);
168 litmus->complete_job();
169}
170
171/* Plugins are responsible for this.
172 */
173static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int flags)
174{
175}
176
177/* has already been taken care of */
178static void put_prev_task_litmus(struct rq *rq, struct task_struct *p)
179{
180}
181
182static struct task_struct *pick_next_task_litmus(struct rq *rq)
183{
184 struct task_struct* picked = rq->litmus_next;
185 rq->litmus_next = NULL;
186 if (picked)
187 picked->se.exec_start = rq->clock;
188 return picked;
189}
190
191static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued)
192{
193}
194
195static void switched_to_litmus(struct rq *rq, struct task_struct *p, int running)
196{
197}
198
199static void prio_changed_litmus(struct rq *rq, struct task_struct *p,
200 int oldprio, int running)
201{
202}
203
204unsigned int get_rr_interval_litmus(struct task_struct *p)
205{
206 /* return infinity */
207 return 0;
208}
209
210/* This is called when a task becomes a real-time task, either due to a SCHED_*
211 * class transition or due to PI mutex inheritance. We don't handle Linux PI
212 * mutex inheritance yet (and probably never will). Use LITMUS provided
213 * synchronization primitives instead.
214 */
215static void set_curr_task_litmus(struct rq *rq)
216{
217 rq->curr->se.exec_start = rq->clock;
218}
219
220
221#ifdef CONFIG_SMP
222/* execve tries to rebalance task in this scheduling domain */
223static int select_task_rq_litmus(struct task_struct *p, int sd_flag, int flags)
224{
225 /* preemption is already disabled.
226 * We don't want to change cpu here
227 */
228 return smp_processor_id();
229}
230
231/* we don't repartition at runtime */
232
233static unsigned long
234load_balance_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
235 unsigned long max_load_move,
236 struct sched_domain *sd, enum cpu_idle_type idle,
237 int *all_pinned, int *this_best_prio)
238{
239 return 0;
240}
241
242static int
243move_one_task_litmus(struct rq *this_rq, int this_cpu, struct rq *busiest,
244 struct sched_domain *sd, enum cpu_idle_type idle)
245{
246 return 0;
247}
248#endif
249
250const struct sched_class litmus_sched_class = {
251 .next = &rt_sched_class,
252 .enqueue_task = enqueue_task_litmus,
253 .dequeue_task = dequeue_task_litmus,
254 .yield_task = yield_task_litmus,
255
256 .check_preempt_curr = check_preempt_curr_litmus,
257
258 .pick_next_task = pick_next_task_litmus,
259 .put_prev_task = put_prev_task_litmus,
260
261#ifdef CONFIG_SMP
262 .select_task_rq = select_task_rq_litmus,
263
264 .load_balance = load_balance_litmus,
265 .move_one_task = move_one_task_litmus,
266#endif
267
268 .set_curr_task = set_curr_task_litmus,
269 .task_tick = task_tick_litmus,
270
271 .get_rr_interval = get_rr_interval_litmus,
272
273 .prio_changed = prio_changed_litmus,
274 .switched_to = switched_to_litmus,
275};
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
new file mode 100644
index 000000000000..0be091ece569
--- /dev/null
+++ b/litmus/sched_plugin.c
@@ -0,0 +1,199 @@
1/* sched_plugin.c -- core infrastructure for the scheduler plugin system
2 *
3 * This file includes the initialization of the plugin system, the no-op Linux
4 * scheduler plugin and some dummy functions.
5 */
6
7#include <linux/list.h>
8#include <linux/spinlock.h>
9
10#include <litmus/litmus.h>
11#include <litmus/sched_plugin.h>
12
13#include <litmus/jobs.h>
14
15/*************************************************************
16 * Dummy plugin functions *
17 *************************************************************/
18
19static void litmus_dummy_finish_switch(struct task_struct * prev)
20{
21}
22
23static struct task_struct* litmus_dummy_schedule(struct task_struct * prev)
24{
25 return NULL;
26}
27
28static void litmus_dummy_tick(struct task_struct* tsk)
29{
30}
31
32static long litmus_dummy_admit_task(struct task_struct* tsk)
33{
34 printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n",
35 tsk->comm, tsk->pid);
36 return -EINVAL;
37}
38
39static void litmus_dummy_task_new(struct task_struct *t, int on_rq, int running)
40{
41}
42
43static void litmus_dummy_task_wake_up(struct task_struct *task)
44{
45}
46
47static void litmus_dummy_task_block(struct task_struct *task)
48{
49}
50
51static void litmus_dummy_task_exit(struct task_struct *task)
52{
53}
54
55static long litmus_dummy_complete_job(void)
56{
57 return -ENOSYS;
58}
59
60static long litmus_dummy_activate_plugin(void)
61{
62 return 0;
63}
64
65static long litmus_dummy_deactivate_plugin(void)
66{
67 return 0;
68}
69
70#ifdef CONFIG_FMLP
71
72static long litmus_dummy_inherit_priority(struct pi_semaphore *sem,
73 struct task_struct *new_owner)
74{
75 return -ENOSYS;
76}
77
78static long litmus_dummy_return_priority(struct pi_semaphore *sem)
79{
80 return -ENOSYS;
81}
82
83static long litmus_dummy_pi_block(struct pi_semaphore *sem,
84 struct task_struct *new_waiter)
85{
86 return -ENOSYS;
87}
88
89#endif
90
91
92/* The default scheduler plugin. It doesn't do anything and lets Linux do its
93 * job.
94 */
95struct sched_plugin linux_sched_plugin = {
96 .plugin_name = "Linux",
97 .tick = litmus_dummy_tick,
98 .task_new = litmus_dummy_task_new,
99 .task_exit = litmus_dummy_task_exit,
100 .task_wake_up = litmus_dummy_task_wake_up,
101 .task_block = litmus_dummy_task_block,
102 .complete_job = litmus_dummy_complete_job,
103 .schedule = litmus_dummy_schedule,
104 .finish_switch = litmus_dummy_finish_switch,
105 .activate_plugin = litmus_dummy_activate_plugin,
106 .deactivate_plugin = litmus_dummy_deactivate_plugin,
107#ifdef CONFIG_FMLP
108 .inherit_priority = litmus_dummy_inherit_priority,
109 .return_priority = litmus_dummy_return_priority,
110 .pi_block = litmus_dummy_pi_block,
111#endif
112 .admit_task = litmus_dummy_admit_task
113};
114
115/*
116 * The reference to the current plugin that is used to schedule tasks within
117 * the system. It stores references to the actual function implementations.
118 * Should be initialized by calling "init_***_plugin()"
119 */
120struct sched_plugin *litmus = &linux_sched_plugin;
121
122/* the list of registered scheduling plugins */
123static LIST_HEAD(sched_plugins);
124static DEFINE_SPINLOCK(sched_plugins_lock);
125
126#define CHECK(func) {\
127 if (!plugin->func) \
128 plugin->func = litmus_dummy_ ## func;}
129
130/* FIXME: get reference to module */
131int register_sched_plugin(struct sched_plugin* plugin)
132{
133 printk(KERN_INFO "Registering LITMUS^RT plugin %s.\n",
134 plugin->plugin_name);
135
136 /* make sure we don't trip over null pointers later */
137 CHECK(finish_switch);
138 CHECK(schedule);
139 CHECK(tick);
140 CHECK(task_wake_up);
141 CHECK(task_exit);
142 CHECK(task_block);
143 CHECK(task_new);
144 CHECK(complete_job);
145 CHECK(activate_plugin);
146 CHECK(deactivate_plugin);
147#ifdef CONFIG_FMLP
148 CHECK(inherit_priority);
149 CHECK(return_priority);
150 CHECK(pi_block);
151#endif
152 CHECK(admit_task);
153
154 if (!plugin->release_at)
155 plugin->release_at = release_at;
156
157 spin_lock(&sched_plugins_lock);
158 list_add(&plugin->list, &sched_plugins);
159 spin_unlock(&sched_plugins_lock);
160
161 return 0;
162}
163
164
165/* FIXME: reference counting, etc. */
166struct sched_plugin* find_sched_plugin(const char* name)
167{
168 struct list_head *pos;
169 struct sched_plugin *plugin;
170
171 spin_lock(&sched_plugins_lock);
172 list_for_each(pos, &sched_plugins) {
173 plugin = list_entry(pos, struct sched_plugin, list);
174 if (!strcmp(plugin->plugin_name, name))
175 goto out_unlock;
176 }
177 plugin = NULL;
178
179out_unlock:
180 spin_unlock(&sched_plugins_lock);
181 return plugin;
182}
183
184int print_sched_plugins(char* buf, int max)
185{
186 int count = 0;
187 struct list_head *pos;
188 struct sched_plugin *plugin;
189
190 spin_lock(&sched_plugins_lock);
191 list_for_each(pos, &sched_plugins) {
192 plugin = list_entry(pos, struct sched_plugin, list);
193 count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name);
194 if (max - count <= 0)
195 break;
196 }
197 spin_unlock(&sched_plugins_lock);
198 return count;
199}
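Finally, a hedged sketch of how a scheduler plugin is expected to hook into the registry above. The plugin name "DEMO" and the demo_* functions are invented for illustration; any callback left unset is filled in with the corresponding litmus_dummy_* stub by register_sched_plugin(), and complete_job() is the generic helper from litmus/jobs.c.

#include <linux/module.h>
#include <linux/sched.h>
#include <litmus/sched_plugin.h>
#include <litmus/jobs.h>

/* Hypothetical plugin that never selects a real-time task to run. */
static struct task_struct* demo_schedule(struct task_struct *prev)
{
	return NULL;
}

static long demo_admit_task(struct task_struct *tsk)
{
	return 0;	/* accept every task (illustration only) */
}

static struct sched_plugin demo_plugin = {
	.plugin_name	= "DEMO",
	.schedule	= demo_schedule,
	.admit_task	= demo_admit_task,
	.complete_job	= complete_job,
	/* remaining callbacks default to the litmus_dummy_* stubs */
};

static int __init init_demo_plugin(void)
{
	return register_sched_plugin(&demo_plugin);
}
module_init(init_demo_plugin);

Once registered, such a plugin shows up in /proc/litmus/plugins and can be activated by writing "DEMO" to /proc/litmus/active_plugin (see proc_write_curr() in litmus.c above).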