authorJonathan Herman <hermanjl@cs.unc.edu>2012-05-03 16:50:32 -0400
committerJonathan Herman <hermanjl@cs.unc.edu>2012-05-03 17:17:52 -0400
commitbb35f3fc684667598d7ae39fd2d49a16f77beb39 (patch)
tree6a70f13510e36e2771652bb3fc6d60321bf1daf6
parente0e02579c34b9920781b3ce3fc9d6d7bcafb4d5b (diff)
Added color schedule
-rw-r--r--  include/litmus/budget.h        |  20
-rw-r--r--  include/litmus/color.h         |   9
-rw-r--r--  include/litmus/dgl.h           |  61
-rw-r--r--  include/litmus/fifo_common.h   |  25
-rw-r--r--  include/litmus/litmus.h        |  19
-rw-r--r--  include/litmus/locking.h       |   1
-rw-r--r--  include/litmus/rt_domain.h     |   2
-rw-r--r--  include/litmus/rt_param.h      |  14
-rw-r--r--  include/litmus/rt_server.h     |  39
-rw-r--r--  include/litmus/sched_plugin.h  |   7
-rw-r--r--  include/litmus/sched_trace.h   | 175
-rw-r--r--  include/trace/events/litmus.h  | 425
-rw-r--r--  include/trace/ftrace.h         |   5
-rw-r--r--  kernel/sched.c                 |   6
-rw-r--r--  litmus/Kconfig                 |  18
-rw-r--r--  litmus/Makefile                |  14
-rw-r--r--  litmus/budget.c                |  29
-rw-r--r--  litmus/color_proc.c            | 143
-rw-r--r--  litmus/dgl.c                   | 248
-rw-r--r--  litmus/fifo_common.c           |  58
-rw-r--r--  litmus/ftdev.c                 |   2
-rw-r--r--  litmus/locking.c               |   8
-rw-r--r--  litmus/rt_server.c             |  34
-rw-r--r--  litmus/sched_color.c           | 811
-rw-r--r--  litmus/sched_litmus.c          |   4
-rw-r--r--  litmus/sched_plugin.c          |   6
-rw-r--r--  litmus/sync.c                  |   3
27 files changed, 2127 insertions(+), 59 deletions(-)
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 732530e63491..6ef0e44effb1 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -1,8 +1,24 @@
 #ifndef _LITMUS_BUDGET_H_
 #define _LITMUS_BUDGET_H_
 
-/* Update the per-processor enforcement timer (arm/reproram/cancel) for
- * the next task. */
+struct enforcement_timer {
+	struct hrtimer timer;
+	int armed;
+};
+
+/**
+ * update_enforcement_timer() - Update per-processor enforcement timer for
+ * the next scheduled task.
+ *
+ * If @t is not NULL and has a precisely enforced budget, the timer will be
+ * armed to trigger a reschedule when the budget is exhausted. Otherwise,
+ * the timer will be cancelled.
+ */
 void update_enforcement_timer(struct task_struct* t);
 
+void init_enforcement_timer(struct enforcement_timer *et);
+
+void arm_enforcement_timer(struct enforcement_timer* et, struct task_struct* t);
+
+void cancel_enforcement_timer(struct enforcement_timer* et);
 #endif
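The newly exported timer API lets scheduler plugins embed their own enforcement timers instead of relying on the per-CPU budget_timer. A minimal sketch of how a plugin might drive it from its scheduling path, assuming a hypothetical per-CPU structure that embeds the timer (litmus/sched_color.c below does exactly this with its fifo_server); both arm_enforcement_timer() and cancel_enforcement_timer() assume IRQs are off:

    /* Hypothetical plugin glue; my_cpu_state and my_schedule_next() are
     * illustrative names, not part of the patch. */
    struct my_cpu_state {
            struct enforcement_timer timer;
            struct task_struct *scheduled;
    };

    static void my_schedule_next(struct my_cpu_state *state,
                                 struct task_struct *next)
    {
            /* called with IRQs off, as the timer helpers assume */
            cancel_enforcement_timer(&state->timer);
            state->scheduled = next;
            if (next && budget_precisely_enforced(next))
                    arm_enforcement_timer(&state->timer, next);
    }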
diff --git a/include/litmus/color.h b/include/litmus/color.h
index 998af33cd3ea..250f08a6e1f3 100644
--- a/include/litmus/color.h
+++ b/include/litmus/color.h
@@ -1,6 +1,11 @@
 #ifndef LITMUS_COLOR_H
 #define LITMUS_COLOR_H
 
+#define NUM_COLORS 64
+#define NUM_WAYS 12
+
+#ifdef __KERNEL__
+
 #define ONE_COLOR_LEN 11
 #define ONE_COLOR_FMT "%4lu: %4d\n"
 
@@ -16,6 +21,8 @@ void add_page_to_color_list(struct page*);
 void add_page_to_alloced_list(struct page*, struct vm_area_struct*);
 void reclaim_pages(struct vm_area_struct*);
 
+int color_server_params(int cpu, unsigned long *wcet, unsigned long *period);
+
 int color_add_pages_handler(struct ctl_table *, int, void __user *,
 		size_t *, loff_t *);
 int color_nr_pages_handler(struct ctl_table *, int, void __user *,
@@ -38,3 +45,5 @@ int color_reclaim_pages_handler(struct ctl_table *, int, void __user *,
 #endif
 
 #endif
+
+#endif
diff --git a/include/litmus/dgl.h b/include/litmus/dgl.h
new file mode 100644
index 000000000000..2bf61d4a7547
--- /dev/null
+++ b/include/litmus/dgl.h
@@ -0,0 +1,61 @@
+#ifndef __DGL_H_
+#define __DGL_H_
+
+#include <litmus/color.h>
+#include <linux/list.h>
+
+#define WP(num, word) (num / word + (num % word != 0))
+
+#define NUM_REPLICAS NUM_WAYS
+#define NUM_RESOURCES NUM_COLORS
+#define MASK_SIZE (sizeof(unsigned long) * 8)
+#define MASK_WORDS WP(NUM_RESOURCES, MASK_SIZE)
+
+/*
+ * A request for @replicas of a single resource.
+ */
+struct dgl_req {
+	unsigned short replicas;
+	struct list_head list;
+};
+
+/*
+ * Simultaneous @requests for multiple resources.
+ */
+struct dgl_group_req {
+	int cpu;
+	unsigned long requested[MASK_WORDS];
+	unsigned long waiting[MASK_WORDS];
+	struct dgl_req requests[NUM_RESOURCES];
+	unsigned long long ts;
+};
+
+/*
+ * A single resource.
+ */
+struct dgl_resource {
+	unsigned int free_replicas;
+	struct list_head waiting;
+};
+
+/*
+ * A group of resources.
+ */
+struct dgl {
+	struct dgl_resource resources[NUM_RESOURCES];
+	struct dgl_group_req* acquired[NR_CPUS];
+
+	char requests;
+	char running;
+	unsigned long long ts;
+};
+
+void dgl_init(struct dgl *dgl);
+void dgl_group_req_init(struct dgl_group_req *greq);
+
+void set_req(struct dgl_group_req *greq, int resource, int replicas);
+
+void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu);
+void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq);
+
+#endif
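Taken together, the intended life cycle of a group request is: initialize once, declare per-resource needs with set_req(), then bracket each critical section with add_group_req()/remove_group_req(). A sketch under the assumption that the caller holds whatever lock serializes the DGL (sched_color.c below uses a dedicated dgl_lock):

    static struct dgl dgl;
    static struct dgl_group_req greq;

    dgl_init(&dgl);
    dgl_group_req_init(&greq);

    set_req(&greq, 3, 2);   /* 2 replicas (ways) of resource (color) 3 */
    set_req(&greq, 17, 1);  /* 1 replica of color 17 */

    add_group_req(&dgl, &greq, cpu);
    /* satisfied once dgl.acquired[cpu] == &greq; sched_color.c tests
     * this with its has_resources() macro */
    remove_group_req(&dgl, &greq);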
diff --git a/include/litmus/fifo_common.h b/include/litmus/fifo_common.h
new file mode 100644
index 000000000000..4756f77bd511
--- /dev/null
+++ b/include/litmus/fifo_common.h
@@ -0,0 +1,25 @@
+/*
+ * FIFO common data structures and utility functions shared by all FIFO
+ * based scheduler plugins
+ */
+
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __FIFO_COMMON_H__
+#define __FIFO_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+void fifo_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		      release_jobs_t release);
+
+int fifo_higher_prio(struct task_struct* first,
+		     struct task_struct* second);
+
+int fifo_ready_order(struct bheap_node* a, struct bheap_node* b);
+
+int fifo_preemption_needed(rt_domain_t* rt, struct task_struct *t);
+
+#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 0b071fd359f9..f0ddb89e68dd 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -44,6 +44,8 @@ void litmus_exit_task(struct task_struct *tsk);
 
 #define tsk_rt(t)		(&(t)->rt_param)
 
+#define get_server_job(t)	(tsk_rt(t)->job_params.fake_job_no)
+
 /* Realtime utility macros */
 #define get_rt_flags(t)		(tsk_rt(t)->flags)
 #define set_rt_flags(t,f)	(tsk_rt(t)->flags=(f))
@@ -51,10 +53,13 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
 #define get_rt_period(t)	(tsk_rt(t)->task_params.period)
 #define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
+#define get_rt_job(t)		(tsk_rt(t)->job_params.job_no)
 #define get_partition(t)	(tsk_rt(t)->task_params.cpu)
 #define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
 #define get_release(t)		(tsk_rt(t)->job_params.release)
 #define get_class(t)		(tsk_rt(t)->task_params.cls)
+#define is_server(t)		(tsk_rt(t)->is_server)
+#define get_task_server(task)	(tsk_rt(task)->server)
 
 #define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
 #define get_boost_start(t)	(tsk_rt(t)->boost_start_time)
@@ -128,6 +133,16 @@ void srp_ceiling_block(void);
 
 #define bheap2task(hn) ((struct task_struct*) hn->value)
 
+static inline struct control_page* get_control_page(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page;
+}
+
+static inline int has_control_page(struct task_struct* t)
+{
+	return tsk_rt(t)->ctrl_page != NULL;
+}
+
 #ifdef CONFIG_NP_SECTION
 
 static inline int is_kernel_np(struct task_struct *t)
@@ -230,10 +245,6 @@ static inline int is_np(struct task_struct *t)
 	int kernel, user;
 	kernel = is_kernel_np(t);
 	user = is_user_np(t);
-	if (kernel || user)
-		TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
-
-			   kernel, user);
 	return kernel || user;
 #else
 	return unlikely(is_kernel_np(t) || is_user_np(t));
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4d7b870cb443..41991d5af01b 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -9,6 +9,7 @@ struct litmus_lock_ops;
 struct litmus_lock {
 	struct litmus_lock_ops *ops;
 	int type;
+	int id;
 };
 
 struct litmus_lock_ops {
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index ac249292e866..b243f998fef7 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -82,6 +82,8 @@ void __add_ready(rt_domain_t* rt, struct task_struct *new);
82void __merge_ready(rt_domain_t* rt, struct bheap *tasks); 82void __merge_ready(rt_domain_t* rt, struct bheap *tasks);
83void __add_release(rt_domain_t* rt, struct task_struct *task); 83void __add_release(rt_domain_t* rt, struct task_struct *task);
84 84
85struct release_heap* release_heap_alloc(int gfp_flags);
86
85static inline struct task_struct* __take_ready(rt_domain_t* rt) 87static inline struct task_struct* __take_ready(rt_domain_t* rt)
86{ 88{
87 struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue); 89 struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue);
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index ed9b7d20a763..2991fff58bc6 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -5,6 +5,8 @@
 #ifndef _LINUX_RT_PARAM_H_
 #define _LINUX_RT_PARAM_H_
 
+#include <litmus/color.h>
+
 /* Litmus time type. */
 typedef unsigned long long lt_t;
 
@@ -69,7 +71,10 @@ union np_flag {
 struct control_page {
 	volatile union np_flag sched;
 
-	/* to be extended */
+	/* locking overhead tracing: time stamp prior to system call */
+	uint64_t ts_syscall_start;	/* Feather-Trace cycles */
+
+	int requests[NUM_COLORS];
 };
 
 #ifndef __KERNEL__
@@ -92,6 +97,8 @@ struct color_ctrl_page {
 struct _rt_domain;
 struct bheap_node;
 struct release_heap;
+struct rt_server;
+struct dgl_group_req;
 
 struct rt_job {
 	/* Time instant the the job was or will be released. */
@@ -128,6 +135,8 @@ struct rt_param {
 	/* is the task present? (true if it can be scheduled) */
 	unsigned int present:1;
 
+	unsigned int is_server:1;
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Is the task being priority-boosted by a locking protocol? */
 	unsigned int priority_boosted:1;
@@ -135,6 +144,8 @@ struct rt_param {
 	lt_t boost_start_time;
 #endif
 
+	struct rt_server *server;
+
 	/* user controlled parameters */
 	struct rt_task task_params;
 
@@ -213,6 +224,7 @@ struct rt_param {
 	struct control_page * ctrl_page;
 
 	struct color_ctrl_page *color_ctrl_page;
+	struct dgl_group_req *req;
 };
 
 /* Possible RT flags */
diff --git a/include/litmus/rt_server.h b/include/litmus/rt_server.h
new file mode 100644
index 000000000000..0f3147707a3b
--- /dev/null
+++ b/include/litmus/rt_server.h
@@ -0,0 +1,39 @@
+#ifndef __RT_SERVER_H
+#define __RT_SERVER_H
+
+#include <linux/sched.h>
+#include <litmus/litmus.h>
+#include <litmus/rt_domain.h>
+
+struct rt_server;
+
+typedef int (*need_preempt_t)(rt_domain_t *rt, struct task_struct *t);
+typedef void (*server_update_t)(struct rt_server *srv);
+typedef void (*server_requeue_t)(struct rt_server *srv, struct task_struct *t);
+typedef struct task_struct* (*server_take_t)(struct rt_server *srv);
+
+struct rt_server {
+	int sid;
+	int cpu;
+	struct task_struct* linked;
+	rt_domain_t* domain;
+	int running;
+
+	/* Does this server have a higher-priority task? */
+	need_preempt_t need_preempt;
+	/* System state has changed, so should the server */
+	server_update_t update;
+	/* Requeue task in domain */
+	server_requeue_t requeue;
+	/* Take next task from domain */
+	server_take_t take;
+};
+
+void init_rt_server(struct rt_server *server,
+		    int sid, int cpu, rt_domain_t *domain,
+		    need_preempt_t need_preempt,
+		    server_requeue_t requeue,
+		    server_update_t update,
+		    server_take_t take);
+
+#endif
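Because init_rt_server() (litmus/rt_server.c below) substitutes __take_ready() and a no-op when take and update are NULL, a simple domain-backed server only needs the two mandatory callbacks. A hedged example; edf_preemption_needed() is the stock helper from edf_common.h, while my_requeue() is an assumed wrapper:

    static void my_requeue(struct rt_server *srv, struct task_struct *t)
    {
            requeue(srv->domain, t);  /* plugin's own requeue logic */
    }

    init_rt_server(&entry->server, 1 + cpu, cpu, &entry->edf_domain,
                   edf_preemption_needed, /* need_preempt (required) */
                   my_requeue,            /* requeue (required)      */
                   NULL,                  /* update: default no-op   */
                   NULL);                 /* take: __take_ready()    */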
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 6e7cabdddae8..0f529fa78b4d 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,6 +11,8 @@
 #include <litmus/locking.h>
 #endif
 
+struct litmus_lock;
+
 /************************ setup/tear down ********************/
 
 typedef long (*activate_plugin_t) (void);
@@ -67,6 +69,9 @@ typedef long (*admit_task_t)(struct task_struct* tsk);
 
 typedef void (*release_at_t)(struct task_struct *t, lt_t start);
 
+/* TODO remove me */
+typedef void (*release_ts_t)(lt_t time);
+
 struct sched_plugin {
 	struct list_head list;
 	/* basic info */
@@ -93,6 +98,8 @@ struct sched_plugin {
 	task_block_t		task_block;
 	task_exit_t		task_exit;
 
+	release_ts_t		release_ts;
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* locking protocols */
 	allocate_lock_t		allocate_lock;
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 7ca34cb13881..96d7666aa22c 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -24,7 +24,8 @@ struct st_param_data { /* regular params */
 	u32	phase;
 	u8	partition;
 	u8	class;
-	u8	__unused[2];
+	u8	level;
+	u8	__unused[1];
 };
 
 struct st_release_data {	/* A job is was/is going to be released. */
@@ -71,8 +72,8 @@ struct st_resume_data { /* A task resumes. */
 
 struct st_action_data {
 	u64	when;
-	u8	action;
-	u8	__unused[7];
+	u32	action;
+	u8	__unused[4];
 };
 
 struct st_sys_release_data {
@@ -164,34 +165,156 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 
 #endif
 
+#ifdef CONFIG_SCHED_LITMUS_TRACEPOINT
+
+#include <trace/events/litmus.h>
+
+#else
+
+#warning this is happening
+
+/* Override trace macros to actually do nothing */
+#define trace_litmus_task_param(t)
+#define trace_litmus_task_release(t)
+#define trace_litmus_switch_to(t)
+#define trace_litmus_switch_away(prev)
+#define trace_litmus_task_completion(t, forced)
+#define trace_litmus_task_block(t, i)
+#define trace_litmus_task_resume(t, i)
+#define trace_litmus_sys_release(start)
+
+#define trace_litmus_resource_acquire(t, i)
+#define trace_litmus_resource_release(t, i)
+#define trace_litmus_priority_donate(t, d, i)
+
+#define trace_litmus_container_param(cid, name)
+#define trace_litmus_server_param(sid, cid, wcet, time)
+#define trace_litmus_server_switch_to(sid, job, tid, tjob)
+#define trace_litmus_server_switch_away(sid, job, tid, tjob)
+#define trace_litmus_server_release(sid, job, release, deadline)
+#define trace_litmus_server_completion(sid, job)
+
+#endif
+
+
 
 #define SCHED_TRACE_BASE_ID 500
 
 
 #define sched_trace_task_name(t) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t)
-#define sched_trace_task_param(t) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t)
-#define sched_trace_task_release(t) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t)
-#define sched_trace_task_switch_to(t) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t)
-#define sched_trace_task_switch_away(t) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
-#define sched_trace_task_completion(t, forced) \
-	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
-		     (unsigned long) forced)
-#define sched_trace_task_block(t) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
-#define sched_trace_task_resume(t) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
-#define sched_trace_action(t, action) \
-	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9, do_sched_trace_action, t, \
-		     (unsigned long) action);
-/* when is a pointer, it does not need an explicit cast to unsigned long */
-#define sched_trace_sys_release(when) \
-	SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 1,				\
+			do_sched_trace_task_name, t)
+
+#define sched_trace_task_param(t)					\
+	do {								\
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 2,			\
+				do_sched_trace_task_param, t);		\
+		trace_litmus_task_param(t);				\
+	} while (0)
+
+#define sched_trace_task_release(t)					\
+	do {								\
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 3,			\
+				do_sched_trace_task_release, t);	\
+		trace_litmus_task_release(t);				\
+	} while (0)
+
+#define sched_trace_task_switch_to(t)					\
+	do {								\
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 4,			\
+				do_sched_trace_task_switch_to, t);	\
+		trace_litmus_switch_to(t);				\
+	} while (0)
+
+#define sched_trace_task_switch_away(t)					\
+	do {								\
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 5,			\
+				do_sched_trace_task_switch_away, t);	\
+		trace_litmus_switch_away(t);				\
+	} while (0)
+
+#define sched_trace_task_completion(t, forced)				\
+	do {								\
+		SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6,			\
+				do_sched_trace_task_completion, t,	\
+				(unsigned long) forced);		\
+		trace_litmus_task_completion(t, forced);		\
+	} while (0)
+
+#define sched_trace_task_block(t, i)					\
+	do {								\
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 7,			\
+				do_sched_trace_task_block, t);		\
+		trace_litmus_task_block(t, i);				\
+	} while (0)
+
+#define sched_trace_task_resume(t, i)					\
+	do {								\
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 8,			\
+				do_sched_trace_task_resume, t);		\
+		trace_litmus_task_resume(t, i);				\
+	} while (0)
+
+#define sched_trace_action(t, action)					\
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 9,				\
+			do_sched_trace_action, t, (unsigned long) action);
 
+/* when is a pointer, it does not need an explicit cast to unsigned long */
+#define sched_trace_sys_release(when)					\
+	do {								\
+		SCHED_TRACE(SCHED_TRACE_BASE_ID + 10,			\
+				do_sched_trace_sys_release, when);	\
+		trace_litmus_sys_release(when);				\
+	} while (0)
+
+#define QT_START lt_t _qt_start = litmus_clock()
+#define QT_END								\
+	sched_trace_log_message("%d P%d [%s@%s:%d]: Took %llu\n\n",	\
+			TRACE_ARGS, litmus_clock() - _qt_start)
+
+#define sched_trace_resource_acquire(t, i)				\
+	do {								\
+		trace_litmus_resource_acquire(t, i);			\
+	} while (0)
+
+#define sched_trace_resource_release(t, i)				\
+	do {								\
+		trace_litmus_resource_release(t, i);			\
+	} while (0)
+
+#define sched_trace_priority_donate(t, d, i)				\
+	do {								\
+		trace_litmus_priority_donate(t, d, i);			\
+	} while (0)
+
+#define sched_trace_container_param(cid, name)				\
+	do {								\
+		trace_litmus_container_param(cid, name);		\
+	} while (0)
+
+#define sched_trace_server_param(sid, cid, wcet, period)		\
+	do {								\
+		trace_litmus_server_param(sid, cid, wcet, period);	\
+	} while (0)
+
+#define sched_trace_server_switch_to(sid, job, tid, tjob)		\
+	do {								\
+		trace_litmus_server_switch_to(sid, job, tid, tjob);	\
+	} while (0)
+
+#define sched_trace_server_switch_away(sid, job, tid, tjob)		\
+	do {								\
+		trace_litmus_server_switch_away(sid, job, tid, tjob);	\
+	} while (0)
+
+#define sched_trace_server_release(sid, job, rel, dead)			\
+	do {								\
+		trace_litmus_server_release(sid, job, rel, dead);	\
+	} while (0)
+
+#define sched_trace_server_completion(sid, job)				\
+	do {								\
+		trace_litmus_server_completion(sid, job);		\
+	} while (0)
 
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
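The net effect is that every sched_trace_*() call site now emits the event twice: once as a binary Feather-Trace record via SCHED_TRACE()/SCHED_TRACE2(), and once as a kernel-style tracepoint, which the empty #defines above turn into no-ops when CONFIG_SCHED_LITMUS_TRACEPOINT is unset. A plugin therefore keeps a single call, e.g. (sketch; sid and job are whatever the plugin uses to identify the server and its current job):

    /* one call, two tracing backends */
    sched_trace_server_switch_to(sid, job, next->pid, get_rt_job(next));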
diff --git a/include/trace/events/litmus.h b/include/trace/events/litmus.h
new file mode 100644
index 000000000000..136a80db54a4
--- /dev/null
+++ b/include/trace/events/litmus.h
@@ -0,0 +1,425 @@
+/*
+ * LITMUS^RT kernel style scheduling tracepoints
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM litmus
+
+#if !defined(_SCHED_TASK_TRACEPOINT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _SCHED_TASK_TRACEPOINT_H
+
+#include <linux/tracepoint.h>
+
+#include <litmus/litmus.h>
+#include <litmus/rt_param.h>
+
+TRACE_EVENT(litmus_task_param,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( unsigned long long, wcet )
+		__field( unsigned long long, period )
+		__field( unsigned long long, phase )
+		__field( int, partition )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->wcet = get_exec_cost(t);
+		__entry->period = get_rt_period(t);
+		__entry->phase = get_rt_phase(t);
+		__entry->partition = get_partition(t);
+	),
+
+	TP_printk("period(%d, %Lu).\nwcet(%d, %Lu).\n",
+		__entry->pid, __entry->period,
+		__entry->pid, __entry->wcet)
+);
+
+/*
+ * Tracing jobs release
+ */
+TRACE_EVENT(litmus_task_release,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( unsigned long long, release )
+		__field( unsigned long long, deadline )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->release = get_release(t);
+		__entry->deadline = get_deadline(t);
+	),
+
+	TP_printk("release(job(%u, %u)): %Lu\ndeadline(job(%u, %u)): %Lu\n",
+		__entry->pid, __entry->job, __entry->release,
+		__entry->pid, __entry->job, __entry->deadline)
+);
+
+/*
+ * Tracepoint for switching to new task
+ */
+TRACE_EVENT(litmus_switch_to,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( unsigned long long, exec_time )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t->pid; //is_realtime(t) ? t->pid : 0;
+		__entry->job = t->rt_param.job_params.job_no; //is_realtime(t) ? t->rt_param.job_params.job_no : 0;
+		__entry->exec_time = get_exec_time(t);
+	),
+
+	TP_printk("switch_to(job(%u, %u)): (exec: %Lu)\n",
+		__entry->pid, __entry->job,
+		__entry->exec_time)
+);
+
+/*
+ * Tracepoint for switching away previous task
+ */
+TRACE_EVENT(litmus_switch_away,
+
+	TP_PROTO(struct task_struct *t),
+
+	TP_ARGS(t),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( unsigned long long, exec_time )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t->pid; //is_realtime(t) ? t->pid : 0;
+		__entry->job = t->rt_param.job_params.job_no; //is_realtime(t) ? t->rt_param.job_params.job_no : 0;
+		__entry->exec_time = get_exec_time(t);
+	),
+
+	TP_printk("switch_away(job(%u, %u)): (exec: %Lu)\n",
+		__entry->pid, __entry->job,
+		__entry->exec_time)
+);
+
+/*
+ * Tracing jobs completion
+ */
+TRACE_EVENT(litmus_task_completion,
+
+	TP_PROTO(struct task_struct *t, unsigned long forced),
+
+	TP_ARGS(t, forced),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( unsigned int, job )
+		__field( unsigned long, forced )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->forced = forced;
+	),
+
+	TP_printk("completed(job(%u, %u)): (forced: %lu)\n",
+		__entry->pid, __entry->job,
+		__entry->forced)
+);
+
+/*
+ * Trace blocking tasks.
+ */
+TRACE_EVENT(litmus_task_block,
+
+	TP_PROTO(struct task_struct *t, int lid),
+
+	TP_ARGS(t, lid),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( int, lid )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->lid = lid;
+	),
+
+	TP_printk("(%u) blocks on %d\n", __entry->pid,
+		__entry->lid)
+);
+
+/*
+ * Lock events
+ */
+TRACE_EVENT(litmus_resource_acquire,
+
+	TP_PROTO(struct task_struct *t, int lid),
+
+	TP_ARGS(t, lid),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( int, lid )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->lid = lid;
+	),
+
+	TP_printk("(%u) acquires %d\n", __entry->pid,
+		__entry->lid)
+);
+
+TRACE_EVENT(litmus_resource_release,
+
+	TP_PROTO(struct task_struct *t, int lid),
+
+	TP_ARGS(t, lid),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( int, lid )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->lid = lid;
+	),
+
+	TP_printk("(%u) releases %d\n", __entry->pid,
+		__entry->lid)
+);
+
+TRACE_EVENT(litmus_priority_donate,
+
+	TP_PROTO(struct task_struct *t, struct task_struct *donor, int lid),
+
+	TP_ARGS(t, donor, lid),
+
+	TP_STRUCT__entry(
+		__field( pid_t, t_pid )
+		__field( pid_t, d_pid )
+		__field( unsigned long long, prio )
+		__field( int, lid )
+	),
+
+	TP_fast_assign(
+		__entry->t_pid = t ? t->pid : 0;
+		__entry->d_pid = donor ? donor->pid : 0;
+		__entry->prio = get_deadline(donor);
+		__entry->lid = lid;
+	),
+
+	TP_printk("(%u) inherits %llu from (%u) on %d\n", __entry->t_pid,
+		__entry->prio, __entry->d_pid, __entry->lid)
+);
+
+/*
+ * Tracing jobs resume
+ */
+TRACE_EVENT(litmus_task_resume,
+
+	TP_PROTO(struct task_struct *t, int lid),
+
+	TP_ARGS(t, lid),
+
+	TP_STRUCT__entry(
+		__field( pid_t, pid )
+		__field( int, lid )
+		__field( unsigned int, job )
+	),
+
+	TP_fast_assign(
+		__entry->pid = t ? t->pid : 0;
+		__entry->job = t ? t->rt_param.job_params.job_no : 0;
+		__entry->lid = lid;
+	),
+
+	TP_printk("resume(job(%u, %u)) on %d\n",
+		__entry->pid, __entry->job,
+		__entry->lid)
+);
+
+/*
+ * Trace synchronous release
+ */
+TRACE_EVENT(litmus_sys_release,
+
+	TP_PROTO(unsigned long long *start),
+
+	TP_ARGS(start),
+
+	TP_STRUCT__entry(
+		__field( unsigned long long, rel )
+	),
+
+	TP_fast_assign(
+		__entry->rel = *start;
+	),
+
+	TP_printk("SynRelease(%Lu)\n", __entry->rel)
+);
+
+/*
+ * Containers
+ */
+TRACE_EVENT(litmus_container_param,
+
+	TP_PROTO(int cid, const char *name),
+
+	TP_ARGS(cid, name),
+
+	TP_STRUCT__entry(
+		__field( int, cid )
+		__array( char, name, TASK_COMM_LEN )
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->name, name, TASK_COMM_LEN);
+		__entry->cid = cid;
+	),
+
+	TP_printk("container, name: %s, id: %d\n", __entry->name, __entry->cid)
+);
+
+TRACE_EVENT(litmus_server_param,
+
+	TP_PROTO(int sid, int cid, unsigned long long wcet, unsigned long long period),
+
+	TP_ARGS(sid, cid, wcet, period),
+
+	TP_STRUCT__entry(
+		__field( int, sid )
+		__field( int, cid )
+		__field( unsigned long long, wcet )
+		__field( unsigned long long, period )
+	),
+
+	TP_fast_assign(
+		__entry->cid = cid;
+		__entry->sid = sid;
+		__entry->wcet = wcet;
+		__entry->period = period;
+	),
+
+	TP_printk("server(%llu, %llu), sid: %d, cont: %d\n",
+		__entry->wcet, __entry->period, __entry->sid, __entry->cid)
+);
+
+TRACE_EVENT(litmus_server_switch_to,
+
+	TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob),
+
+	TP_ARGS(sid, job, tid, tjob),
+
+	TP_STRUCT__entry(
+		__field( int, sid )
+		__field( unsigned int, job )
+		__field( int, tid )
+		__field( unsigned int, tjob )
+	),
+
+	TP_fast_assign(
+		__entry->sid = sid;
+		__entry->tid = tid;
+		__entry->job = job;
+		__entry->tjob = tjob;
+	),
+
+	TP_printk("switch_to(server(%d, %u)): (%d, %d)\n", __entry->sid, __entry->job, __entry->tid, __entry->tjob)
+);
+
+TRACE_EVENT(litmus_server_switch_away,
+
+	TP_PROTO(int sid, unsigned int job, int tid, unsigned int tjob),
+
+	TP_ARGS(sid, job, tid, tjob),
+
+	TP_STRUCT__entry(
+		__field( int, sid )
+		__field( unsigned int, job )
+		__field( int, tid )
+		__field( unsigned int, tjob )
+	),
+
+	TP_fast_assign(
+		__entry->sid = sid;
+		__entry->tid = tid;
+		__entry->job = job;
+		__entry->tjob = tjob;
+	),
+
+	TP_printk("switch_away(server(%d, %u)): (%d, %d)\n", __entry->sid, __entry->job, __entry->tid, __entry->tjob)
+);
+
+TRACE_EVENT(litmus_server_release,
+
+	TP_PROTO(int sid, unsigned int job,
+		unsigned long long release,
+		unsigned long long deadline),
+
+	TP_ARGS(sid, job, release, deadline),
+
+	TP_STRUCT__entry(
+		__field( int, sid )
+		__field( unsigned int, job )
+		__field( unsigned long long, release )
+		__field( unsigned long long, deadline )
+	),
+
+	TP_fast_assign(
+		__entry->sid = sid;
+		__entry->job = job;
+		__entry->release = release;
+		__entry->deadline = deadline;
+	),
+
+	TP_printk("release(server(%d, %u)), release: %llu, deadline: %llu\n", __entry->sid, __entry->job, __entry->release, __entry->deadline)
+);
+
+TRACE_EVENT(litmus_server_completion,
+
+	TP_PROTO(int sid, int job),
+
+	TP_ARGS(sid, job),
+
+	TP_STRUCT__entry(
+		__field( int, sid )
+		__field( unsigned int, job )
+	),
+
+	TP_fast_assign(
+		__entry->sid = sid;
+		__entry->job = job;
+	),
+
+	TP_printk("completion(server(%d, %d))\n", __entry->sid, __entry->job)
+);
+
+#endif /* _SCHED_TASK_TRACEPOINT_H */
+
+/* Must stay outside the protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 533c49f48047..4d6f3474e8fa 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -17,6 +17,7 @@
  */
 
 #include <linux/ftrace_event.h>
+#include <litmus/litmus.h>
 
 /*
  * DECLARE_EVENT_CLASS can be used to add a generic function
@@ -54,7 +55,7 @@
 #define __string(item, src) __dynamic_array(char, item, -1)
 
 #undef TP_STRUCT__entry
-#define TP_STRUCT__entry(args...) args
+#define TP_STRUCT__entry(args...) args __field( unsigned long long, __rt_ts )
 
 #undef DECLARE_EVENT_CLASS
 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
@@ -507,7 +508,7 @@ static inline notrace int ftrace_get_offsets_##call( \
 	strcpy(__get_str(dst), src);
 
 #undef TP_fast_assign
-#define TP_fast_assign(args...) args
+#define TP_fast_assign(args...) args; __entry->__rt_ts = litmus_clock();
 
 #undef TP_perf_assign
 #define TP_perf_assign(args...)
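Note these two overrides are global: every tracepoint compiled through include/trace/ftrace.h, not only the litmus:* events, now carries a trailing __rt_ts field stamped with litmus_clock(), putting all events on a common time base. As an illustration (not the literal generated code), an expanded entry behaves roughly like:

    struct ftrace_raw_litmus_task_release {
            struct trace_entry ent;
            pid_t              pid;
            unsigned int       job;
            unsigned long long release;
            unsigned long long deadline;
            unsigned long long __rt_ts;   /* injected by this patch */
    };
    /* ...and each TP_fast_assign() body now effectively ends with:
     *         __entry->__rt_ts = litmus_clock();                     */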
diff --git a/kernel/sched.c b/kernel/sched.c
index baaca61bc3a3..2229d0deec4b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -80,14 +80,14 @@
 #include "workqueue_sched.h"
 #include "sched_autogroup.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
 static void litmus_tick(struct rq*, struct task_struct*);
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/sched.h>
-
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 94b48e199577..68459d4dca41 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -138,6 +138,24 @@ config SCHED_TASK_TRACE_SHIFT
 	   10 => 1k events
 	    8 => 512 events
 
+config SCHED_LITMUS_TRACEPOINT
+	bool "Enable Event/Tracepoint Tracing for real-time task tracing"
+	depends on TRACEPOINTS
+	default n
+	help
+	  Enable kernel-style events (tracepoints) for LITMUS^RT. These
+	  events trace the same functions as the sched_trace_XXX() macros
+	  above, but can be enabled independently.
+	  LITMUS^RT tracepoints can be recorded and analyzed together
+	  (single time reference) with all other kernel tracing events
+	  (e.g., sched:sched_switch).
+
+	  This also enables a quick way to visualize schedule traces using
+	  the trace-cmd utility and the kernelshark visualizer.
+
+	  Say Yes for debugging and visualization purposes.
+	  Say No for overhead tracing.
+
 config SCHED_OVERHEAD_TRACE
 	bool "Record timestamps for overhead measurements"
 	depends on FEATHER_TRACE
diff --git a/litmus/Makefile b/litmus/Makefile
index 2d77d11e905e..d24e9855a7f9 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -16,16 +16,22 @@ obj-y = sched_plugin.o litmus.o \
 	srp.o \
 	bheap.o \
 	ctrldev.o \
-	sched_gsn_edf.o \
-	sched_psn_edf.o \
 	color.o \
 	color_proc.o \
-	color_dev.o
+	color_dev.o \
+	rt_server.o \
+	dgl.o \
+	fifo_common.o \
+	sched_color.o
+
+	# sched_psn_edf.o \
+	# sched_gsn_edf.o \
+
+
 
 obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
 obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o
-
 obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
 obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
 obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
diff --git a/litmus/budget.c b/litmus/budget.c
index 310e9a3d4172..84f3f22770b1 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -4,13 +4,8 @@
 
 #include <litmus/litmus.h>
 #include <litmus/preempt.h>
-
-struct enforcement_timer {
-	/* The enforcement timer is used to accurately police
-	 * slice budgets. */
-	struct hrtimer timer;
-	int armed;
-};
+#include <litmus/budget.h>
+#include <litmus/sched_trace.h>
 
 DEFINE_PER_CPU(struct enforcement_timer, budget_timer);
 
@@ -32,7 +27,7 @@ static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer)
 }
 
 /* assumes called with IRQs off */
-static void cancel_enforcement_timer(struct enforcement_timer* et)
+void cancel_enforcement_timer(struct enforcement_timer* et)
 {
 	int ret;
 
@@ -54,11 +49,10 @@ static void cancel_enforcement_timer(struct enforcement_timer* et)
 }
 
 /* assumes called with IRQs off */
-static void arm_enforcement_timer(struct enforcement_timer* et,
-				  struct task_struct* t)
+void arm_enforcement_timer(struct enforcement_timer* et,
+			   struct task_struct* t)
 {
 	lt_t when_to_fire;
-	TRACE_TASK(t, "arming enforcement timer.\n");
 
 	/* Calling this when there is no budget left for the task
 	 * makes no sense, unless the task is non-preemptive. */
@@ -67,8 +61,11 @@ static void arm_enforcement_timer(struct enforcement_timer* et,
 	/* __hrtimer_start_range_ns() cancels the timer
 	 * anyway, so we don't have to check whether it is still armed */
 
-	if (likely(!is_np(t))) {
+	if (likely(!is_user_np(t))) {
 		when_to_fire = litmus_clock() + budget_remaining(t);
+		TRACE_TASK(t, "arming enforcement timer for %llu.\n",
+			   when_to_fire);
+
 		__hrtimer_start_range_ns(&et->timer,
 				ns_to_ktime(when_to_fire),
 				0 /* delta */,
@@ -94,6 +91,11 @@ void update_enforcement_timer(struct task_struct* t)
 	}
 }
 
+void init_enforcement_timer(struct enforcement_timer *et)
+{
+	hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	et->timer.function = on_enforcement_timeout;
+}
 
 static int __init init_budget_enforcement(void)
 {
@@ -102,8 +104,7 @@ static int __init init_budget_enforcement(void)
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		et = &per_cpu(budget_timer, cpu);
-		hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-		et->timer.function = on_enforcement_timeout;
+		init_enforcement_timer(et);
 	}
 	return 0;
 }
diff --git a/litmus/color_proc.c b/litmus/color_proc.c
index 4cb6c9ac89bb..0ac533f96d3e 100644
--- a/litmus/color_proc.c
+++ b/litmus/color_proc.c
@@ -2,17 +2,30 @@
 #include <linux/sysctl.h>
 #include <linux/slab.h>
 
+#include <litmus/sched_trace.h>
 #include <litmus/color.h>
 
+#define SPERIOD_LEN	7
+#define SPERIOD_FILE	"period"
+#define SWCET_LEN	5
+#define SWCET_FILE	"wcet"
+
 extern int color_sysctl_add_pages_data; /* litmus/color.c */
 
 static int zero = 0;
 static int one = 1;
 
+static unsigned long *server_wcet;
+static unsigned long *server_period;
+
 #define NR_PAGES_INDEX 0 /* location of nr_pages in the table below */
 static struct ctl_table color_table[] =
 {
 	{
+		.procname	= "servers",
+		.mode		= 0555,
+	},
+	{
 		/* you MUST update NR_PAGES_INDEX if you move this entry */
 		.procname	= "nr_pages",
 		.mode		= 0444,
@@ -41,6 +54,7 @@ static struct ctl_table litmus_table[] =
 	},
 	{ }
 };
+
 static struct ctl_table litmus_dir_table[] = {
 	{
 		.procname	= "litmus",
@@ -50,6 +64,26 @@ static struct ctl_table litmus_dir_table[] = {
 	{ }
 };
 
+int color_server_params(int cpu, unsigned long *wcet, unsigned long *period)
+{
+	if (cpu >= num_online_cpus()) {
+		printk(KERN_WARNING "Cannot access illegal CPU: %d\n", cpu);
+		return -EFAULT;
+	}
+
+	if (server_wcet[cpu] == 0 || server_period[cpu] == 0) {
+		printk(KERN_WARNING "Server %d is uninitialized!\n", cpu);
+		return -EPERM;
+	}
+
+	*wcet = server_wcet[cpu];
+	*period = server_period[cpu];
+
+	TRACE("For %d: %lu, %lu\n", cpu, server_wcet[cpu], server_period[cpu]);
+
+	return 0;
+}
+
 extern unsigned long nr_colors; /* litmus/color.c */
 
 /* must be called AFTER nr_colors is set */
@@ -67,11 +101,101 @@ out:
 	return ret;
 }
 
+static void __init init_server_entry(struct ctl_table *entry,
+				     unsigned long *parameter,
+				     char *name)
+{
+	entry->procname = name;
+	entry->mode = 0666;
+	entry->proc_handler = proc_doulongvec_minmax;
+	entry->data = parameter;
+	entry->maxlen = sizeof(unsigned long);
+}
+
+static int __init init_cpu_entry(struct ctl_table *cpu_table, int cpu)
+{
+	char *name;
+	size_t size;
+	struct ctl_table *server_table, *entry;
+
+	server_wcet[cpu] = 0;
+	server_period[cpu] = 0;
+
+	printk(KERN_INFO "Creating cpu %d\n", cpu);
+
+	size = sizeof(ctl_table) * 3;
+	server_table = kmalloc(size, GFP_ATOMIC);
+	if (!server_table) {
+		printk(KERN_WARNING "Could not allocate "
+		       "color server proc for CPU %d.\n", cpu);
+		return -ENOMEM;
+	}
+	memset(server_table, 0, size);
+
+	/* Server WCET */
+	name = kmalloc(SWCET_LEN, GFP_ATOMIC);
+	if (!name) {
+		return -ENOMEM;
+	}
+	strcpy(name, SWCET_FILE);
+	entry = &server_table[0];
+	init_server_entry(entry, &server_wcet[cpu], name);
+
+
+	/* Server period */
+	name = kmalloc(SPERIOD_LEN, GFP_ATOMIC);
+	if (!name) {
+		return -ENOMEM;
+	}
+	strcpy(name, SPERIOD_FILE);
+	entry = &server_table[1];
+	init_server_entry(entry, &server_period[cpu], name);
+
+	name = kmalloc(3, GFP_ATOMIC);
+	if (!name) {
+		return -ENOMEM;
+	}
+	snprintf(name, 3, "%d", cpu);
+	cpu_table->procname = name;
+	cpu_table->mode = 0555;
+	cpu_table->child = server_table;
+
+	return 0;
+}
+
+static int __init init_server_entries(struct ctl_table *cpu_tables)
+{
+	size_t size;
+	int ret, cpu;
+	struct ctl_table *cpu_table;
+
+	size = sizeof(unsigned long) * num_online_cpus();
+	server_wcet = kmalloc(size, GFP_ATOMIC);
+	server_period = kmalloc(size, GFP_ATOMIC);
+	if (!server_wcet || !server_period) {
+		printk(KERN_WARNING "Could not allocate server parameters.\n");
+		return -ENOMEM;
+	}
+
+	for_each_online_cpu(cpu) {
+		cpu_table = &cpu_tables[cpu];
+		ret = init_cpu_entry(cpu_table, cpu);
+		if (ret) {
+			return ret;
+		}
+	}
+	return 0;
+}
+
+
 static struct ctl_table_header *litmus_sysctls;
 
 static int __init litmus_sysctl_init(void)
 {
 	int ret = 0;
+	size_t size;
+	struct ctl_table *cpu_tables;
+
 	printk(KERN_INFO "Registering LITMUS^RT proc sysctl.\n");
 	litmus_sysctls = register_sysctl_table(litmus_dir_table);
 	if (!litmus_sysctls) {
@@ -80,6 +204,25 @@ static int __init litmus_sysctl_init(void)
 		goto out;
 	}
 	ret = init_sysctl_nr_colors();
+	if (ret)
+		goto out;
+
+
+	size = sizeof(ctl_table) * (num_online_cpus() + 2);
+	cpu_tables = kmalloc(size, GFP_ATOMIC);
+	if (!cpu_tables) {
+		printk(KERN_WARNING "Could not allocate color CPU proc.\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	memset(cpu_tables, 0, size);
+
+	ret = init_server_entries(cpu_tables);
+	if (ret)
+		goto out;
+
+	color_table[0].child = cpu_tables;
+
 out:
 	return ret;
 }
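Userspace is expected to write each CPU's server budget and period into the new per-CPU proc files before the plugin starts (e.g. .../servers/<cpu>/wcet and .../servers/<cpu>/period under the litmus color sysctl directory; the exact mount point depends on where color_table is attached). In the kernel, a plugin would then consume them roughly like this (sketch):

    /* hypothetical activation-path use, per CPU */
    unsigned long wcet, period;
    int err = color_server_params(cpu, &wcet, &period);
    if (err)
            return err;   /* -EPERM until both files have been written */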
diff --git a/litmus/dgl.c b/litmus/dgl.c
new file mode 100644
index 000000000000..e09d57cc2672
--- /dev/null
+++ b/litmus/dgl.c
@@ -0,0 +1,248 @@
+#include <linux/sched.h>
+#include <litmus/litmus.h>
+#include <litmus/dgl.h>
+#include <litmus/sched_trace.h>
+
+/* Word, bit -> resource id */
+#define ri(w, b) (w * MASK_SIZE + b)
+
+/* For loop, where @i iterates over each set bit in @bit_arr */
+#define for_each_resource(bit_arr, w, b, i)				\
+	for(w = 0; w < MASK_WORDS; ++w)					\
+		for(b = find_first_bit(&bit_arr[w],MASK_SIZE), i = ri(w, b); \
+		    b < MASK_SIZE;					\
+		    b = find_next_bit(&bit_arr[w],MASK_SIZE,b+1), i = ri(w, b))
+
+/* Return resource id in dgl @d for resource @r */
+#define resource_id(d, r) ((((void*)r) - (void*)(&(d)->resources))/ sizeof(*r))
+
+/* Return request group of req @r for resource @i */
+#define req_group(r, i) (container_of(((void*)r) - sizeof(*r)*(i), \
+				      struct dgl_group_req, requests))
+
+/* Resource id -> word, bit */
+static inline void mask_idx(int resource, int *word, int *bit)
+{
+	*word = resource / MASK_SIZE;
+	*bit = resource % MASK_SIZE;
+}
+
+
+static void print_waiting(struct dgl *dgl, struct dgl_resource *resource)
+{
+	struct dgl_req *pos;
+	struct dgl_group_req *greq;
+	int rid = resource_id(dgl, resource);
+	unsigned long long last = 0;
+
+	TRACE("List for rid %d\n", resource_id(dgl, resource));
+	list_for_each_entry(pos, &resource->waiting, list) {
+		greq = req_group(pos, rid);
+		TRACE("  0x%p with timestamp %llu\n", greq, greq->ts);
+		BUG_ON(greq->ts < last);
+		last = greq->ts;
+	}
+}
+
+void dgl_init(struct dgl *dgl)
+{
+	int i;
+	struct dgl_resource *resource;
+
+	for (i = 0; i < NR_CPUS; ++i)
+		dgl->acquired[i] = NULL;
+
+	for (i = 0; i < NUM_RESOURCES; ++i) {
+		resource = &dgl->resources[i];
+
+		INIT_LIST_HEAD(&resource->waiting);
+		resource->free_replicas = NUM_REPLICAS;
+	}
+
+	dgl->requests = 0;
+	dgl->running = 0;
+	dgl->ts = 0;
+}
+
+void dgl_group_req_init(struct dgl_group_req *greq)
+{
+	int i;
+	greq->cpu = NO_CPU;
+	for (i = 0; i < MASK_WORDS; ++i) {
+		greq->requested[i] = 0;
+		greq->waiting[i] = 0;
+	}
+}
+
+/**
+ * set_req - create request for @replicas of @resource.
+ */
+void set_req(struct dgl_group_req *greq, int resource, int replicas)
+{
+	int word, bit;
+	struct dgl_req *req;
+
+	BUG_ON(replicas > NUM_REPLICAS);
+
+	mask_idx(resource, &word, &bit);
+	__set_bit(bit, &greq->requested[word]);
+
+	req = &greq->requests[resource];
+	INIT_LIST_HEAD(&req->list);
+	req->replicas = replicas;
+}
+
+/*
+ * Attempt to fulfill request @req for @resource.
+ * Return 1 if successful. If the matching group request has acquired all of
+ * its needed resources, this will then set that req as dgl->acquired[cpu].
+ */
+static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource,
+				 struct dgl_req *req)
+{
+	int word, bit, rid, head, empty, room;
+	unsigned long waiting;
+	struct dgl_group_req *greq;
+
+	rid = resource_id(dgl, resource);
+	greq = req_group(req, rid);
+
+	head = resource->waiting.next == &req->list;
+	empty = list_empty(&resource->waiting);
+	room = resource->free_replicas >= req->replicas;
+
+	if (! (room && (head || empty)) ) {
+		TRACE("0x%p cannot acquire %d of %d free (r=%d,h=%d,e=%d)\n",
+		      greq, req->replicas, resource->free_replicas,
+		      room, head, empty);
+		return 0;
+	}
+
+	resource->free_replicas -= req->replicas;
+	BUG_ON(resource->free_replicas > NUM_REPLICAS);
+
+	TRACE("0x%p acquired %d replicas of rid %d\n",
+	      greq, req->replicas, rid);
+
+	mask_idx(rid, &word, &bit);
+	clear_bit(bit, &greq->waiting[word]);
+
+	waiting = 0;
+	for (word = 0; word < MASK_WORDS; word++) {
+		waiting |= greq->waiting[word];
+		if (waiting)
+			break;
+	}
+
+	if (!waiting) {
+		TRACE("0x%p acquired all resources\n", greq);
+		BUG_ON(dgl->acquired[greq->cpu]);
+		dgl->acquired[greq->cpu] = greq;
+		litmus_reschedule(greq->cpu);
+		dgl->running++;
+	}
+
+	return 1;
+}
+
+/**
+ * add_group_req - initiate group request.
+ */
+void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu)
+{
+	int b, w, i, succ, all_succ = 1;
+	struct dgl_req *req;
+	struct dgl_resource *resource;
+
+	greq->cpu = cpu;
+	greq->ts = dgl->ts++;
+
+	TRACE("0x%p group request added for CPU %d\n", greq, cpu);
+	BUG_ON(dgl->acquired[cpu] == greq);
+
+	++dgl->requests;
+
+	for_each_resource(greq->requested, w, b, i) {
+		__set_bit(b, &greq->waiting[w]);
+	}
+
+	for_each_resource(greq->requested, w, b, i) {
+		req = &greq->requests[i];
+		resource = &dgl->resources[i];
+
+		succ = try_acquire(dgl, resource, req);
+		all_succ &= succ;
+
+		if (!succ) {
+			TRACE("0x%p waiting on rid %d\n", greq, i);
+			list_add_tail(&req->list, &resource->waiting);
+		}
+	}
+
+	/* Grant empty requests */
+	if (all_succ && !dgl->acquired[cpu]) {
+		TRACE("0x%p empty group request acquired cpu %d\n", greq, cpu);
+		dgl->acquired[cpu] = greq;
+		++dgl->running;
+	}
+
+	BUG_ON(dgl->requests && !dgl->running);
+}
+
+/**
+ * remove_group_req - abandon group request.
+ *
+ * This will also progress the waiting queues of resources acquired by @greq.
+ */
+void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq)
+{
+	int b, w, i;
+	struct dgl_req *req, *next;
+	struct dgl_resource *resource;
+
+	TRACE("0x%p removing group request for CPU %d\n", greq, greq->cpu);
+
+	--dgl->requests;
+
+	if (dgl->acquired[greq->cpu] == greq) {
+		TRACE("0x%p no longer acquired on CPU %d\n", greq, greq->cpu);
+		dgl->acquired[greq->cpu] = NULL;
+		--dgl->running;
+	}
+
+	for_each_resource(greq->requested, w, b, i) {
+		req = &greq->requests[i];
+		resource = &dgl->resources[i];
+
+		if (!list_empty(&req->list)) {
+			/* Waiting on resource */
+			clear_bit(b, &greq->waiting[w]);
+			list_del_init(&req->list);
+			TRACE("Quitting 0x%p from rid %d\n",
+			      req, i);
+		} else {
+			/* Have resource */
+			resource->free_replicas += req->replicas;
+			BUG_ON(resource->free_replicas > NUM_REPLICAS);
+			TRACE("0x%p releasing %d of %d replicas, rid %d\n",
+			      greq, req->replicas, resource->free_replicas, i);
+
+			if (!list_empty(&resource->waiting)) {
+				/* Give it to the next guy */
+				next = list_first_entry(&resource->waiting,
+							struct dgl_req,
+							list);
+
+				BUG_ON(req_group(next, i)->ts < greq->ts);
+
+				if (try_acquire(dgl, resource, next)) {
+					list_del_init(&next->list);
+					print_waiting(dgl, resource);
+
+				}
+			}
+		}
+	}
+
+	BUG_ON(dgl->requests && !dgl->running);
+}
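The bit-mask bookkeeping is easiest to check with concrete numbers. With NUM_COLORS = 64 and 64-bit longs, MASK_SIZE is 64 and WP(64, 64) rounds up to a single mask word; a worked sketch:

    /* WP(num, word) == ceil(num / word):
     *   WP(64, 64) == 64/64 + (64%64 != 0) == 1 + 0 == 1 word
     *   WP(65, 64) == 65/64 + (65%64 != 0) == 1 + 1 == 2 words */
    int word, bit;
    mask_idx(17, &word, &bit);   /* word == 17/64 == 0, bit == 17%64 == 17 */
    /* ri(word, bit) inverts it: 0 * MASK_SIZE + 17 == 17 */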
diff --git a/litmus/fifo_common.c b/litmus/fifo_common.c
new file mode 100644
index 000000000000..84ae98e42ae4
--- /dev/null
+++ b/litmus/fifo_common.c
@@ -0,0 +1,58 @@
+/*
+ * litmus/fifo_common.c
+ *
+ * Common functions for FIFO-based schedulers.
+ */
+
+#include <linux/percpu.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/sched_trace.h>
+
+#include <litmus/fifo_common.h>
+
+int fifo_higher_prio(struct task_struct* first,
+		     struct task_struct* second)
+{
+	/* There is no point in comparing a task to itself. */
+	if (first && first == second) {
+		TRACE_TASK(first,
+			   "WARNING: pointless fifo priority comparison.\n");
+		BUG_ON(1);
+		return 0;
+	}
+
+	if (!first || !second)
+		return first && !second;
+
+	/* Tiebreak by PID */
+	return (get_release(first) == get_release(second) &&
+		first->pid > second->pid) ||
+	       (get_release(first) < get_release(second));
+
+
+}
+
+int fifo_ready_order(struct bheap_node* a, struct bheap_node* b)
+{
+	return fifo_higher_prio(bheap2task(a), bheap2task(b));
+}
+
+void fifo_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		      release_jobs_t release)
+{
+	rt_domain_init(rt, fifo_ready_order, resched, release);
+}
+
+int fifo_preemption_needed(rt_domain_t* rt, struct task_struct *t)
+{
+	if (!__jobs_pending(rt))
+		return 0;
+	if (!t)
+		return 1;
+
+	return !is_realtime(t) || fifo_higher_prio(__next_ready(rt), t);
+}
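Note the ordering really is release-time FIFO with a PID tie-break, and the tie-break favors the larger PID (the opposite of the lower-PID convention used by edf_higher_prio() in litmus/edf_common.c). For instance:

    /* fifo_higher_prio(a, b):
     *   releases 10 vs 20            -> 1 (earlier release wins)
     *   equal releases, pids 42 vs 7 -> 1 (larger PID wins the tie) */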
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
index 06fcf4cf77dc..7ff7f25b56aa 100644
--- a/litmus/ftdev.c
+++ b/litmus/ftdev.c
@@ -231,7 +231,9 @@ static ssize_t ftdev_read(struct file *filp,
 	 * lost if the task is interrupted (e.g., killed).
 	 */
 	set_current_state(TASK_INTERRUPTIBLE);
+
 	schedule_timeout(50);
+
 	if (signal_pending(current)) {
 		if (err == 0)
 			/* nothing read yet, signal problem */
diff --git a/litmus/locking.c b/litmus/locking.c
index 0c1aa6aa40b7..4881ca119acf 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -1,3 +1,5 @@
+#include <linux/sched.h>
+#include <litmus/litmus.h>
 #include <litmus/fdso.h>
 
 #ifdef CONFIG_LITMUS_LOCKING
@@ -28,14 +30,18 @@ static inline struct litmus_lock* get_lock(struct od_table_entry* entry)
 	return (struct litmus_lock*) entry->obj->obj;
 }
 
+atomic_t lock_id = ATOMIC_INIT(0);
+
 static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user arg)
 {
 	struct litmus_lock* lock;
 	int err;
 
 	err = litmus->allocate_lock(&lock, type, arg);
-	if (err == 0)
+	if (err == 0) {
+		lock->id = atomic_add_return(1, &lock_id);
 		*obj_ref = lock;
+	}
 	return err;
 }
 
diff --git a/litmus/rt_server.c b/litmus/rt_server.c
new file mode 100644
index 000000000000..818588a3d317
--- /dev/null
+++ b/litmus/rt_server.c
@@ -0,0 +1,34 @@
+#include <litmus/rt_server.h>
+
+
+static struct task_struct* default_server_take(struct rt_server *srv)
+{
+	return __take_ready(srv->domain);
+}
+
+static void default_server_update(struct rt_server *srv)
+{
+}
+
+void init_rt_server(struct rt_server *server,
+		    int sid, int cpu, rt_domain_t *domain,
+		    need_preempt_t need_preempt,
+		    server_requeue_t requeue,
+		    server_update_t update,
+		    server_take_t take)
+{
+	/* need_preempt and requeue are mandatory */
+	BUG_ON(!need_preempt || !requeue);
+
+	server->need_preempt = need_preempt;
+	server->requeue = requeue;
+
+	server->update = (update) ? update : default_server_update;
+	server->take = (take) ? take : default_server_take;
+
+	server->sid = sid;
+	server->cpu = cpu;
+	server->linked = NULL;
+	server->domain = domain;
+	server->running = 0;
+}
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
new file mode 100644
index 000000000000..98a46bb1b06f
--- /dev/null
+++ b/litmus/sched_color.c
@@ -0,0 +1,811 @@
1#include <linux/percpu.h>
2#include <linux/sched.h>
3#include <linux/list.h>
4#include <linux/spinlock.h>
5#include <linux/module.h>
6#include <linux/slab.h>
7
8#include <litmus/litmus.h>
9#include <litmus/jobs.h>
10#include <litmus/preempt.h>
11#include <litmus/sched_plugin.h>
12#include <litmus/edf_common.h>
13#include <litmus/sched_trace.h>
14#include <litmus/color.h>
15#include <litmus/fifo_common.h>
16#include <litmus/budget.h>
17#include <litmus/rt_server.h>
18#include <litmus/dgl.h>
19
20/**
 21 * @server Common server functionality.
 22 * @task Task used to schedule server.
 23 * @timer Budget enforcement for @task.
24 * @start_time If set, time at which server began running.
25 */
26struct fifo_server {
27 struct rt_server server;
28 struct task_struct* task;
29 struct enforcement_timer timer;
30 lt_t start_time;
31};
32
33/**
34 * @server Common server functionality.
35 * @edf_domain PEDF domain.
36 * @scheduled Task physically running on CPU.
37 * @fifo_server Server partitioned to this CPU.
38 */
39struct cpu_entry {
40 struct rt_server server;
41 rt_domain_t edf_domain;
42 struct task_struct* scheduled;
43 struct fifo_server fifo_server;
44};
45
46DEFINE_PER_CPU(struct cpu_entry, color_cpus);
47
48static rt_domain_t fifo_domain;
49static raw_spinlock_t fifo_lock;
50
51static struct dgl group_lock;
52static raw_spinlock_t dgl_lock;
53
54#define local_entry (&__get_cpu_var(color_cpus))
55#define remote_entry(cpu) (&per_cpu(color_cpus, cpu))
56#define task_entry(task) remote_entry(get_partition(task))
57#define task_fserver(task) (&task_entry(task)->fifo_server.server)
58#define entry_lock(entry) (&entry->edf_domain.ready_lock)
59
60#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c])
61#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->edf_domain)
62#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry))
63
64/*
65 * Requeue onto domain's release or ready queue based on task state.
66 */
67static void requeue(rt_domain_t *dom, struct task_struct* t)
68{
69 if (is_server(t) && !tsk_rt(t)->present)
70 /* Remove stopped server from the system */
71 return;
72
73 TRACE_TASK(t, "Requeueing\n");
74 if (is_queued(t)) {
75 TRACE_TASK(t, "Already queued!\n");
76 return;
77 }
78
79 set_rt_flags(t, RT_F_RUNNING);
80 if (is_released(t, litmus_clock()))
81 __add_ready(dom, t);
82 else
83 add_release(dom, t);
84}
85
86/*
87 * Relinquish resources held by @t (or its children).
88 */
89static void release_resources(struct task_struct *t)
90{
91 struct task_struct *sched;
92
93 TRACE_TASK(t, "Releasing resources\n");
94
95 if (is_server(t)) {
96 sched = task_fserver(t)->linked;
97 if (sched)
98 release_resources(sched);
99 } else if (is_kernel_np(t))
100 remove_group_req(&group_lock, tsk_rt(t)->req);
101 tsk_rt(t)->kernel_np = 0;
102}
103
104/*
105 * Put in requests for resources needed by @t. If @t is a server, this will
106 * set @t's np flag to reflect resources held by @t's children.
107 */
108static void acquire_resources(struct task_struct *t)
109{
110 int cpu;
111 struct rt_server *server;
112 struct task_struct *sched;
113
114 /* Can't acquire resources if t is not running */
115 BUG_ON(!get_task_server(t));
116
117 if (is_kernel_np(t)) {
118 TRACE_TASK(t, "Already contending for resources\n");
119 return;
120 }
121 cpu = get_task_server(t)->cpu;
122
123 if (is_server(t)) {
124 server = task_fserver(t);
125 sched = server->linked;
126
127 /* Happens when server is booted off on completion or
128 * has just completed executing a task.
129 */
130 if (sched && !is_kernel_np(sched))
131 acquire_resources(sched);
132
133 /* Become np if there is a running task */
134 if (sched && has_resources(sched, cpu)) {
135			TRACE_TASK(t, "Running task with resources\n");
136			tsk_rt(t)->kernel_np = 1;
137		} else {
138			TRACE_TASK(t, "Running task without resources\n");
139 tsk_rt(t)->kernel_np = 0;
140 }
141 } else {
142 TRACE_TASK(t, "Acquiring resources\n");
143 if (!has_resources(t, cpu))
144 add_group_req(&group_lock, tsk_rt(t)->req, cpu);
145 tsk_rt(t)->kernel_np = 1;
146 }
147}
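/* A hedged summary of acquire_resources():
 *
 *	server:	kernel_np <- (linked child && child holds its replicas)
 *	task:	kernel_np <- 1, after issuing add_group_req()
 *
 * so a parent server is non-preemptable exactly while its child holds
 * group-lock replicas on this CPU.
 */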
148
149/*
150 * Stop logically running the currently linked task.
151 */
152static void unlink(struct rt_server *server)
153{
154 BUG_ON(!server->linked);
155
156 if (is_server(server->linked))
157 task_fserver(server->linked)->running = 0;
158
159
160 sched_trace_server_switch_away(server->sid, 0,
161 server->linked->pid,
162 get_rt_job(server->linked));
163 TRACE_TASK(server->linked, "No longer run by server %d\n", server->sid);
164
165 raw_spin_lock(&dgl_lock);
166 release_resources(server->linked);
167 raw_spin_unlock(&dgl_lock);
168
169 get_task_server(server->linked) = NULL;
170 server->linked = NULL;
171}
172
173static struct task_struct* schedule_server(struct rt_server *server);
174
175/*
176 * Logically run @task.
177 */
178static void link(struct rt_server *server, struct task_struct *task)
179{
180 struct rt_server *tserv;
181
182 BUG_ON(server->linked);
183 BUG_ON(!server->running);
184 BUG_ON(is_kernel_np(task));
185
186 TRACE_TASK(task, "Run by server %d\n", server->sid);
187
188 if (is_server(task)) {
189 tserv = task_fserver(task);
190 tserv->running = 1;
191 schedule_server(tserv);
192 }
193
194 server->linked = task;
195 get_task_server(task) = server;
196
197 sched_trace_server_switch_to(server->sid, 0,
198 task->pid, get_rt_job(task));
199}
200
201/*
202 * Complete job for task linked to @server.
203 */
204static void job_completion(struct rt_server *server)
205{
206 struct task_struct *t = server->linked;
207
208 TRACE_TASK(t, "Job completed\n");
209 if (is_server(t))
210 sched_trace_server_completion(t->pid, get_rt_job(t));
211 else
212 sched_trace_task_completion(t, 0);
213
214 unlink(server);
215 set_rt_flags(t, RT_F_SLEEP);
216 prepare_for_next_period(t);
217
218 if (is_server(t))
219 sched_trace_server_release(t->pid, get_rt_job(t),
220 get_release(t), get_deadline(t));
221 else
222 sched_trace_task_release(t);
223
224 if (is_running(t))
225 server->requeue(server, t);
226}
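/* Hedged note: completion is symmetric for servers and plain tasks --
 * unlink, mark RT_F_SLEEP, prepare_for_next_period(), then requeue if
 * still runnable; only the sched_trace record type differs between the
 * two cases.
 */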
227
228/*
229 * Update @server state to reflect task's state.
230 */
231static void update_task(struct rt_server *server)
232{
233 int oot, sleep, block, np;
234 struct task_struct *t = server->linked;
235
236 block = !is_running(t);
237 oot = budget_enforced(t) && budget_exhausted(t);
238 np = is_kernel_np(t);
239 sleep = get_rt_flags(t) == RT_F_SLEEP;
240
241 TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d\n",
242 block, oot, np, sleep);
243
244 if (block)
245 unlink(server);
246 else if (oot || sleep)
247 job_completion(server);
248}
249
250/*
251 * Link next task for @server.
252 */
253static struct task_struct* schedule_server(struct rt_server *server)
254{
255 struct task_struct *next;
256 struct rt_server *lserver;
257
258 TRACE("Scheduling server %d\n", server->sid);
259
260 if (server->linked) {
261 if (is_server(server->linked)) {
262 lserver = task_fserver(server->linked);
263 lserver->update(lserver);
264 }
265 update_task(server);
266 }
267
268 next = server->linked;
269 if ((!next || !is_np(next)) &&
270 server->need_preempt(server->domain, next)) {
271 if (next) {
272 TRACE_TASK(next, "Preempted\n");
273 unlink(server);
274 server->requeue(server, next);
275 }
276		next = server->take(server);
277 link(server, next);
278 }
279
280 return next;
281}
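/* Hedged sketch of the two-level dispatch: for a CPU server,
 * schedule_server() may link either a real task or a FIFO server;
 * linking a server recurses into the server's own domain, so the task
 * that physically runs is roughly:
 *
 *	next = entry->server.linked;
 *	if (next && is_server(next))
 *		next = task_fserver(next)->linked;   -- second level
 */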
282
283/*
284 * Dumb requeue for PEDF (CPU) servers.
285 */
286static void edf_requeue(struct rt_server *server, struct task_struct *t)
287{
288 BUG_ON(is_be(t));
289 requeue(server->domain, t);
290}
291
292/*
293 * Locking requeue for FIFO servers.
294 */
295static void fifo_requeue(struct rt_server *server, struct task_struct *t)
296{
297 BUG_ON(!is_be(t));
298 raw_spin_lock(&fifo_lock);
299 requeue(server->domain, t);
300 raw_spin_unlock(&fifo_lock);
301}
302
303
304/*
305 * Locking take for FIFO servers.
306 */
307static struct task_struct* fifo_take(struct rt_server *server)
308{
309 struct task_struct *ret;
310
311 raw_spin_lock(&fifo_lock);
312 ret = __take_ready(server->domain);
313 raw_spin_unlock(&fifo_lock);
314
315 return ret;
316}
317
318/*
319 * Update server state, including picking next running task and incrementing
320 * server execution time.
321 */
322static void fifo_update(struct rt_server *server)
323{
324 lt_t delta;
325 struct fifo_server *fserver;
326
327 fserver = container_of(server, struct fifo_server, server);
328 TRACE_TASK(fserver->task, "Updating FIFO server\n");
329
330 if (!server->linked || has_resources(server->linked, server->cpu)) {
331 /* Running here means linked to a parent server */
332 BUG_ON(!server->running);
333
334 /* Stop executing */
335 if (fserver->start_time) {
336 delta = litmus_clock() - fserver->start_time;
337 tsk_rt(fserver->task)->job_params.exec_time += delta;
338 fserver->start_time = 0;
339 cancel_enforcement_timer(&fserver->timer);
340 } else {
341 /* Server is linked, but not executing */
342 BUG_ON(fserver->timer.armed);
343 }
344
345 /* Calculate next task */
346 schedule_server(&fserver->server);
347
348 /* Reserve needed resources */
349 raw_spin_lock(&dgl_lock);
350 acquire_resources(fserver->task);
351 raw_spin_unlock(&dgl_lock);
352 }
353}
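/* Hedged note: FIFO-server budget is charged lazily. Each update folds
 * elapsed wall-clock time into the server task:
 *
 *	exec_time += litmus_clock() - start_time;
 *	start_time = 0;
 *
 * with overruns caught by the precise enforcement timer that
 * color_schedule() arms below.
 */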
354
355/*
356 * Trigger preemption based only on the EDF-scheduled "linked" task.
357 */
358static void color_edf_release(rt_domain_t *edf, struct bheap *tasks)
359{
360 unsigned long flags;
361 struct cpu_entry *entry;
362
363 TRACE_TASK(bheap2task(bheap_peek(edf->order, tasks)),
364 "Released set of EDF tasks\n");
365
366 entry = container_of(edf, struct cpu_entry, edf_domain);
367 raw_spin_lock_irqsave(entry_lock(entry), flags);
368
369 __merge_ready(edf, tasks);
370
371 if (edf_preemption_needed(edf, entry->server.linked) &&
372 (!entry->server.linked || !is_kernel_np(entry->server.linked))) {
373 litmus_reschedule(entry->server.cpu);
374 }
375
376 raw_spin_unlock_irqrestore(entry_lock(entry), flags);
377}
378
379/*
380 * Trigger preemption on the first FIFO server that is running NULL.
381 */
382static void check_for_fifo_preempt(void)
383{
384 int ret = 0, cpu;
385 struct cpu_entry *entry;
386 struct rt_server *cpu_server, *fifo_server;
387
388 TRACE("Checking for FIFO preempt\n");
389
390 for_each_online_cpu(cpu) {
391 entry = remote_entry(cpu);
392 cpu_server = &entry->server;
393 fifo_server = &entry->fifo_server.server;
394
395 raw_spin_lock(entry_lock(entry));
396 raw_spin_lock(&fifo_lock);
397
398 if (cpu_server->linked && is_server(cpu_server->linked) &&
399 !fifo_server->linked) {
400 litmus_reschedule(cpu);
401 ret = 1;
402 }
403
404 raw_spin_unlock(&fifo_lock);
405 raw_spin_unlock(entry_lock(entry));
406
407 if (ret)
408 break;
409 }
410}
411
412static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
413{
414 unsigned long flags;
415
416 TRACE_TASK(bheap2task(bheap_peek(dom->order, tasks)),
417 "Released set of FIFO tasks\n");
418 local_irq_save(flags);
419
420 raw_spin_lock(&fifo_lock);
421 __merge_ready(dom, tasks);
422 raw_spin_unlock(&fifo_lock);
423
424 check_for_fifo_preempt();
425
426 local_irq_restore(flags);
427}
428
429#define cpu_empty(entry, run) \
430 (!(run) || (is_server(run) && !(entry)->fifo_server.server.linked))
431
432static struct task_struct* color_schedule(struct task_struct *prev)
433{
434 unsigned long flags;
435 int server_running;
436 struct cpu_entry *entry = local_entry;
437 struct task_struct *next, *plink = entry->server.linked;
438
439 TRACE("Reschedule on %d at %llu\n", entry->server.cpu, litmus_clock());
440 BUG_ON(entry->scheduled && entry->scheduled != prev);
441 BUG_ON(entry->scheduled && !is_realtime(prev));
442
443 raw_spin_lock_irqsave(entry_lock(entry), flags);
444
445 if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) {
446		TRACE_TASK(prev, "Ran without being linked; requeueing\n");
447 requeue(task_dom(entry, prev), prev);
448 }
449
450 /* Pick next top-level task */
451 next = schedule_server(&entry->server);
452 /* Schedule hierarchically */
453 server_running = next && is_server(next);
454 if (server_running)
455 next = task_fserver(next)->linked;
456
457 /* Selected tasks must contend for group lock */
458 if (next) {
459 raw_spin_lock(&dgl_lock);
460 acquire_resources(next);
461 if (has_resources(next, entry->server.cpu)) {
462 TRACE_TASK(next, "Has group lock\n");
463 sched_trace_task_resume(next, 1);
464 } else {
465 TRACE_TASK(next, "Does not have lock, 0x%p does\n",
466 group_lock.acquired[entry->server.cpu]);
467 if (next != prev)
468 sched_trace_task_block(next, 1);
469 next = NULL;
470 server_running = 0;
471 }
472 raw_spin_unlock(&dgl_lock);
473 }
474
475 /* Server is blocked if its running task is blocked. Note that if the
476 * server has no running task, the server will now execute NULL.
477 */
478 if (server_running) {
479 TRACE_TASK(entry->server.linked, "Server running\n");
480 arm_enforcement_timer(&entry->fifo_server.timer,
481 entry->fifo_server.task);
482 entry->fifo_server.start_time = litmus_clock();
483 }
484
485 if (prev)
486 tsk_rt(prev)->scheduled_on = NO_CPU;
487 if (next)
488 tsk_rt(next)->scheduled_on = entry->server.cpu;
489
490 entry->scheduled = next;
491 sched_state_task_picked();
492
493 raw_spin_unlock_irqrestore(entry_lock(entry), flags);
494
495 return entry->scheduled;
496}
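/* Hedged note: the DGL step above is the cache-color gate -- a task may
 * only occupy the CPU while its group request is the acquired request
 * for that CPU (has_resources()); otherwise the CPU runs NULL until a
 * later add_group_req()/remove_group_req() elsewhere promotes the
 * request and triggers a reschedule.
 */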
497
498static void color_task_new(struct task_struct *t, int on_rq, int running)
499{
500 unsigned long flags;
501 int i, replicas;
502 raw_spinlock_t *lock;
503 struct cpu_entry *entry;
504 struct dgl_group_req *req;
505
506 TRACE_TASK(t, "New colored task\n");
507 local_irq_save(flags);
508
509 entry = (is_be(t)) ? local_entry : task_entry(t);
510 lock = task_lock(entry, t);
511
512 release_at(t, litmus_clock());
513
514 /* Create request for dynamic group locks */
515	req = kmalloc(sizeof(*req), GFP_ATOMIC); /* XXX: alloc failure unchecked */
516 dgl_group_req_init(req);
517 for (i = 0; i < NUM_RESOURCES; i++) {
518 replicas = get_control_page(t)->requests[i];
519 if (replicas)
520 set_req(req, i, replicas);
521 }
522 tsk_rt(t)->req = req;
523
524 /* Join system */
525 raw_spin_lock(lock);
526 if (running) {
527 TRACE_TASK(t, "Already scheduled on %d\n", entry->server.cpu);
528 BUG_ON(entry->scheduled);
529 entry->scheduled = t;
530 tsk_rt(t)->scheduled_on = entry->server.cpu;
531 } else
532 requeue(task_dom(entry, t), t);
533 raw_spin_unlock(lock);
534
535 /* Trigger preemptions */
536 if (is_be(t))
537 check_for_fifo_preempt();
538 else
539 litmus_reschedule(entry->server.cpu);
540
541 local_irq_restore(flags);
542}
543
544static void color_task_wake_up(struct task_struct *task)
545{
546 unsigned long flags;
547 struct cpu_entry* entry = task_entry(task);
548 raw_spinlock_t *lock = task_lock(entry, task);
549 lt_t now = litmus_clock();
550
551 TRACE_TASK(task, "Wake up at %llu\n", now);
552
553 local_irq_save(flags);
554
555	/* Abuse the sporadic model: treat a tardy wake-up as a new release */
556 if (is_tardy(task, now)) {
557 release_at(task, now);
558 sched_trace_task_release(task);
559 }
560
561 /* Re-enter system */
562 if (entry->scheduled != task) {
563 raw_spin_lock(lock);
564 requeue(task_dom(entry, task), task);
565 raw_spin_unlock(lock);
566 } else {
567		TRACE_TASK(task, "Is already scheduled on %d!\n",
568				entry->server.cpu);
569 }
570
571 /* Trigger preemptions */
572 if (is_be(task))
573 check_for_fifo_preempt();
574 else
575 litmus_reschedule(entry->server.cpu);
576
577 local_irq_restore(flags);
578}
579
580static void color_task_block(struct task_struct *t)
581{
582	TRACE_TASK(t, "Block at %llu, state=%ld\n", litmus_clock(), t->state);
583 BUG_ON(!is_realtime(t));
584 BUG_ON(is_queued(t));
585}
586
587static void color_task_exit(struct task_struct * t)
588{
589 unsigned long flags;
590 struct cpu_entry *entry = task_entry(t);
591 raw_spinlock_t *lock = task_lock(entry, t);
592
593 TRACE_TASK(t, "RIP, now reschedule\n");
594
595 local_irq_save(flags);
596
597 /* Remove from scheduler consideration */
598 if (is_queued(t)) {
599 raw_spin_lock(lock);
600 remove(task_dom(entry, t), t);
601 raw_spin_unlock(lock);
602 }
603
604 /* Stop parent server */
605 if (get_task_server(t))
606 unlink(get_task_server(t));
607
608 /* Unschedule running task */
609 if (tsk_rt(t)->scheduled_on != NO_CPU) {
610 entry = remote_entry(tsk_rt(t)->scheduled_on);
611
612 raw_spin_lock(entry_lock(entry));
613
614 tsk_rt(t)->scheduled_on = NO_CPU;
615 entry->scheduled = NULL;
616 litmus_reschedule(entry->server.cpu);
617
618 raw_spin_unlock(entry_lock(entry));
619 }
620
621 /* Remove dgl request from system */
622 raw_spin_lock(&dgl_lock);
623 release_resources(t);
624 raw_spin_unlock(&dgl_lock);
625 kfree(tsk_rt(t)->req);
626
627 local_irq_restore(flags);
628}
629
630/*
631 * Non-be tasks must have migrated to the right CPU.
632 */
633static long color_admit_task(struct task_struct* t)
634{
635 int ret = is_be(t) || task_cpu(t) == get_partition(t) ? 0 : -EINVAL;
636	if (ret) {
637 printk(KERN_WARNING "Task failed to migrate to CPU %d\n",
638 get_partition(t));
639 }
640 return ret;
641}
642
643/*
644 * Load server parameters.
645 */
646static long color_activate_plugin(void)
647{
648 int cpu, ret = 0;
649 struct rt_task tp;
650 struct task_struct *server_task;
651 struct cpu_entry *entry;
652 lt_t now = litmus_clock();
653
654 for_each_online_cpu(cpu) {
655 entry = remote_entry(cpu);
656 server_task = entry->fifo_server.task;
657
658 raw_spin_lock(entry_lock(entry));
659
660 ret = color_server_params(cpu, &tp.exec_cost,
661 &tp.period);
662 if (ret) {
663 printk(KERN_WARNING "Uninitialized server for CPU %d\n",
664 entry->server.cpu);
665 goto loop_end;
666 }
667
668 /* Fill rt parameters */
669 tp.phase = 0;
670 tp.cpu = cpu;
671 tp.cls = RT_CLASS_SOFT;
672 tp.budget_policy = PRECISE_ENFORCEMENT;
673 tsk_rt(server_task)->task_params = tp;
674 tsk_rt(server_task)->present = 1;
675
676 /* Make runnable */
677 release_at(server_task, now);
678 entry->fifo_server.start_time = 0;
679 entry->scheduled = NULL;
680
681 if (!is_queued(server_task))
682 requeue(&entry->edf_domain, server_task);
683
684 TRACE_TASK(server_task, "Created server with wcet: %llu, "
685 "period: %llu\n", tp.exec_cost, tp.period);
686
687 loop_end:
688 raw_spin_unlock(entry_lock(entry));
689 }
690
691 return ret;
692}
693
694/*
695 * Mark servers as not present so that future calls to requeue skip them.
696 */
697static long color_deactivate_plugin(void)
698{
699 int cpu;
700 struct cpu_entry *entry;
701
702 for_each_online_cpu(cpu) {
703 entry = remote_entry(cpu);
704 if (entry->fifo_server.task) {
705 tsk_rt(entry->fifo_server.task)->present = 0;
706 }
707 }
708 return 0;
709}
710
711/*
712 * Dump container and server parameters for tracing.
713 */
714static void color_release_ts(lt_t time)
715{
716 int cpu, fifo_cid;
717 char fifo_name[TASK_COMM_LEN], cpu_name[TASK_COMM_LEN];
718 struct cpu_entry *entry;
719 struct task_struct *stask;
720
721 strcpy(cpu_name, "CPU");
722 strcpy(fifo_name, "BE");
723
724 fifo_cid = num_online_cpus();
725 trace_litmus_container_param(fifo_cid, fifo_name);
726
727 for_each_online_cpu(cpu) {
728 entry = remote_entry(cpu);
729 trace_litmus_container_param(cpu, cpu_name);
730 trace_litmus_server_param(entry->server.sid, cpu, 0, 0);
731 stask = entry->fifo_server.task;
732 trace_litmus_server_param(stask->pid, fifo_cid,
733 get_exec_cost(stask),
734 get_rt_period(stask));
735 }
736}
737
738static struct sched_plugin color_plugin __cacheline_aligned_in_smp = {
739 .plugin_name = "COLOR",
740 .task_new = color_task_new,
741 .complete_job = complete_job,
742 .task_exit = color_task_exit,
743 .schedule = color_schedule,
744 .task_wake_up = color_task_wake_up,
745 .task_block = color_task_block,
746 .admit_task = color_admit_task,
747
748 .release_ts = color_release_ts,
749
750 .activate_plugin = color_activate_plugin,
751 .deactivate_plugin = color_deactivate_plugin,
752};
753
754static int __init init_color(void)
755{
756 int cpu;
757 struct cpu_entry *entry;
758 struct task_struct *server_task;
759 struct fifo_server *fifo_server;
760 struct rt_server *cpu_server;
761
762 for_each_online_cpu(cpu) {
763 entry = remote_entry(cpu);
764 edf_domain_init(&entry->edf_domain, NULL, color_edf_release);
765
766 entry->scheduled = NULL;
767
768 /* Create FIFO server */
769 fifo_server = &entry->fifo_server;
770 init_rt_server(&fifo_server->server,
771 cpu + num_online_cpus() + 1,
772 cpu,
773 &fifo_domain,
774 fifo_preemption_needed,
775 fifo_requeue, fifo_update, fifo_take);
776
777
778 /* Create task struct for FIFO server */
779 server_task = kmalloc(sizeof(struct task_struct), GFP_ATOMIC);
780 memset(server_task, 0, sizeof(*server_task));
781 server_task->policy = SCHED_LITMUS;
782 strcpy(server_task->comm, "server");
783 server_task->pid = fifo_server->server.sid;
784 fifo_server->task = server_task;
785
786 /* Create rt_params for FIFO server */
787 tsk_rt(server_task)->heap_node = bheap_node_alloc(GFP_ATOMIC);
788 tsk_rt(server_task)->rel_heap = release_heap_alloc(GFP_ATOMIC);
789 bheap_node_init(&tsk_rt(server_task)->heap_node, server_task);
790 tsk_rt(server_task)->is_server = 1;
791
792 /* Create CPU server */
793 cpu_server = &entry->server;
794 init_rt_server(cpu_server, cpu + 1, cpu,
795 &entry->edf_domain, edf_preemption_needed,
796 edf_requeue, NULL, NULL);
797 cpu_server->running = 1;
798
799 init_enforcement_timer(&fifo_server->timer);
800 }
801
802 fifo_domain_init(&fifo_domain, NULL, color_fifo_release);
803 raw_spin_lock_init(&fifo_lock);
804
805 dgl_init(&group_lock);
806 raw_spin_lock_init(&dgl_lock);
807
808 return register_sched_plugin(&color_plugin);
809}
810
811module_init(init_color);
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 6553948407de..3de3c8605aae 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -160,7 +160,7 @@ static void enqueue_task_litmus(struct rq *rq, struct task_struct *p,
160 int flags) 160 int flags)
161{ 161{
162 if (flags & ENQUEUE_WAKEUP) { 162 if (flags & ENQUEUE_WAKEUP) {
163 sched_trace_task_resume(p); 163 sched_trace_task_resume(p, 0);
164 tsk_rt(p)->present = 1; 164 tsk_rt(p)->present = 1;
165 /* LITMUS^RT plugins need to update the state 165 /* LITMUS^RT plugins need to update the state
166 * _before_ making it available in global structures. 166 * _before_ making it available in global structures.
@@ -185,7 +185,7 @@ static void dequeue_task_litmus(struct rq *rq, struct task_struct *p,
185 if (flags & DEQUEUE_SLEEP) { 185 if (flags & DEQUEUE_SLEEP) {
186 litmus->task_block(p); 186 litmus->task_block(p);
187 tsk_rt(p)->present = 0; 187 tsk_rt(p)->present = 0;
188 sched_trace_task_block(p); 188 sched_trace_task_block(p, 0);
189 189
190 rq->litmus.nr_running--; 190 rq->litmus.nr_running--;
191 } else 191 } else
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 00a1900d6457..123c7516fb76 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -95,6 +95,10 @@ static void litmus_dummy_task_exit(struct task_struct *task)
95{ 95{
96} 96}
97 97
98static void litmus_dummy_release_ts(lt_t time)
99{
100}
101
98static long litmus_dummy_complete_job(void) 102static long litmus_dummy_complete_job(void)
99{ 103{
100 return -ENOSYS; 104 return -ENOSYS;
@@ -136,6 +140,7 @@ struct sched_plugin linux_sched_plugin = {
136 .finish_switch = litmus_dummy_finish_switch, 140 .finish_switch = litmus_dummy_finish_switch,
137 .activate_plugin = litmus_dummy_activate_plugin, 141 .activate_plugin = litmus_dummy_activate_plugin,
138 .deactivate_plugin = litmus_dummy_deactivate_plugin, 142 .deactivate_plugin = litmus_dummy_deactivate_plugin,
143 .release_ts = litmus_dummy_release_ts,
139#ifdef CONFIG_LITMUS_LOCKING 144#ifdef CONFIG_LITMUS_LOCKING
140 .allocate_lock = litmus_dummy_allocate_lock, 145 .allocate_lock = litmus_dummy_allocate_lock,
141#endif 146#endif
@@ -174,6 +179,7 @@ int register_sched_plugin(struct sched_plugin* plugin)
174 CHECK(complete_job); 179 CHECK(complete_job);
175 CHECK(activate_plugin); 180 CHECK(activate_plugin);
176 CHECK(deactivate_plugin); 181 CHECK(deactivate_plugin);
182 CHECK(release_ts);
177#ifdef CONFIG_LITMUS_LOCKING 183#ifdef CONFIG_LITMUS_LOCKING
178 CHECK(allocate_lock); 184 CHECK(allocate_lock);
179#endif 185#endif
diff --git a/litmus/sync.c b/litmus/sync.c
index bf75fde5450b..f3c9262f7022 100644
--- a/litmus/sync.c
+++ b/litmus/sync.c
@@ -73,6 +73,9 @@ static long do_release_ts(lt_t start)
73 73
74 complete_n(&ts_release, task_count); 74 complete_n(&ts_release, task_count);
75 75
76 /* TODO: remove this hack */
77 litmus->release_ts(start);
78
76 return task_count; 79 return task_count;
77} 80}
78 81
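
The release_ts hook gives plugins a callback at synchronous task-system release; the dummy implementation added above keeps the unconditional call in do_release_ts() safe for plugins that do not care. A minimal plugin-side sketch (my_release_ts is a placeholder, not part of this patch):

	static void my_release_ts(lt_t time)
	{
		/* e.g., emit container/server records at release time,
		 * as sched_color.c does in color_release_ts() */
		TRACE("task system released at %llu\n", time);
	}

	/* wired up in the plugin definition:
	 *	.release_ts = my_release_ts,
	 */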