author		Bjoern Brandenburg <bbb@mpi-sws.org>	2012-09-07 08:25:18 -0400
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2012-10-18 14:29:36 -0400
commit		56faaa8985834035e955a522ca451aebd420178c (patch)
tree		972ada0c82fbc4d6238cb89ecd2ada93482b7def /litmus
parent		8d70e44e57cdeefa0f920f86672a69b365a35de1 (diff)
Re-implement synchronous task set release support
The old implementation had the problem that the first job release of all tasks was handled on the *same* CPU (the one that triggered the synchronous task set release), which could result in significant latency spikes. The new implementation uses one completion per task. Further, each task programs its own release, which ensures that it is carried out on the proper processor.
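For illustration, the handshake introduced by this patch can be sketched in userspace terms: each waiter enqueues its own node with its own wakeup object, while the releaser merely stamps the common release time and signals each node, leaving the per-task setup to the tasks themselves. The following is a minimal pthread-based analogue, not the kernel code; all names (release_wait, wait_for_release, release_all) are hypothetical, and the interruption/cleanup path of the real implementation is omitted for brevity.

#include <pthread.h>
#include <stdint.h>

/* One node per waiter, analogous to struct ts_release_wait. */
struct release_wait {
        struct release_wait *next;     /* intrusive list link */
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;                      /* "completion" flag */
        uint64_t release_time;         /* stamped by the releaser */
};

static struct release_wait *release_list;  /* head; guarded by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Waiter side: enqueue a stack-allocated node, block until woken, then
 * perform the per-task setup locally (in the kernel version, this is
 * where litmus->release_at() runs in the context of the waiter). */
static uint64_t wait_for_release(void)
{
        struct release_wait wait = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
        };

        pthread_mutex_lock(&list_lock);
        wait.next = release_list;
        release_list = &wait;
        pthread_mutex_unlock(&list_lock);

        pthread_mutex_lock(&wait.lock);
        while (!wait.done)
                pthread_cond_wait(&wait.cond, &wait.lock);
        pthread_mutex_unlock(&wait.lock);

        return wait.release_time;
}

/* Releaser side: stamp the common release time and wake each waiter
 * individually; the per-task release work stays with the tasks. */
static int release_all(uint64_t start)
{
        struct release_wait *w, *next;
        int count = 0;

        pthread_mutex_lock(&list_lock);
        w = release_list;
        release_list = NULL;           /* clear stale list */
        pthread_mutex_unlock(&list_lock);

        while (w) {
                pthread_mutex_lock(&w->lock);
                next = w->next;        /* read before the waiter can return */
                w->release_time = start;
                w->done = 1;
                pthread_cond_signal(&w->cond);
                pthread_mutex_unlock(&w->lock);
                w = next;
                count++;
        }
        return count;
}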
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/sync.c	| 104
1 file changed, 76 insertions(+), 28 deletions(-)
diff --git a/litmus/sync.c b/litmus/sync.c
index bf75fde5450b..873b3ffb2e7e 100644
--- a/litmus/sync.c
+++ b/litmus/sync.c
@@ -16,63 +16,106 @@
 
 #include <litmus/sched_trace.h>
 
-static DECLARE_COMPLETION(ts_release);
+struct ts_release_wait {
+        struct list_head list;
+        struct completion completion;
+        lt_t ts_release_time;
+};
+
+#define DECLARE_TS_RELEASE_WAIT(symb)                                   \
+        struct ts_release_wait symb =                                   \
+        {                                                               \
+                LIST_HEAD_INIT(symb.list),                              \
+                COMPLETION_INITIALIZER_ONSTACK(symb.completion),        \
+                0                                                       \
+        }
+
+static LIST_HEAD(task_release_list);
+static DEFINE_MUTEX(task_release_lock);
 
 static long do_wait_for_ts_release(void)
 {
-        long ret = 0;
+        DECLARE_TS_RELEASE_WAIT(wait);
+
+        long ret = -ERESTARTSYS;
+
+        if (mutex_lock_interruptible(&task_release_lock))
+                goto out;
+
+        list_add(&wait.list, &task_release_list);
 
-        /* If the interruption races with a release, the completion object
-         * may have a non-zero counter. To avoid this problem, this should
-         * be replaced by wait_for_completion().
-         *
-         * For debugging purposes, this is interruptible for now.
-         */
-        ret = wait_for_completion_interruptible(&ts_release);
+        mutex_unlock(&task_release_lock);
 
+        /* We are enqueued, now we wait for someone to wake us up. */
+        ret = wait_for_completion_interruptible(&wait.completion);
+
+        if (!ret) {
+                /* Completion succeeded, setup release. */
+                litmus->release_at(current, wait.ts_release_time
+                                   + current->rt_param.task_params.phase
+                                   - current->rt_param.task_params.period);
+                /* trigger advance to next job release at the programmed time */
+                ret = complete_job();
+        } else {
+                /* We were interrupted, must cleanup list. */
+                mutex_lock(&task_release_lock);
+                if (!wait.completion.done)
+                        list_del(&wait.list);
+                mutex_unlock(&task_release_lock);
+        }
+
+out:
         return ret;
 }
 
 int count_tasks_waiting_for_release(void)
 {
-        unsigned long flags;
         int task_count = 0;
         struct list_head *pos;
 
-        spin_lock_irqsave(&ts_release.wait.lock, flags);
-        list_for_each(pos, &ts_release.wait.task_list) {
+        mutex_lock(&task_release_lock);
+
+        list_for_each(pos, &task_release_list) {
                 task_count++;
         }
-        spin_unlock_irqrestore(&ts_release.wait.lock, flags);
+
+        mutex_unlock(&task_release_lock);
+
 
         return task_count;
 }
 
 static long do_release_ts(lt_t start)
 {
-        int task_count = 0;
-        unsigned long flags;
+        long task_count = 0;
+
         struct list_head *pos;
-        struct task_struct *t;
+        struct ts_release_wait *wait;
 
+        if (mutex_lock_interruptible(&task_release_lock)) {
+                task_count = -ERESTARTSYS;
+                goto out;
+        }
 
-        spin_lock_irqsave(&ts_release.wait.lock, flags);
         TRACE("<<<<<< synchronous task system release >>>>>>\n");
-
         sched_trace_sys_release(&start);
-        list_for_each(pos, &ts_release.wait.task_list) {
-                t = (struct task_struct*) list_entry(pos,
-                                                     struct __wait_queue,
-                                                     task_list)->private;
+
+        task_count = 0;
+        list_for_each(pos, &task_release_list) {
+                wait = (struct ts_release_wait*)
+                        list_entry(pos, struct ts_release_wait, list);
+
                 task_count++;
-                litmus->release_at(t, start + t->rt_param.task_params.phase);
-                sched_trace_task_release(t);
+                wait->ts_release_time = start;
+                complete(&wait->completion);
         }
 
-        spin_unlock_irqrestore(&ts_release.wait.lock, flags);
+        /* clear stale list */
+        INIT_LIST_HEAD(&task_release_list);
 
-        complete_n(&ts_release, task_count);
+        mutex_unlock(&task_release_lock);
 
+out:
         return task_count;
 }
 
@@ -88,17 +131,22 @@ asmlinkage long sys_wait_for_ts_release(void)
         return ret;
 }
 
+#define ONE_MS 1000000
 
 asmlinkage long sys_release_ts(lt_t __user *__delay)
 {
         long ret;
         lt_t delay;
+        lt_t start_time;
 
         /* FIXME: check capabilities... */
 
         ret = copy_from_user(&delay, __delay, sizeof(delay));
-        if (ret == 0)
-                ret = do_release_ts(litmus_clock() + delay);
+        if (ret == 0) {
+                /* round up to next larger integral millisecond */
+                start_time = ((litmus_clock() / ONE_MS) + 1) * ONE_MS;
+                ret = do_release_ts(start_time + delay);
+        }
 
         return ret;
 }
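Two details of the patch are worth spelling out for callers. First, complete_job() advances the caller to its next job boundary, one period after the programmed release, so passing start + phase - period to release_at() makes each task's first real job begin exactly at start + phase. Second, the releaser rounds the start time up to the next full millisecond before adding the user-supplied delay. A usage sketch follows, assuming the liblitmus-style wrappers wait_for_ts_release(), release_ts(), and sleep_next_period(); the wrapper names and signatures are assumptions here, not verified API.

/* Hypothetical task/launcher skeleton; wrapper names follow liblitmus
 * conventions but are assumptions, not verified against a version. */
#include <litmus.h>

/* Each real-time task, once admitted to LITMUS^RT: */
void rt_task_main(void)
{
        /* Enqueues on the in-kernel release list and blocks; on return,
         * the first job release has been programmed at start + phase in
         * this task's own context. */
        wait_for_ts_release();

        for (;;) {
                /* ... one job's worth of work ... */
                sleep_next_period();  /* assumed wrapper over complete_job() */
        }
}

/* The launcher, once count_tasks_waiting_for_release() reports that
 * every task is enqueued: */
void launch_task_set(void)
{
        lt_t delay = 0;  /* extra ns past the rounded-up millisecond */
        release_ts(&delay);
}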