aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/sync.c
blob: bf75fde5450b4820985e73b3750e06b6b824ec15 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
/* litmus/sync.c - Support for synchronous and asynchronous task system releases.
 *
 *
 */

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/completion.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/jobs.h>

#include <litmus/sched_trace.h>

/* Completion on which real-time tasks block while waiting for the
 * synchronous task system release; do_release_ts() wakes all waiters
 * at once via complete_n(). */
static DECLARE_COMPLETION(ts_release);

/* Block the calling task until the synchronous task system release is
 * triggered.
 *
 * If the interruption races with a release, the completion object
 * may have a non-zero counter. To avoid this problem, this should
 * be replaced by wait_for_completion().
 *
 * For debugging purposes, this is interruptible for now.
 *
 * Returns 0 on release, or -ERESTARTSYS if interrupted by a signal.
 */
static long do_wait_for_ts_release(void)
{
	return wait_for_completion_interruptible(&ts_release);
}

int count_tasks_waiting_for_release(void)
{
	unsigned long flags;
	int task_count = 0;
	struct list_head *pos;

	spin_lock_irqsave(&ts_release.wait.lock, flags);
	list_for_each(pos, &ts_release.wait.task_list) {
		task_count++;
	}
	spin_unlock_irqrestore(&ts_release.wait.lock, flags);

	return task_count;
}

/* Perform the synchronous task system release: set the release time of
 * every waiting task to @start plus its per-task phase, then wake all of
 * them via complete_n(). The wait-queue lock is held (IRQ-safe) while the
 * list of waiters is traversed.
 *
 * Returns the number of released tasks.
 */
static long do_release_ts(lt_t start)
{
	unsigned long flags;
	struct __wait_queue *wq;
	struct task_struct *waiter;
	int released = 0;

	spin_lock_irqsave(&ts_release.wait.lock, flags);
	TRACE("<<<<<< synchronous task system release >>>>>>\n");

	sched_trace_sys_release(&start);
	list_for_each_entry(wq, &ts_release.wait.task_list, task_list) {
		waiter = (struct task_struct *) wq->private;
		released++;
		/* stagger each task's release by its configured phase */
		litmus->release_at(waiter,
				   start + waiter->rt_param.task_params.phase);
		sched_trace_task_release(waiter);
	}

	spin_unlock_irqrestore(&ts_release.wait.lock, flags);

	complete_n(&ts_release, released);

	return released;
}


/* Syscall: block the current task until the synchronous task system
 * release is triggered.
 *
 * Returns 0 on release, -EPERM if the caller is not a real-time task,
 * or -ERESTARTSYS if interrupted.
 */
asmlinkage long sys_wait_for_ts_release(void)
{
	if (!is_realtime(current))
		return -EPERM;

	return do_wait_for_ts_release();
}


/* Syscall: trigger the synchronous task system release.
 *
 * @__delay: user pointer to a delay (lt_t) added to the current time
 *           (litmus_clock()) to form the release time.
 *
 * Returns the number of released tasks on success, or -EFAULT if the
 * delay could not be copied from user space.
 */
asmlinkage long sys_release_ts(lt_t __user *__delay)
{
	lt_t delay;

	/* FIXME: check capabilities... */

	/* copy_from_user() returns the number of bytes NOT copied; the
	 * old code returned that positive count directly, which a caller
	 * could mistake for a task count. Map any failure to -EFAULT. */
	if (copy_from_user(&delay, __delay, sizeof(delay)))
		return -EFAULT;

	return do_release_ts(litmus_clock() + delay);
}