author     Jonathan Herman <hermanjl@cs.unc.edu>   2012-04-12 23:43:40 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2012-04-12 23:43:40 -0400
commit     8b936011abd09adcfab5dea0b6fdc7a0c2e5fcce (patch)
tree       377879dc298cdf30e2aacac3420548fe555f9355
parent     b91f30c8f0e03ea4bf7ec861469819c5435eb2d9 (diff)
sched_color: added support for group locks.
This does not add support for telling the plugin _which_ resources you will acquire. Group locks were tested by setting random resource requests in task_new().
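
For context, the new interface declared in include/litmus/dgl.h is meant to be used
roughly as sketched below. This is a minimal illustration only: the resource indices
and replica counts are invented, t stands for the task and cpu for the CPU it is
served on, and in this patch the requests are still hard-coded inside the plugin
rather than supplied by the task.

    struct dgl_group_req *req = kmalloc(sizeof(*req), GFP_ATOMIC);

    /* Declare what the task will need, e.g. 2 replicas of resource 0 and
     * 1 replica of resource 3 (illustrative values only). */
    dgl_group_req_init(req);
    set_req(req, 0, 2);
    set_req(req, 3, 1);
    tsk_rt(t)->req = req;

    /* When the task should run on @cpu, contend for all resources at once.
     * The task may execute once group_lock.acquired[cpu] == req. */
    raw_spin_lock(&dgl_lock);
    add_group_req(&group_lock, req, cpu);
    raw_spin_unlock(&dgl_lock);

    /* When the task stops running or exits, drop the whole request; this
     * also passes freed replicas on to the next waiting request. */
    raw_spin_lock(&dgl_lock);
    remove_group_req(&group_lock, req);
    raw_spin_unlock(&dgl_lock);
    kfree(req);

Note that set_req() must be called before the group request is issued, and it
triggers a BUG_ON() if more than NUM_REPLICAS replicas of a resource are requested.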
-rw-r--r--   include/litmus/dgl.h         53
-rw-r--r--   include/litmus/litmus.h       5
-rw-r--r--   include/litmus/rt_param.h     7
-rw-r--r--   include/litmus/rt_server.h    6
-rw-r--r--   litmus/Makefile               1
-rw-r--r--   litmus/budget.c               2
-rw-r--r--   litmus/color_proc.c           8
-rw-r--r--   litmus/dgl.c                197
-rw-r--r--   litmus/rt_server.c            3
-rw-r--r--   litmus/sched_color.c        300
10 files changed, 479 insertions, 103 deletions
diff --git a/include/litmus/dgl.h b/include/litmus/dgl.h
new file mode 100644
index 000000000000..989f6c7d5e8e
--- /dev/null
+++ b/include/litmus/dgl.h
@@ -0,0 +1,53 @@
+#ifndef __DGL_H_
+#define __DGL_H_
+
+#include <linux/list.h>
+
+#define NUM_REPLICAS 4
+#define NUM_RESOURCES 4
+#define MASK_SIZE (sizeof(unsigned long) * 8)
+#define MASK_WORDS (NUM_RESOURCES/MASK_SIZE + (NUM_RESOURCES%MASK_SIZE != 0))
+
+/*
+ * A request for @replica amount of a single resource.
+ */
+struct dgl_req {
+        unsigned short replicas;
+        struct list_head list;
+};
+
+/*
+ * Simultaneous @requests for multiple resources.
+ */
+struct dgl_group_req {
+        int cpu;
+        unsigned long requested[MASK_WORDS];
+        unsigned long waiting[MASK_WORDS];
+        struct dgl_req requests[NUM_RESOURCES];
+};
+
+/*
+ * A single resource.
+ */
+struct dgl_resource {
+        unsigned int free_replicas;
+        struct list_head waiting;
+};
+
+/*
+ * A group of resources.
+ */
+struct dgl {
+        struct dgl_resource resources[NUM_RESOURCES];
+        struct dgl_group_req* acquired[NR_CPUS];
+};
+
+void dgl_init(struct dgl *dgl);
+void dgl_group_req_init(struct dgl_group_req *greq);
+
+void set_req(struct dgl_group_req *greq, int resource, int replicas);
+
+void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu);
+void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq);
+
+#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 8398a10171e0..7188edd3a6cf 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -60,6 +60,7 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_class(t) (tsk_rt(t)->task_params.cls)
 #define get_task_domain(t) (tsk_rt(t)->_domain)
 #define is_server(t) (tsk_rt(t)->is_server)
+#define get_task_server(task) (tsk_rt(task)->server)
 
 #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
 #define get_boost_start(t) (tsk_rt(t)->boost_start_time)
@@ -216,10 +217,6 @@ static inline int is_np(struct task_struct *t)
         int kernel, user;
         kernel = is_kernel_np(t);
         user = is_user_np(t);
-        if (kernel || user)
-                TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n",
-
-                           kernel, user);
         return kernel || user;
 #else
         return unlikely(is_kernel_np(t) || is_user_np(t));
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 4d3a14992e0c..e56aab006cc1 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -5,6 +5,8 @@
 #ifndef _LINUX_RT_PARAM_H_
 #define _LINUX_RT_PARAM_H_
 
+#include <litmus/dgl.h>
+
 /* Litmus time type. */
 typedef unsigned long long lt_t;
 
@@ -66,7 +68,7 @@ struct control_page {
         /* locking overhead tracing: time stamp prior to system call */
         uint64_t ts_syscall_start;  /* Feather-Trace cycles */
 
-        /* to be extended */
+        int requests[NUM_RESOURCES];
 };
 
 /* don't export internal data structures to user space (liblitmus) */
@@ -77,6 +79,7 @@ struct bheap_node;
 struct release_heap;
 struct domain;
 struct rt_server;
+struct dgl_group_req;
 
 struct rt_job {
         /* Time instant the the job was or will be released. */
@@ -218,6 +221,8 @@ struct rt_param {
 
         /* Pointer to the page shared between userspace and kernel. */
         struct control_page * ctrl_page;
+
+        struct dgl_group_req *req;
 };
 
 /* Possible RT flags */
diff --git a/include/litmus/rt_server.h b/include/litmus/rt_server.h
index 17517790a104..0f3147707a3b 100644
--- a/include/litmus/rt_server.h
+++ b/include/litmus/rt_server.h
@@ -14,11 +14,12 @@ typedef struct task_struct* (*server_take_t)(struct rt_server *srv);
 
 struct rt_server {
         int sid;
+        int cpu;
         struct task_struct* linked;
         rt_domain_t* domain;
         int running;
 
-        /* Does this server have a higher-priority task */
+        /* Does this server have a higher-priority task? */
         need_preempt_t need_preempt;
         /* System state has changed, so should server */
         server_update_t update;
@@ -29,11 +30,10 @@ struct rt_server {
 };
 
 void init_rt_server(struct rt_server *server,
-                    int sid, rt_domain_t *domain,
+                    int sid, int cpu, rt_domain_t *domain,
                     need_preempt_t need_preempt,
                     server_requeue_t requeue,
                     server_update_t update,
                     server_take_t take);
 
-
 #endif
diff --git a/litmus/Makefile b/litmus/Makefile
index 9e58a9c78691..f1f92fa8d6d3 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -22,6 +22,7 @@ obj-y = sched_plugin.o litmus.o \
         color_proc.o \
         sched_color.o \
         rt_server.o \
+        dgl.o \
         fifo_common.o
 
 
diff --git a/litmus/budget.c b/litmus/budget.c
index 0a7bd665f814..142ccd9e0b52 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -61,7 +61,7 @@ void arm_enforcement_timer(struct enforcement_timer* et,
         /* __hrtimer_start_range_ns() cancels the timer
          * anyway, so we don't have to check whether it is still armed */
 
-        if (likely(!is_np(t))) {
+        if (likely(!is_user_np(t))) {
                 when_to_fire = litmus_clock() + budget_remaining(t);
                 TRACE_TASK(t, "arming enforcement timer for %llu.\n",
                            when_to_fire);
diff --git a/litmus/color_proc.c b/litmus/color_proc.c
index 25915e714821..9a104a803cac 100644
--- a/litmus/color_proc.c
+++ b/litmus/color_proc.c
@@ -2,13 +2,13 @@
 #include <linux/sysctl.h>
 #include <linux/slab.h>
 
+#include <litmus/color.h>
 #include <litmus/sched_trace.h>
 
-
-#define SPERIOD_LEN 7
+#define SPERIOD_LEN 7
 #define SPERIOD_FILE "period"
 #define SWCET_LEN 5
 #define SWCET_FILE "wcet"
 
 static unsigned long *server_wcet;
 static unsigned long *server_period;
diff --git a/litmus/dgl.c b/litmus/dgl.c
new file mode 100644
index 000000000000..9749597aa769
--- /dev/null
+++ b/litmus/dgl.c
@@ -0,0 +1,197 @@
+#include <linux/sched.h>
+#include <litmus/litmus.h>
+#include <litmus/dgl.h>
+#include <litmus/sched_trace.h>
+
+/* Word, bit -> resource id */
+#define ri(w, b) (w * MASK_SIZE + b)
+
+ /* For loop, where @i iterates over each set bit in @bit_arr */
+#define for_each_resource(bit_arr, w, b, i)                                  \
+        for(w = 0; w < MASK_WORDS; ++w)                                      \
+                for(b = find_first_bit(&bit_arr[w],MASK_SIZE), i = ri(w, b); \
+                    b < MASK_SIZE;                                           \
+                    b = find_next_bit(&bit_arr[w],MASK_SIZE,b+1), i = ri(w, b))
+
+/* Return resource id in dgl @d for resource @r */
+#define resource_id(d, r) ((((void*)r) - (void*)(&(d)->resources))/ sizeof(*r))
+
+/* Return request group of req @r for resource @i */
+#define req_group(r, i) (container_of(((void*)r) - sizeof(*r)*(i), \
+                                      struct dgl_group_req, requests))
+
+/* Resource id -> word, bit */
+static inline void mask_idx(int resource, int *word, int *bit)
+{
+        *word = resource / MASK_SIZE;
+        *bit = resource % MASK_SIZE;
+}
+
+void dgl_init(struct dgl *dgl)
+{
+        int i;
+        struct dgl_resource *resource;
+
+        for (i = 0; i < NR_CPUS; i++)
+                dgl->acquired[i] = NULL;
+
+        for (i = 0; i < NUM_RESOURCES; i++) {
+                resource = &dgl->resources[i];
+
+                INIT_LIST_HEAD(&resource->waiting);
+                resource->free_replicas = NUM_REPLICAS;
+        }
+}
+
+void dgl_group_req_init(struct dgl_group_req *greq)
+{
+        int i;
+        greq->cpu = NO_CPU;
+        for (i = 0; i < MASK_WORDS; i++) {
+                greq->requested[i] = 0;
+                greq->waiting[i] = 0;
+        }
+}
+
+/**
+ * set_req - create request for @replicas of @resource.
+ */
+void set_req(struct dgl_group_req *greq, int resource, int replicas)
+{
+        int word, bit;
+        struct dgl_req *req;
+
+        BUG_ON(replicas > NUM_REPLICAS);
+
+        mask_idx(resource, &word, &bit);
+        TRACE("0x%p will request resource %d, word %d, bit %d\n",
+              greq, resource, word, bit);
+
+        __set_bit(bit, &greq->requested[word]);
+
+        req = &greq->requests[resource];
+        INIT_LIST_HEAD(&req->list);
+        req->replicas = replicas;
+}
+
+/*
+ * Attempt to fulfill request @req for @resource.
+ * Return 1 if successful. If the matching group request has acquired all of
+ * its needed resources, this will then set that req as dgl->acquired[cpu].
+ */
+static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource,
+                                 struct dgl_req *req)
+{
+        int word, bit, rid;
+        unsigned long waiting;
+        struct dgl_group_req *greq;
+
+        if (resource->free_replicas < req->replicas) {
+                TRACE("0x%p cannot acquire %d replicas, only %d free\n",
+                      greq, req->replicas, resource->free_replicas);
+                return 0;
+        }
+
+        resource->free_replicas -= req->replicas;
+
+        rid = resource_id(dgl, resource);
+        greq = req_group(req, rid);
+        mask_idx(rid, &word, &bit);
+
+        TRACE("0x%p acquired rid %d, word %d, bit %d\n",
+              greq, rid, word, bit);
+
+        clear_bit(bit, &greq->waiting[word]);
+
+        waiting = 0;
+        for (word = 0; word < MASK_WORDS; word++) {
+                waiting |= greq->waiting[word];
+                if (waiting)
+                        break;
+        }
+
+        if (!waiting) {
+                TRACE("0x%p acquired all resources\n", greq);
+                BUG_ON(dgl->acquired[greq->cpu]);
+                dgl->acquired[greq->cpu] = greq;
+                litmus_reschedule(greq->cpu);
+        }
+
+        return 1;
+}
+
+/**
+ * add_group_req - initiate group request.
+ */
+void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu)
+{
+        int b, w, i, succ, all_succ = 1;
+        struct dgl_req *req;
+        struct dgl_resource *resource;
+
+        greq->cpu = cpu;
+
+        TRACE("0x%p group request added for CPU %d\n", greq, cpu);
+        BUG_ON(dgl->acquired[cpu] == greq);
+
+        for_each_resource(greq->requested, w, b, i) {
+                __set_bit(b, &greq->waiting[w]);
+        }
+
+        for_each_resource(greq->requested, w, b, i) {
+                req = &greq->requests[i];
+                resource = &dgl->resources[i];
+
+                succ = try_acquire(dgl, resource, req);
+                all_succ &= succ;
+
+                if (!succ) {
+                        TRACE("0x%p waiting on resource %d\n", greq, i);
+                        list_add_tail(&req->list, &resource->waiting);
+                }
+        }
+}
+
+/**
+ * remove_group_req - abandon group request.
+ *
+ * This will also progress the waiting queues of resources acquired by @greq.
+ */
+void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq)
+{
+        int b, w, i;
+        struct dgl_req *req, *next;
+        struct dgl_resource *resource;
+
+        TRACE("0x%p removing group request for CPU %d\n", greq, greq->cpu);
+
+        if (dgl->acquired[greq->cpu] == greq) {
+                TRACE("0x%p no longer acquired on CPU %d\n", greq, greq->cpu);
+                dgl->acquired[greq->cpu] = NULL;
+        }
+
+        for_each_resource(greq->requested, w, b, i) {
+                req = &greq->requests[i];
+                resource = &dgl->resources[i];
+
+                if (!list_empty(&req->list)) {
+                        /* Waiting on resource */
+                        clear_bit(b, &greq->waiting[w]);
+                        list_del_init(&req->list);
+                        TRACE("0x%p quit waiting for resource %d\n", greq, i);
+                } else {
+                        /* Have resource */
+                        resource->free_replicas += req->replicas;
+                        TRACE("0x%p releasing resource %d\n", greq, i);
+
+                        if (!list_empty(&resource->waiting)) {
+                                /* Give it to the next guy */
+                                next = list_first_entry(&resource->waiting,
+                                                        struct dgl_req,
+                                                        list);
+                                if (try_acquire(dgl, resource, next))
+                                        list_del_init(&next->list);
+                        }
+                }
+        }
+}
diff --git a/litmus/rt_server.c b/litmus/rt_server.c
index 35f7d4dea079..818588a3d317 100644
--- a/litmus/rt_server.c
+++ b/litmus/rt_server.c
@@ -11,7 +11,7 @@ static void default_server_update(struct rt_server *srv)
 }
 
 void init_rt_server(struct rt_server *server,
-                    int sid, rt_domain_t *domain,
+                    int sid, int cpu, rt_domain_t *domain,
                     need_preempt_t need_preempt,
                     server_requeue_t requeue,
                     server_update_t update,
@@ -27,6 +27,7 @@ void init_rt_server(struct rt_server *server,
         server->take = (take) ? take : default_server_take;
 
         server->sid = sid;
+        server->cpu = cpu;
         server->linked = NULL;
         server->domain = domain;
         server->running = 0;
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index 02325db677ab..61a28da1ef6c 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -15,7 +15,14 @@
 #include <litmus/fifo_common.h>
 #include <litmus/budget.h>
 #include <litmus/rt_server.h>
+#include <litmus/dgl.h>
 
+/**
+ * @rt_server Common server functionality.
+ * @task Task used to schedule server.
+ * @timer Budget enforcement for @task
+ * @start_time If set, time at which server began running.
+ */
 struct fifo_server {
         struct rt_server server;
         struct task_struct* task;
@@ -23,39 +30,48 @@ struct fifo_server {
         lt_t start_time;
 };
 
+/**
+ * @server Common server functionality.
+ * @edf_domain PEDF domain.
+ * @scheduled Task physically running on CPU.
+ * @fifo_server Server partitioned to this CPU.
+ */
 struct cpu_entry {
-        int cpu;
         struct rt_server server;
         rt_domain_t edf_domain;
-        struct task_struct* scheduled; /* Actually running, EDF or FIFO */
+        struct task_struct* scheduled;
         struct fifo_server fifo_server;
 };
 
 DEFINE_PER_CPU(struct cpu_entry, color_cpus);
+
 static rt_domain_t fifo_domain;
 static raw_spinlock_t fifo_lock;
 
-#define local_entry (&__get_cpu_var(color_cpus))
-#define remote_edf(cpu) (&per_cpu(psnedf_domains, cpu).domain)
-#define remote_entry(cpu) (&per_cpu(color_cpus, cpu))
-#define task_edf(task) remote_edf(get_partition(task))
-#define task_entry(task) remote_entry(get_partition(task))
-#define task_server(task) (&task_entry(task)->fifo_server.server)
-#define run_server(task) (tsk_rt(task)->server)
-#define entry_lock(entry) (&entry->edf_domain.ready_lock)
-#define task_domain(entry, task) (is_be(task)? &fifo_domain :&entry->edf_domain)
-#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry))
+static struct dgl group_lock;
+static raw_spinlock_t dgl_lock;
+
+#define local_entry (&__get_cpu_var(color_cpus))
+#define remote_entry(cpu) (&per_cpu(color_cpus, cpu))
+#define task_entry(task) remote_entry(get_partition(task))
+#define task_fserver(task) (&task_entry(task)->fifo_server.server)
+#define entry_lock(entry) (&entry->edf_domain.ready_lock)
+
+#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c])
+#define task_dom(entry, task) (is_be(task) ? &fifo_domain : &entry->edf_domain)
+#define task_lock(entry, task) (is_be(task) ? &fifo_lock : entry_lock(entry))
 
 /*
- * Requeue onto domains release or ready queue based on task state.
+ * Requeue onto domain's release or ready queue based on task state.
  */
 static void requeue(rt_domain_t *dom, struct task_struct* t)
 {
         if (is_server(t) && !tsk_rt(t)->present)
+                /* Remove stopped server from the system */
                 return;
 
+        TRACE_TASK(t, "Requeueing\n");
         BUG_ON(is_queued(t));
-        TRACE_TASK(t, "FIFO requeueing\n");
 
         set_rt_flags(t, RT_F_RUNNING);
         if (is_released(t, litmus_clock()))
@@ -65,25 +81,89 @@ static void requeue(rt_domain_t *dom, struct task_struct* t)
 }
 
 /*
+ * Relinquish resources held by @t (or its children).
+ */
+static void release_resources(struct task_struct *t)
+{
+        struct task_struct *sched;
+
+        TRACE_TASK(t, "Releasing resources\n");
+
+        if (is_server(t)) {
+                sched = task_fserver(t)->linked;
+                if (sched)
+                        release_resources(sched);
+        } else if (is_kernel_np(t))
+                remove_group_req(&group_lock, tsk_rt(t)->req);
+        tsk_rt(t)->kernel_np = 0;
+}
+
+/*
+ * Put in requests for resources needed by @t. If @t is a server, this will
+ * set @t's np flag to reflect resources held by @t's children.
+ */
+static void acquire_resources(struct task_struct *t)
+{
+        int cpu;
+        struct rt_server *server;
+        struct task_struct *sched;
+
+        /* Can't acquire resources if t is not running */
+        BUG_ON(!get_task_server(t));
+
+        if (is_kernel_np(t)) {
+                TRACE_TASK(t, "Already contending for resources\n");
+                return;
+        }
+        cpu = get_task_server(t)->cpu;
+
+        if (is_server(t)) {
+                server = task_fserver(t);
+                sched = server->linked;
+
+                /* Happens when server is booted off on completion or
+                 * has just completed executing a task.
+                 */
+                if (sched && !is_kernel_np(sched))
+                        acquire_resources(sched);
+
+                /* Become np if there is a running task */
+                if (sched && has_resources(sched, cpu)) {
+                        TRACE_TASK(t, "Running task with resource\n");
+                        tsk_rt(t)->kernel_np = 1;
+                } else {
+                        TRACE_TASK(t, "Running no resources\n");
+                        tsk_rt(t)->kernel_np = 0;
+                }
+        } else {
+                TRACE_TASK(t, "Acquiring resources\n");
+                if (!has_resources(t, cpu))
+                        add_group_req(&group_lock, tsk_rt(t)->req, cpu);
+                tsk_rt(t)->kernel_np = 1;
+        }
+}
+
+/*
  * Stop logically running the currently linked task.
  */
 static void unlink(struct rt_server *server)
 {
-        struct rt_server *tserv;
-
         BUG_ON(!server->linked);
 
-        if (is_server(server->linked)) {
-                tserv = task_server(server->linked);
-                tserv->running = 0;
-        }
+        if (is_server(server->linked))
+                task_fserver(server->linked)->running = 0;
+
 
         sched_trace_server_switch_away(server->sid, 0,
                                        server->linked->pid,
                                        get_rt_job(server->linked));
         TRACE_TASK(server->linked, "No longer run by server %d\n", server->sid);
 
-        run_server(server->linked) = NULL;
+        raw_spin_lock(&dgl_lock);
+        release_resources(server->linked);
+        raw_spin_unlock(&dgl_lock);
+
+        get_task_server(server->linked) = NULL;
         server->linked = NULL;
 }
 
@@ -98,19 +178,21 @@ static void link(struct rt_server *server, struct task_struct *task)
 
         BUG_ON(server->linked);
         BUG_ON(!server->running);
+        BUG_ON(is_kernel_np(task));
+
+        TRACE_TASK(task, "Run by server %d\n", server->sid);
 
         if (is_server(task)) {
-                tserv = task_server(task);
+                tserv = task_fserver(task);
                 tserv->running = 1;
                 schedule_server(tserv);
         }
 
         server->linked = task;
-        run_server(task) = server;
+        get_task_server(task) = server;
 
         sched_trace_server_switch_to(server->sid, 0,
                                      task->pid, get_rt_job(task));
-        TRACE_TASK(task, "Run by server %d\n", server->sid);
 }
 
 /*
@@ -126,14 +208,13 @@ static void job_completion(struct rt_server *server)
         else
                 sched_trace_task_completion(t, 0);
 
-
         unlink(server);
         set_rt_flags(t, RT_F_SLEEP);
         prepare_for_next_period(t);
 
         if (is_server(t))
                 sched_trace_server_release(t->pid, get_rt_job(t),
-                                           get_release(t), get_rt_period(t));
+                                           get_release(t), get_deadline(t));
         else
                 sched_trace_task_release(t);
 
@@ -142,7 +223,7 @@ static void job_completion(struct rt_server *server)
 }
 
 /*
- * Update server state to reflect task state.
+ * Update @server state to reflect task's state.
  */
 static void update_task(struct rt_server *server)
 {
@@ -151,18 +232,16 @@ static void update_task(struct rt_server *server)
 
         block = !is_running(t);
         oot = budget_enforced(t) && budget_exhausted(t);
-        np = is_np(t);
+        np = is_kernel_np(t);
         sleep = get_rt_flags(t) == RT_F_SLEEP;
 
-
         TRACE_TASK(t, "Updating task, block: %d, oot: %d, np: %d, sleep: %d\n",
                    block, oot, np, sleep);
 
         if (block)
                 unlink(server);
-        else if ((oot || sleep) && !np)
+        else if (oot || sleep)
                 job_completion(server);
-
 }
 
 /*
@@ -177,15 +256,15 @@ static struct task_struct* schedule_server(struct rt_server *server)
 
         if (server->linked) {
                 if (is_server(server->linked)) {
-                        lserver = task_server(server->linked);
+                        lserver = task_fserver(server->linked);
                         lserver->update(lserver);
                 }
-
                 update_task(server);
         }
 
         next = server->linked;
-        if (server->need_preempt(server->domain, next)) {
+        if ((!next || !is_np(next)) &&
+            server->need_preempt(server->domain, next)) {
                 if (next) {
                         TRACE_TASK(next, "Preempted\n");
                         unlink(server);
@@ -199,10 +278,11 @@ static struct task_struct* schedule_server(struct rt_server *server)
 }
 
 /*
- * Dumb requeue for CPU servers.
+ * Dumb requeue for PEDF (CPU) servers.
  */
 static void edf_requeue(struct rt_server *server, struct task_struct *t)
 {
+        BUG_ON(is_be(t));
         requeue(server->domain, t);
 }
 
@@ -211,6 +291,7 @@ static void edf_requeue(struct rt_server *server, struct task_struct *t)
  */
 static void fifo_requeue(struct rt_server *server, struct task_struct *t)
 {
+        BUG_ON(!is_be(t));
         raw_spin_lock(&fifo_lock);
         requeue(server->domain, t);
         raw_spin_unlock(&fifo_lock);
@@ -243,16 +324,27 @@ static void fifo_update(struct rt_server *server)
         fserver = container_of(server, struct fifo_server, server);
         TRACE_TASK(fserver->task, "Updating FIFO server\n");
 
-        BUG_ON(!server->running);
-        BUG_ON(!fserver->start_time);
-
-        delta = litmus_clock() - fserver->start_time;
-        tsk_rt(fserver->task)->job_params.exec_time += delta;
-        fserver->start_time = 0;
+        if (!server->linked || has_resources(server->linked, server->cpu)) {
+                /* Running here means linked to a parent server */
+                BUG_ON(!server->running);
+
+                /* Stop executing */
+                if (fserver->start_time) {
+                        delta = litmus_clock() - fserver->start_time;
+                        tsk_rt(fserver->task)->job_params.exec_time += delta;
+                        fserver->start_time = 0;
+                        cancel_enforcement_timer(&fserver->timer);
+                } else {
+                        /* Server is linked, but not executing */
+                        BUG_ON(fserver->timer.armed);
+                }
 
-        cancel_enforcement_timer(&fserver->timer);
+                /* Calculate next task */
+                schedule_server(&fserver->server);
 
-        schedule_server(&fserver->server);
+                /* Reserve needed resources */
+                acquire_resources(fserver->task);
+        }
 }
 
 /*
@@ -273,7 +365,7 @@ static void color_edf_release(rt_domain_t *edf, struct bheap *tasks)
 
         if (edf_preemption_needed(edf, entry->server.linked) &&
             (!entry->server.linked || !is_kernel_np(entry->server.linked))) {
-                litmus_reschedule(entry->cpu);
+                litmus_reschedule(entry->server.cpu);
         }
 
         raw_spin_unlock_irqrestore(entry_lock(entry), flags);
@@ -335,10 +427,11 @@ static void color_fifo_release(rt_domain_t *dom, struct bheap *tasks)
 static struct task_struct* color_schedule(struct task_struct *prev)
 {
         unsigned long flags;
+        int server_running;
         struct cpu_entry *entry = local_entry;
         struct task_struct *next, *plink = entry->server.linked;
 
-        TRACE("Reschedule on %d at %llu\n", entry->cpu, litmus_clock());
+        TRACE("Reschedule on %d at %llu\n", entry->server.cpu, litmus_clock());
         BUG_ON(entry->scheduled && entry->scheduled != prev);
         BUG_ON(entry->scheduled && !is_realtime(prev));
 
@@ -346,15 +439,38 @@ static struct task_struct* color_schedule(struct task_struct *prev)
 
         if (entry->scheduled && cpu_empty(entry, plink) && is_running(prev)) {
                 TRACE_TASK(prev, "Snuck in on new!\n");
-                requeue(task_domain(entry, prev), prev);
+                requeue(task_dom(entry, prev), prev);
         }
 
         /* Pick next top-level task */
         next = schedule_server(&entry->server);
+        /* Schedule hierarchically */
+        server_running = next && is_server(next);
+        if (server_running)
+                next = task_fserver(next)->linked;
+
+        /* Selected tasks must contend for group lock */
+        if (next) {
+                raw_spin_lock(&dgl_lock);
+                acquire_resources(next);
+                if (has_resources(next, entry->server.cpu)) {
+                        TRACE_TASK(next, "Has group lock\n");
+                        sched_trace_task_resume(next, 1);
+                } else {
+                        TRACE_TASK(next, "Does not have lock, 0x%p does\n",
+                                   group_lock.acquired[entry->server.cpu]);
+                        sched_trace_task_block(next, 1);
+                        next = NULL;
+                        server_running = 0;
+                }
+                raw_spin_unlock(&dgl_lock);
+        }
 
-        /* Arm timer if needed */
-        if (next && is_server(next)) {
-                next = task_server(next)->linked;
+        /* Server is blocked if its running task is blocked. Note that if the
+         * server has no running task, the server will now execute NULL.
+         */
+        if (server_running) {
+                TRACE_TASK(entry->server.linked, "Server running\n");
                 arm_enforcement_timer(&entry->fifo_server.timer,
                                       entry->fifo_server.task);
                 entry->fifo_server.start_time = litmus_clock();
@@ -363,7 +479,7 @@ static struct task_struct* color_schedule(struct task_struct *prev)
         if (prev)
                 tsk_rt(prev)->scheduled_on = NO_CPU;
         if (next)
-                tsk_rt(next)->scheduled_on = entry->cpu;
+                tsk_rt(next)->scheduled_on = entry->server.cpu;
 
         entry->scheduled = next;
         sched_state_task_picked();
@@ -375,9 +491,11 @@ static struct task_struct* color_schedule(struct task_struct *prev)
 
 static void color_task_new(struct task_struct *t, int on_rq, int running)
 {
-        struct cpu_entry* entry;
         unsigned long flags;
+        int i, replicas;
         raw_spinlock_t *lock;
+        struct cpu_entry *entry;
+        struct dgl_group_req *req;
 
         TRACE_TASK(t, "New colored task\n");
         local_irq_save(flags);
@@ -387,21 +505,36 @@ static void color_task_new(struct task_struct *t, int on_rq, int running)
 
         release_at(t, litmus_clock());
 
+        /* Create request for dynamic group locks */
+        req = kmalloc(sizeof(*req), GFP_ATOMIC);
+        dgl_group_req_init(req);
+        for (i = 0; i < NUM_RESOURCES; i++) {
+                /* /\* Testing *\/ */
+                /* set_req(req, i, 2); */
+                /* /\* Real *\/ */
+                /* replicas = get_control_page(t)->requests[i]; */
+                /* if (replicas) { */
+                /*         set_req(req, i, replicas); */
+                /* } */
+        }
+        tsk_rt(t)->req = req;
+
+        /* Join system */
         raw_spin_lock(lock);
         if (running) {
-                /* Already running on a CPU, update CPU state to match */
+                TRACE_TASK(t, "Already scheduled on %d\n", entry->cpu);
                 BUG_ON(entry->scheduled);
                 entry->scheduled = t;
-                tsk_rt(t)->scheduled_on = entry->cpu;
-        } else {
-                requeue(task_domain(entry, t), t);
-        }
+                tsk_rt(t)->scheduled_on = entry->server.cpu;
+        } else
+                requeue(task_dom(entry, t), t);
         raw_spin_unlock(lock);
 
+        /* Trigger preemptions */
         if (is_be(t))
                 check_for_fifo_preempt();
         else
-                litmus_reschedule(entry->cpu);
+                litmus_reschedule(entry->server.cpu);
 
         local_irq_restore(flags);
 }
@@ -423,19 +556,21 @@ static void color_task_wake_up(struct task_struct *task)
                 sched_trace_task_release(task);
         }
 
+        /* Re-enter system */
         if (entry->scheduled != task) {
                 raw_spin_lock(lock);
-                requeue(task_domain(entry, task), task);
+                requeue(task_dom(entry, task), task);
                 raw_spin_unlock(lock);
         } else {
                 TRACE_TASK(task, "Is already scheduled on %d!\n",
                            entry->scheduled);
         }
 
+        /* Trigger preemptions */
         if (is_be(task))
                 check_for_fifo_preempt();
         else
-                litmus_reschedule(entry->cpu);
+                litmus_reschedule(entry->server.cpu);
 
         local_irq_restore(flags);
 }
@@ -460,15 +595,15 @@ static void color_task_exit(struct task_struct * t)
         /* Remove from scheduler consideration */
         if (is_queued(t)) {
                 raw_spin_lock(lock);
-                remove(task_domain(entry, t), t);
+                remove(task_dom(entry, t), t);
                 raw_spin_unlock(lock);
         }
 
         /* Stop parent server */
-        if (run_server(t))
-                unlink(run_server(t));
+        if (get_task_server(t))
+                unlink(get_task_server(t));
 
-        /* Unschedule running CPU */
+        /* Unschedule running task */
         if (tsk_rt(t)->scheduled_on != NO_CPU) {
                 entry = remote_entry(tsk_rt(t)->scheduled_on);
 
@@ -476,11 +611,17 @@ static void color_task_exit(struct task_struct * t)
 
                 tsk_rt(t)->scheduled_on = NO_CPU;
                 entry->scheduled = NULL;
-                litmus_reschedule(entry->cpu);
+                litmus_reschedule(entry->server.cpu);
 
                 raw_spin_unlock(entry_lock(entry));
         }
 
+        /* Remove dgl request from system */
+        raw_spin_lock(&dgl_lock);
+        release_resources(t);
+        raw_spin_unlock(&dgl_lock);
+        kfree(tsk_rt(t)->req);
+
         local_irq_restore(flags);
 }
 
@@ -492,25 +633,6 @@ static long color_admit_task(struct task_struct* t)
         return is_be(t) || task_cpu(t) == get_partition(t) ? 0 : -EINVAL;
 }
 
-static void color_tick(struct task_struct *t)
-{
-        struct cpu_entry *entry = local_entry;
-        BUG_ON(is_realtime(t) && t != entry->scheduled);
-        if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
-                if (!is_np(t)) {
-                        litmus_reschedule_local();
-                        TRACE("color_scheduler_tick: "
-                              "%d is preemptable "
-                              " => FORCE_RESCHED\n", t->pid);
-                } else if (is_user_np(t)) {
-                        TRACE("color_scheduler_tick: "
-                              "%d is non-preemptable, "
-                              "preemption delayed.\n", t->pid);
-                        request_exit_np(t);
-                }
-        }
-}
-
 /*
  * Load server parameters.
  */
@@ -532,7 +654,7 @@ static long color_activate_plugin(void)
                                           &tp.period);
                 if (ret) {
                         printk(KERN_WARNING "Uninitialized server for CPU %d\n",
-                               entry->cpu);
+                               entry->server.cpu);
                         goto loop_end;
                 }
 
@@ -547,7 +669,6 @@ static long color_activate_plugin(void)
                 /* Make runnable */
                 release_at(server_task, now);
                 requeue(&entry->edf_domain, server_task);
-
                 entry->fifo_server.start_time = 0;
                 entry->scheduled = NULL;
 
@@ -607,7 +728,6 @@ static void color_release_ts(lt_t time)
 
 static struct sched_plugin color_plugin __cacheline_aligned_in_smp = {
         .plugin_name = "COLOR",
-        .tick = color_tick,
         .task_new = color_task_new,
         .complete_job = complete_job,
         .task_exit = color_task_exit,
@@ -634,13 +754,13 @@ static int __init init_color(void)
                 entry = remote_entry(cpu);
                 edf_domain_init(&entry->edf_domain, NULL, color_edf_release);
 
-                entry->cpu = cpu;
                 entry->scheduled = NULL;
 
                 /* Create FIFO server */
                 fifo_server = &entry->fifo_server;
                 init_rt_server(&fifo_server->server,
                                cpu + num_online_cpus() + 1,
+                               cpu,
                                &fifo_domain,
                                fifo_preemption_needed,
                                fifo_requeue, fifo_update, fifo_take);
@@ -660,10 +780,9 @@ static int __init init_color(void)
                 bheap_node_init(&tsk_rt(server_task)->heap_node, server_task);
                 tsk_rt(server_task)->is_server = 1;
 
-
                 /* Create CPU server */
                 cpu_server = &entry->server;
-                init_rt_server(cpu_server, cpu + 1,
+                init_rt_server(cpu_server, cpu + 1, cpu,
                                &entry->edf_domain, edf_preemption_needed,
                                edf_requeue, NULL, NULL);
                 cpu_server->running = 1;
@@ -674,6 +793,9 @@ static int __init init_color(void)
         fifo_domain_init(&fifo_domain, NULL, color_fifo_release);
         raw_spin_lock_init(&fifo_lock);
 
+        dgl_init(&group_lock);
+        raw_spin_lock_init(&dgl_lock);
+
         return register_sched_plugin(&color_plugin);
 }
 