-rw-r--r--  include/litmus/event_group.h |  50
-rw-r--r--  include/litmus/rt_domain.h   |  13
-rw-r--r--  include/litmus/rt_param.h    |   3
-rw-r--r--  include/litmus/sched_mc.h    |   7
-rw-r--r--  litmus/Makefile              |  11
-rw-r--r--  litmus/event_group.c         | 108
-rw-r--r--  litmus/litmus.c              |  30
-rw-r--r--  litmus/rt_domain.c           | 137
-rw-r--r--  litmus/sched_mc.c            | 339
9 files changed, 452 insertions, 246 deletions
diff --git a/include/litmus/event_group.h b/include/litmus/event_group.h
index 58692a11b683..37d5012d770e 100644
--- a/include/litmus/event_group.h
+++ b/include/litmus/event_group.h
@@ -3,13 +3,14 @@ | |||
3 | 3 | ||
4 | #define EVENT_QUEUE_SLOTS 127 /* prime */ | 4 | #define EVENT_QUEUE_SLOTS 127 /* prime */ |
5 | 5 | ||
6 | typedef void (*fire_event_t)(void *data); | 6 | struct rt_event; |
7 | typedef void (*fire_event_t)(struct rt_event *e); | ||
7 | 8 | ||
8 | struct event_group { | 9 | struct event_group { |
9 | lt_t res; | 10 | lt_t res; |
10 | int cpu; | 11 | int cpu; |
11 | struct list_head event_queue[EVENT_QUEUE_SLOTS]; | 12 | struct list_head event_queue[EVENT_QUEUE_SLOTS]; |
12 | raw_spinlock_t queue_lock; | 13 | raw_spinlock_t queue_lock; |
13 | }; | 14 | }; |
14 | 15 | ||
15 | /** | 16 | /** |
@@ -23,8 +24,8 @@ struct event_list { | |||
23 | struct hrtimer timer; | 24 | struct hrtimer timer; |
24 | struct hrtimer_start_on_info info; | 25 | struct hrtimer_start_on_info info; |
25 | 26 | ||
26 | struct list_head list; /* For event_queue */ | 27 | struct list_head list; /* For event_queue */ |
27 | struct event_group *group; /* For callback */ | 28 | struct event_group* group; /* For callback */ |
28 | }; | 29 | }; |
29 | 30 | ||
30 | /** | 31 | /** |
@@ -32,10 +33,8 @@ struct event_list { | |||
32 | */ | 33 | */ |
33 | struct rt_event { | 34 | struct rt_event { |
34 | /* Function to call on event expiration */ | 35 | /* Function to call on event expiration */ |
35 | fire_event_t fire; | 36 | fire_event_t function; |
36 | /* To be passed into fire */ | 37 | /* Priority of this event (lower is better) */ |
37 | void *data; | ||
38 | /* Priority of this event (lower is better */ | ||
39 | int prio; | 38 | int prio; |
40 | 39 | ||
41 | /* For membership in the event_list */ | 40 | /* For membership in the event_list */ |
@@ -48,9 +47,38 @@ struct rt_event { | |||
48 | struct event_list *event_list; | 47 | struct event_list *event_list; |
49 | }; | 48 | }; |
50 | 49 | ||
51 | void init_event_group(struct event_group*, lt_t, int); | 50 | /** |
52 | void add_event(struct event_group*, struct rt_event*, lt_t); | 51 | * init_event_group() - Prepare group for events. |
53 | void cancel_event(struct event_group*, struct rt_event*); | 52 | * @group Group to prepare |
53 | * @res Timer resolution. Two events of @res distance will be merged | ||
54 | * @cpu Cpu on which to fire timers | ||
55 | */ | ||
56 | void init_event_group(struct event_group* group, lt_t res, int cpu); | ||
57 | |||
58 | /** | ||
59 | * add_event() - Add timer to event group. | ||
60 | * @group Group with which to merge event | ||
61 | * @e Event to be fired at a specific time | ||
62 | * @time Time to fire event | ||
63 | */ | ||
64 | void add_event(struct event_group* group, struct rt_event* e, lt_t time); | ||
65 | |||
66 | /** | ||
67 | * cancel_event() - Remove event from the group. | ||
68 | */ | ||
69 | void cancel_event(struct rt_event*); | ||
70 | |||
71 | /** | ||
72 | * init_event() - Create an event. | ||
73 | * @e Event to create | ||
74 | * @prio Priority of the event (lower is better) | ||
75 | * @function Function to fire when event expires | ||
76 | * @el Pre-allocated event list for timer merging | ||
77 | */ | ||
78 | void init_event(struct rt_event* e, int prio, fire_event_t function, | ||
79 | struct event_list *el); | ||
80 | |||
54 | struct event_list* event_list_alloc(int); | 81 | struct event_list* event_list_alloc(int); |
82 | void event_list_free(struct event_list *el); | ||
55 | 83 | ||
56 | #endif | 84 | #endif |
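For orientation, here is a minimal usage sketch of the event-group API declared above; the group, event, and callback names (demo_group, demo_event, demo_fire, demo_setup) and the 1ms/10ms values are illustrative assumptions, not part of the patch.

#include <litmus/event_group.h>
#include <litmus/litmus.h>
#include <litmus/trace.h>

static struct event_group demo_group;
static struct rt_event demo_event;

/* Runs in timer (interrupt) context when the merged timer expires. */
static void demo_fire(struct rt_event *e)
{
	TRACE("event with priority %d fired\n", e->prio);
}

static void demo_setup(void)
{
	/* Merge events that fall within 1ms of each other; fire timers on CPU 0. */
	init_event_group(&demo_group, NSEC_PER_MSEC, 0);

	/* Priority-7 event with a pre-allocated event_list for timer merging. */
	init_event(&demo_event, 7, demo_fire, event_list_alloc(GFP_ATOMIC));

	/* Request a firing roughly 10ms from now; cancel_event(&demo_event)
	 * removes it again if it has not fired yet. */
	add_event(&demo_group, &demo_event, litmus_clock() + 10 * NSEC_PER_MSEC);
}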
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 0e4e75cd1e67..59350fb78d4f 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <litmus/bheap.h> | 8 | #include <litmus/bheap.h> |
9 | #include <litmus/domain.h> | 9 | #include <litmus/domain.h> |
10 | #include <litmus/event_group.h> | ||
10 | 11 | ||
11 | #define RELEASE_QUEUE_SLOTS 127 /* prime */ | 12 | #define RELEASE_QUEUE_SLOTS 127 /* prime */ |
12 | 13 | ||
@@ -30,7 +31,10 @@ typedef struct _rt_domain { | |||
30 | raw_spinlock_t release_lock; | 31 | raw_spinlock_t release_lock; |
31 | struct release_queue release_queue; | 32 | struct release_queue release_queue; |
32 | 33 | ||
33 | #ifdef CONFIG_RELEASE_MASTER | 34 | #ifdef CONFIG_MERGE_TIMERS |
35 | struct event_group* event_group; | ||
36 | int prio; | ||
37 | #elif CONFIG_RELEASE_MASTER | ||
34 | int release_master; | 38 | int release_master; |
35 | #endif | 39 | #endif |
36 | 40 | ||
@@ -54,13 +58,18 @@ struct release_heap { | |||
54 | lt_t release_time; | 58 | lt_t release_time; |
55 | /* all tasks to be released at release_time */ | 59 | /* all tasks to be released at release_time */ |
56 | struct bheap heap; | 60 | struct bheap heap; |
61 | |||
62 | #ifdef CONFIG_MERGE_TIMERS | ||
63 | /* used to merge timer calls */ | ||
64 | struct rt_event event; | ||
65 | #else | ||
57 | /* used to trigger the release */ | 66 | /* used to trigger the release */ |
58 | struct hrtimer timer; | 67 | struct hrtimer timer; |
59 | |||
60 | #ifdef CONFIG_RELEASE_MASTER | 68 | #ifdef CONFIG_RELEASE_MASTER |
61 | /* used to delegate releases */ | 69 | /* used to delegate releases */ |
62 | struct hrtimer_start_on_info info; | 70 | struct hrtimer_start_on_info info; |
63 | #endif | 71 | #endif |
72 | #endif | ||
64 | /* required for the timer callback */ | 73 | /* required for the timer callback */ |
65 | rt_domain_t* dom; | 74 | rt_domain_t* dom; |
66 | }; | 75 | }; |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index e247fb515e8c..e6288e8807f0 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -123,6 +123,9 @@ struct rt_param { | |||
123 | /* mixed criticality specific data */ | 123 | /* mixed criticality specific data */ |
124 | struct mc_data *mc_data; | 124 | struct mc_data *mc_data; |
125 | #endif | 125 | #endif |
126 | #ifdef CONFIG_MERGE_TIMERS | ||
127 | struct rt_event *event; | ||
128 | #endif | ||
126 | 129 | ||
127 | /* user controlled parameters */ | 130 | /* user controlled parameters */ |
128 | struct rt_task task_params; | 131 | struct rt_task task_params; |
diff --git a/include/litmus/sched_mc.h b/include/litmus/sched_mc.h
index 95cd22cd7202..9ddf860c83a7 100644
--- a/include/litmus/sched_mc.h
+++ b/include/litmus/sched_mc.h
@@ -31,6 +31,13 @@ struct mc_data { | |||
31 | #define tsk_mc_data(t) (tsk_rt(t)->mc_data) | 31 | #define tsk_mc_data(t) (tsk_rt(t)->mc_data) |
32 | #define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit) | 32 | #define tsk_mc_crit(t) (tsk_mc_data(t)->mc_task.crit) |
33 | #define is_ghost(t) (tsk_mc_data(t)->mc_job.is_ghost) | 33 | #define is_ghost(t) (tsk_mc_data(t)->mc_job.is_ghost) |
34 | #define TS "(%s/%d:%d:%s)" | ||
35 | #define TA(t) (t) ? (is_ghost(t)) ? "ghost" : t->comm : "NULL", \ | ||
36 | (t) ? t->pid : 1, \ | ||
37 | (t) ? t->rt_param.job_params.job_no : 1, \ | ||
38 | (t && get_task_domain(t)) ? get_task_domain(t)->name : "" | ||
39 | #define TRACE_MC_TASK(t, fmt, args...) \ | ||
40 | TRACE(TS " " fmt "\n", TA(t), ##args) | ||
34 | 41 | ||
35 | /* | 42 | /* |
36 | * Cache the budget along with the struct PID for a task so that we don't need | 43 | * Cache the budget along with the struct PID for a task so that we don't need |
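To make the new trace macros concrete, a hypothetical expansion follows; the task values are invented for illustration only.

/* TRACE_MC_TASK(t, "linked to P%d", cpu) prints the task as comm/pid:job_no:domain:
 *
 *   TRACE("(%s/%d:%d:%s) linked to P%d\n",
 *         t->comm, t->pid, t->rt_param.job_params.job_no,
 *         get_task_domain(t)->name, cpu);
 *
 * e.g. "(rtspin/1234:7:<domain-name>) linked to P2".  Ghost jobs print
 * "ghost" in place of the comm, and a NULL task prints "NULL" with the
 * default values from the TA() macro.
 */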
diff --git a/litmus/Makefile b/litmus/Makefile
index f3555334d62d..af8bc7618b95 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -16,13 +16,12 @@ obj-y = sched_plugin.o litmus.o \ | |||
16 | srp.o \ | 16 | srp.o \ |
17 | bheap.o \ | 17 | bheap.o \ |
18 | ctrldev.o \ | 18 | ctrldev.o \ |
19 | sched_gsn_edf.o \ | 19 | domain.o \ |
20 | sched_psn_edf.o \ | 20 | event_group.o |
21 | domain.o | ||
22 | 21 | ||
23 | obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | 22 | # obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o |
24 | obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | 23 | # obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o |
25 | obj-$(CONFIG_PLUGIN_MC) += sched_mc.o sched_mc_ce.o ce_domain.o event_group.o | 24 | obj-$(CONFIG_PLUGIN_MC) += sched_mc.o sched_mc_ce.o ce_domain.o |
26 | 25 | ||
27 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | 26 | obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o |
28 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | 27 | obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o |
diff --git a/litmus/event_group.c b/litmus/event_group.c
index 22c74a19d1d6..b4521ab370d1 100644
--- a/litmus/event_group.c
+++ b/litmus/event_group.c
@@ -5,6 +5,12 @@ | |||
5 | #include <litmus/trace.h> | 5 | #include <litmus/trace.h> |
6 | #include <litmus/event_group.h> | 6 | #include <litmus/event_group.h> |
7 | 7 | ||
8 | #if 1 | ||
9 | #define VTRACE TRACE | ||
10 | #else | ||
11 | #define VTRACE(fmt, args...) | ||
12 | #endif | ||
13 | |||
8 | /* | 14 | /* |
9 | * Return event_queue slot for the given time. | 15 | * Return event_queue slot for the given time. |
10 | */ | 16 | */ |
@@ -20,6 +26,7 @@ static unsigned int time2slot(lt_t time) | |||
20 | static enum hrtimer_restart on_timer(struct hrtimer *timer) | 26 | static enum hrtimer_restart on_timer(struct hrtimer *timer) |
21 | { | 27 | { |
22 | unsigned long flags; | 28 | unsigned long flags; |
29 | int num = 0; | ||
23 | struct event_list *el; | 30 | struct event_list *el; |
24 | struct rt_event *e; | 31 | struct rt_event *e; |
25 | struct list_head *pos, *safe, list; | 32 | struct list_head *pos, *safe, list; |
@@ -27,19 +34,24 @@ static enum hrtimer_restart on_timer(struct hrtimer *timer) | |||
27 | el = container_of(timer, struct event_list, timer); | 34 | el = container_of(timer, struct event_list, timer); |
28 | 35 | ||
29 | raw_spin_lock_irqsave(&el->group->queue_lock, flags); | 36 | raw_spin_lock_irqsave(&el->group->queue_lock, flags); |
37 | VTRACE("Removing event list 0x%p\n", el); | ||
30 | list_del_init(&el->list); | 38 | list_del_init(&el->list); |
31 | raw_spin_unlock_irqrestore(&el->group->queue_lock, flags); | 39 | raw_spin_unlock_irqrestore(&el->group->queue_lock, flags); |
32 | 40 | ||
33 | /* Empty event list so this event can be requeued */ | 41 | /* Empty event list so this event can be requeued */ |
42 | VTRACE("Emptying event list 0x%p\n", el); | ||
34 | list_replace_init(&el->events, &list); | 43 | list_replace_init(&el->events, &list); |
35 | 44 | ||
36 | /* Fire events */ | 45 | /* Fire events */ |
37 | list_for_each_safe(pos, safe, &list) { | 46 | list_for_each_safe(pos, safe, &list) { |
47 | num++; | ||
38 | e = list_entry(pos, struct rt_event, list); | 48 | e = list_entry(pos, struct rt_event, list); |
39 | TRACE("Dequeueing event with prio %d\n", e->prio); | 49 | TRACE("Dequeueing event with prio %d from 0x%p\n", |
50 | e->prio, el); | ||
40 | list_del_init(pos); | 51 | list_del_init(pos); |
41 | e->fire(e->data); | 52 | e->function(e); |
42 | } | 53 | } |
54 | VTRACE("Exhausted %d events from list 0x%p\n", num, el); | ||
43 | return HRTIMER_NORESTART; | 55 | return HRTIMER_NORESTART; |
44 | } | 56 | } |
45 | 57 | ||
@@ -48,16 +60,22 @@ static enum hrtimer_restart on_timer(struct hrtimer *timer) | |||
48 | */ | 60 | */ |
49 | void insert_event(struct event_list *el, struct rt_event *e) | 61 | void insert_event(struct event_list *el, struct rt_event *e) |
50 | { | 62 | { |
51 | struct list_head *pos; | 63 | struct list_head *pos, *last = NULL; |
52 | struct rt_event *queued; | 64 | struct rt_event *queued; |
53 | list_for_each_prev(pos, &el->events) { | 65 | list_for_each(pos, &el->events) { |
54 | queued = list_entry(pos, struct rt_event, list); | 66 | queued = list_entry(pos, struct rt_event, list); |
67 | last = pos; | ||
55 | if (e->prio < queued->prio) { | 68 | if (e->prio < queued->prio) { |
56 | __list_add(&e->list, pos, pos->next); | 69 | VTRACE("Inserting priority %d 0x%p before %d 0x%p " |
70 | "in 0x%p, pos 0x%p\n", e->prio, &e->list, | ||
71 | queued->prio, &queued->list, el, pos); | ||
72 | list_add_tail(&e->list, pos); | ||
57 | return; | 73 | return; |
58 | } | 74 | } |
59 | } | 75 | } |
60 | list_add(&e->list, &el->events); | 76 | VTRACE("Inserting priority %d 0x%p at end of 0x%p, last 0x%p\n", |
77 | e->prio, &el->list, el, last); | ||
78 | list_add(&e->list, (last) ? last : pos); | ||
61 | } | 79 | } |
62 | 80 | ||
63 | /* | 81 | /* |
@@ -65,52 +83,60 @@ void insert_event(struct event_list *el, struct rt_event *e) | |||
65 | * is being used yet and use_event_heap is 1, will create the list | 83 | * is being used yet and use_event_heap is 1, will create the list |
66 | * and return it. Otherwise it will return NULL. | 84 | * and return it. Otherwise it will return NULL. |
67 | */ | 85 | */ |
68 | static struct event_list *get_event_list(struct event_group *group, | 86 | static struct event_list* get_event_list(struct event_group *group, |
69 | struct rt_event *e, | 87 | struct rt_event *e, |
70 | lt_t fire, | 88 | lt_t fire, |
71 | int use_event_heap) | 89 | int use_event_list) |
72 | { | 90 | { |
73 | struct list_head* pos; | 91 | struct list_head* pos; |
74 | struct event_list *el = NULL, *tmp; | 92 | struct event_list *el = NULL, *tmp; |
75 | unsigned int slot = time2slot(fire); | 93 | unsigned int slot = time2slot(fire); |
76 | 94 | ||
77 | /* initialize pos for the case that the list is empty */ | 95 | VTRACE("Getting list for %llu\n", fire); |
96 | |||
97 | /* Initialize pos for the case that the list is empty */ | ||
78 | pos = group->event_queue[slot].next; | 98 | pos = group->event_queue[slot].next; |
79 | list_for_each(pos, &group->event_queue[slot]) { | 99 | list_for_each(pos, &group->event_queue[slot]) { |
80 | tmp = list_entry(pos, struct event_list, list); | 100 | tmp = list_entry(pos, struct event_list, list); |
81 | if (lt_after_eq(fire, tmp->fire_time) && | 101 | if (lt_after_eq(fire, tmp->fire_time) && |
82 | lt_before(tmp->fire_time, fire)) { | 102 | lt_before(fire, tmp->fire_time + group->res)) { |
83 | /* perfect match -- this happens on hyperperiod | 103 | VTRACE("Found match at time %llu\n", tmp->fire_time); |
84 | * boundaries | ||
85 | */ | ||
86 | el = tmp; | 104 | el = tmp; |
87 | break; | 105 | break; |
88 | } else if (lt_before(fire, tmp->fire_time)) { | 106 | } else if (lt_before(fire, tmp->fire_time)) { |
89 | /* we need to insert a new node since rh is | 107 | /* We need to insert a new node since el is |
90 | * already in the future | 108 | * already in the future |
91 | */ | 109 | */ |
110 | VTRACE("Time %llu was before %llu\n", | ||
111 | fire, tmp->fire_time); | ||
92 | break; | 112 | break; |
113 | } else { | ||
114 | VTRACE("Time %llu was after %llu\n", | ||
115 | fire, tmp->fire_time + group->res); | ||
93 | } | 116 | } |
94 | } | 117 | } |
95 | if (!el && use_event_heap) { | 118 | if (!el && use_event_list) { |
96 | /* use pre-allocated release heap */ | 119 | /* Use pre-allocated list */ |
97 | tmp = e->event_list; | 120 | tmp = e->event_list; |
98 | tmp->fire_time = fire; | 121 | tmp->fire_time = fire; |
99 | tmp->group = group; | 122 | tmp->group = group; |
100 | /* add to queue */ | 123 | /* Add to queue */ |
101 | list_add(&tmp->list, pos->prev); | 124 | list_add(&tmp->list, pos->prev); |
102 | el = tmp; | 125 | el = tmp; |
126 | VTRACE("Using list for priority %d and time %llu\n", | ||
127 | e->prio, fire); | ||
103 | } | 128 | } |
104 | return el; | 129 | return el; |
105 | } | 130 | } |
106 | 131 | ||
107 | /* | 132 | /* |
108 | * Prepare a release heap for a new set of events. | 133 | * Prepare a release list for a new set of events. |
109 | */ | 134 | */ |
110 | static void reinit_event_list(struct rt_event *e) | 135 | static void reinit_event_list(struct rt_event *e) |
111 | { | 136 | { |
112 | struct event_list *el = e->event_list; | 137 | struct event_list *el = e->event_list; |
113 | BUG_ON(hrtimer_cancel(&el->timer)); | 138 | BUG_ON(hrtimer_cancel(&el->timer)); |
139 | VTRACE("Reinitting 0x%p\n", el); | ||
114 | INIT_LIST_HEAD(&el->events); | 140 | INIT_LIST_HEAD(&el->events); |
115 | atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE); | 141 | atomic_set(&el->info.state, HRTIMER_START_ON_INACTIVE); |
116 | } | 142 | } |
@@ -122,7 +148,8 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire) | |||
122 | { | 148 | { |
123 | struct event_list *el; | 149 | struct event_list *el; |
124 | 150 | ||
125 | TRACE("Adding event with prio %d @ %llu\n", event->prio, fire); | 151 | VTRACE("Adding event with priority %d for time %llu\n", |
152 | e->prio, fire); | ||
126 | 153 | ||
127 | raw_spin_lock(&group->queue_lock); | 154 | raw_spin_lock(&group->queue_lock); |
128 | el = get_event_list(group, e, fire, 0); | 155 | el = get_event_list(group, e, fire, 0); |
@@ -140,7 +167,7 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire) | |||
140 | 167 | ||
141 | /* Arm timer if we are the owner */ | 168 | /* Arm timer if we are the owner */ |
142 | if (el == e->event_list) { | 169 | if (el == e->event_list) { |
143 | TRACE("Arming timer for %llu\n", fire); | 170 | VTRACE("Arming timer for %llu\n", fire); |
144 | if (group->cpu == smp_processor_id()) { | 171 | if (group->cpu == smp_processor_id()) { |
145 | __hrtimer_start_range_ns(&el->timer, | 172 | __hrtimer_start_range_ns(&el->timer, |
146 | ns_to_ktime(el->fire_time), | 173 | ns_to_ktime(el->fire_time), |
@@ -151,18 +178,23 @@ void add_event(struct event_group *group, struct rt_event *e, lt_t fire) | |||
151 | HRTIMER_MODE_ABS_PINNED); | 178 | HRTIMER_MODE_ABS_PINNED); |
152 | } | 179 | } |
153 | } else { | 180 | } else { |
154 | TRACE("Not my timer @%llu", fire); | 181 | VTRACE("Not my timer @%llu\n", fire); |
155 | } | 182 | } |
156 | } | 183 | } |
157 | 184 | ||
158 | /** | 185 | /** |
159 | * cancel_event() - Remove event from the group. | 186 | * cancel_event() - Remove event from the group. |
160 | */ | 187 | */ |
161 | void cancel_event(struct event_group *group, struct rt_event *e) | 188 | void cancel_event(struct rt_event *e) |
162 | { | 189 | { |
163 | raw_spin_lock(&group->queue_lock); | 190 | struct event_group *group; |
164 | list_del_init(&e->list); | 191 | if (e->list.next != &e->list) { |
165 | raw_spin_unlock(&group->queue_lock); | 192 | group = e->event_list->group; |
193 | raw_spin_lock(&group->queue_lock); | ||
194 | VTRACE("Canceling event with priority %d\n", e->prio); | ||
195 | list_del_init(&e->list); | ||
196 | raw_spin_unlock(&group->queue_lock); | ||
197 | } | ||
166 | } | 198 | } |
167 | 199 | ||
168 | /** | 200 | /** |
@@ -171,30 +203,34 @@ void cancel_event(struct event_group *group, struct rt_event *e) | |||
171 | void init_event_group(struct event_group *group, lt_t res, int cpu) | 203 | void init_event_group(struct event_group *group, lt_t res, int cpu) |
172 | { | 204 | { |
173 | int i; | 205 | int i; |
206 | VTRACE("Creating group with res %llu on CPU %d", res, cpu); | ||
174 | group->res = res; | 207 | group->res = res; |
175 | group->cpu = cpu; | 208 | group->cpu = cpu; |
176 | 209 | for (i = 0; i < EVENT_QUEUE_SLOTS; i++) | |
177 | for (i = 0; i < EVENT_QUEUE_SLOTS; i++) { | ||
178 | INIT_LIST_HEAD(&group->event_queue[i]); | 210 | INIT_LIST_HEAD(&group->event_queue[i]); |
179 | } | ||
180 | |||
181 | raw_spin_lock_init(&group->queue_lock); | 211 | raw_spin_lock_init(&group->queue_lock); |
182 | } | 212 | } |
183 | 213 | ||
184 | struct kmem_cache *event_list_cache; | 214 | struct kmem_cache *event_list_cache, *event_cache; |
185 | struct event_list * event_list_alloc(int gfp_flags) | 215 | |
216 | struct event_list* event_list_alloc(int gfp_flags) | ||
186 | { | 217 | { |
187 | struct event_list *el = kmem_cache_alloc(event_list_cache, gfp_flags); | 218 | struct event_list *el = kmem_cache_alloc(event_list_cache, gfp_flags); |
188 | if (el) { | 219 | if (el) { |
189 | hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 220 | hrtimer_init(&el->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
190 | INIT_LIST_HEAD(&el->list); | 221 | INIT_LIST_HEAD(&el->list); |
191 | el->timer.function = on_timer; | 222 | el->timer.function = on_timer; |
223 | } else { | ||
224 | VTRACE("Failed to allocate event list!"); | ||
192 | } | 225 | } |
193 | return el; | 226 | return el; |
194 | } | 227 | } |
195 | 228 | ||
196 | void event_list_free(struct event_list *el) | 229 | void init_event(struct rt_event *e, int prio, fire_event_t function, |
230 | struct event_list *el) | ||
197 | { | 231 | { |
198 | hrtimer_cancel(&el->timer); | 232 | e->prio = prio; |
199 | kmem_cache_free(event_list_cache, el); | 233 | e->function = function; |
234 | e->event_list = el; | ||
235 | INIT_LIST_HEAD(&e->list); | ||
200 | } | 236 | } |
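A worked example of the merge window tested in get_event_list() above, with assumed values:

/*
 * Suppose group->res = 1ms and an event_list is already queued with
 * fire_time = 5ms.  A new event requested for fire = 5.4ms satisfies
 *     lt_after_eq(fire, tmp->fire_time) &&
 *     lt_before(fire, tmp->fire_time + group->res)
 * so it is inserted into that existing list and served by the timer that
 * is already armed for 5ms.  A request for fire = 6.2ms fails the second
 * check; the event's own pre-allocated event_list is queued instead and a
 * separate timer is armed for 6.2ms.
 */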
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3b502d62cb0e..deaaee3b3f20 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -13,6 +13,7 @@ | |||
13 | #include <litmus/litmus.h> | 13 | #include <litmus/litmus.h> |
14 | #include <litmus/bheap.h> | 14 | #include <litmus/bheap.h> |
15 | #include <litmus/trace.h> | 15 | #include <litmus/trace.h> |
16 | #include <litmus/event_group.h> | ||
16 | #include <litmus/rt_domain.h> | 17 | #include <litmus/rt_domain.h> |
17 | #include <litmus/litmus_proc.h> | 18 | #include <litmus/litmus_proc.h> |
18 | #include <litmus/sched_trace.h> | 19 | #include <litmus/sched_trace.h> |
@@ -21,7 +22,6 @@ | |||
21 | #include <linux/pid.h> | 22 | #include <linux/pid.h> |
22 | #include <linux/hrtimer.h> | 23 | #include <linux/hrtimer.h> |
23 | #include <litmus/sched_mc.h> | 24 | #include <litmus/sched_mc.h> |
24 | #include <litmus/event_group.h> | ||
25 | #else | 25 | #else |
26 | struct mc_task; | 26 | struct mc_task; |
27 | #endif | 27 | #endif |
@@ -43,8 +43,11 @@ atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU); | |||
43 | static struct kmem_cache *bheap_node_cache; | 43 | static struct kmem_cache *bheap_node_cache; |
44 | extern struct kmem_cache *release_heap_cache; | 44 | extern struct kmem_cache *release_heap_cache; |
45 | 45 | ||
46 | #ifdef CONFIG_MERGE_TIMERS | ||
47 | extern struct kmem_cache *event_list_cache; | ||
48 | #endif | ||
46 | #ifdef CONFIG_PLUGIN_MC | 49 | #ifdef CONFIG_PLUGIN_MC |
47 | static struct kmem_cache *event_list_cache; | 50 | static struct kmem_cache *mc_data_cache; |
48 | #endif | 51 | #endif |
49 | 52 | ||
50 | struct bheap_node* bheap_node_alloc(int gfp_flags) | 53 | struct bheap_node* bheap_node_alloc(int gfp_flags) |
@@ -322,16 +325,14 @@ asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param | |||
322 | } | 325 | } |
323 | 326 | ||
324 | /* check parameters passed in are valid */ | 327 | /* check parameters passed in are valid */ |
325 | if (mc.crit < CRIT_LEVEL_A || mc.crit >= NUM_CRIT_LEVELS) | 328 | if (mc.crit < CRIT_LEVEL_A || mc.crit >= NUM_CRIT_LEVELS) { |
326 | { | ||
327 | printk(KERN_WARNING "litmus: real-time task %d rejected because " | 329 | printk(KERN_WARNING "litmus: real-time task %d rejected because " |
328 | "of invalid criticality level\n", pid); | 330 | "of invalid criticality level\n", pid); |
329 | goto out_unlock; | 331 | goto out_unlock; |
330 | } | 332 | } |
331 | if (CRIT_LEVEL_A == mc.crit && | 333 | if (CRIT_LEVEL_A == mc.crit && |
332 | (mc.lvl_a_id < 0 || | 334 | (mc.lvl_a_id < 0 || |
333 | mc.lvl_a_id >= CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS)) | 335 | mc.lvl_a_id >= CONFIG_PLUGIN_MC_LEVEL_A_MAX_TASKS)) { |
334 | { | ||
335 | printk(KERN_WARNING "litmus: real-time task %d rejected because " | 336 | printk(KERN_WARNING "litmus: real-time task %d rejected because " |
336 | "of invalid level A id\n", pid); | 337 | "of invalid level A id\n", pid); |
337 | goto out_unlock; | 338 | goto out_unlock; |
@@ -339,16 +340,15 @@ asmlinkage long sys_set_rt_task_mc_param(pid_t pid, struct mc_task __user *param | |||
339 | 340 | ||
340 | mc_data = tsk_rt(target)->mc_data; | 341 | mc_data = tsk_rt(target)->mc_data; |
341 | if (!mc_data) { | 342 | if (!mc_data) { |
342 | mc_data = kzalloc(sizeof(*mc_data), GFP_ATOMIC); | 343 | mc_data = kmem_cache_alloc(mc_data_cache, GFP_ATOMIC); |
343 | if (!mc_data) | 344 | if (!mc_data) { |
344 | { | ||
345 | retval = -ENOMEM; | 345 | retval = -ENOMEM; |
346 | goto out_unlock; | 346 | goto out_unlock; |
347 | } | 347 | } |
348 | tsk_rt(target)->mc_data = mc_data; | 348 | tsk_rt(target)->mc_data = mc_data; |
349 | } | 349 | } |
350 | |||
351 | mc_data->mc_task = mc; | 350 | mc_data->mc_task = mc; |
351 | |||
352 | retval = 0; | 352 | retval = 0; |
353 | out_unlock: | 353 | out_unlock: |
354 | read_unlock_irq(&tasklist_lock); | 354 | read_unlock_irq(&tasklist_lock); |
@@ -619,9 +619,12 @@ static int __init _init_litmus(void) | |||
619 | 619 | ||
620 | bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); | 620 | bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); |
621 | release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC); | 621 | release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC); |
622 | #ifdef CONFIG_PLUGIN_MC | 622 | #ifdef CONFIG_MERGE_TIMERS |
623 | event_list_cache = KMEM_CACHE(event_list, SLAB_PANIC); | 623 | event_list_cache = KMEM_CACHE(event_list, SLAB_PANIC); |
624 | #endif | 624 | #endif |
625 | #ifdef CONFIG_PLUGIN_MC | ||
626 | mc_data_cache = KMEM_CACHE(mc_data, SLAB_PANIC); | ||
627 | #endif | ||
625 | 628 | ||
626 | #ifdef CONFIG_MAGIC_SYSRQ | 629 | #ifdef CONFIG_MAGIC_SYSRQ |
627 | /* offer some debugging help */ | 630 | /* offer some debugging help */ |
@@ -641,7 +644,12 @@ static void _exit_litmus(void) | |||
641 | exit_litmus_proc(); | 644 | exit_litmus_proc(); |
642 | kmem_cache_destroy(bheap_node_cache); | 645 | kmem_cache_destroy(bheap_node_cache); |
643 | kmem_cache_destroy(release_heap_cache); | 646 | kmem_cache_destroy(release_heap_cache); |
647 | #ifdef CONFIG_MERGE_TIMERS | ||
644 | kmem_cache_destroy(event_list_cache); | 648 | kmem_cache_destroy(event_list_cache); |
649 | #endif | ||
650 | #ifdef CONFIG_PLUGIN_MC | ||
651 | kmem_cache_destroy(mc_data_cache); | ||
652 | #endif | ||
645 | } | 653 | } |
646 | 654 | ||
647 | module_init(_init_litmus); | 655 | module_init(_init_litmus); |
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 3e419d7c9ae7..50a6abfd7676 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -12,13 +12,13 @@ | |||
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | 13 | ||
14 | #include <litmus/litmus.h> | 14 | #include <litmus/litmus.h> |
15 | #include <litmus/event_group.h> | ||
15 | #include <litmus/sched_plugin.h> | 16 | #include <litmus/sched_plugin.h> |
16 | #include <litmus/sched_trace.h> | 17 | #include <litmus/sched_trace.h> |
17 | #include <litmus/rt_domain.h> | 18 | #include <litmus/rt_domain.h> |
18 | #include <litmus/trace.h> | 19 | #include <litmus/trace.h> |
19 | #include <litmus/bheap.h> | 20 | #include <litmus/bheap.h> |
20 | 21 | ||
21 | |||
22 | /* Uncomment when debugging timer races... */ | 22 | /* Uncomment when debugging timer races... */ |
23 | #if 0 | 23 | #if 0 |
24 | #define VTRACE_TASK TRACE_TASK | 24 | #define VTRACE_TASK TRACE_TASK |
@@ -49,34 +49,36 @@ static unsigned int time2slot(lt_t time) | |||
49 | return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS; | 49 | return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS; |
50 | } | 50 | } |
51 | 51 | ||
52 | static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | 52 | static void do_release(struct release_heap *rh) |
53 | { | 53 | { |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | struct release_heap* rh; | ||
56 | |||
57 | VTRACE("on_release_timer(0x%p) starts.\n", timer); | ||
58 | |||
59 | TS_RELEASE_START; | 55 | TS_RELEASE_START; |
60 | 56 | ||
61 | rh = container_of(timer, struct release_heap, timer); | ||
62 | |||
63 | raw_spin_lock_irqsave(&rh->dom->release_lock, flags); | 57 | raw_spin_lock_irqsave(&rh->dom->release_lock, flags); |
64 | VTRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); | 58 | VTRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); |
65 | /* remove from release queue */ | 59 | /* remove from release queue */ |
66 | list_del(&rh->list); | 60 | list_del_init(&rh->list); |
67 | raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags); | 61 | raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags); |
68 | VTRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock); | 62 | VTRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock); |
69 | 63 | ||
70 | /* call release callback */ | 64 | /* call release callback */ |
71 | rh->dom->release_jobs(rh->dom, &rh->heap); | 65 | rh->dom->release_jobs(rh->dom, &rh->heap); |
72 | /* WARNING: rh can be referenced from other CPUs from now on. */ | ||
73 | 66 | ||
74 | TS_RELEASE_END; | 67 | TS_RELEASE_END; |
68 | } | ||
75 | 69 | ||
76 | VTRACE("on_release_timer(0x%p) ends.\n", timer); | 70 | #ifdef CONFIG_MERGE_TIMERS |
77 | 71 | static void on_release(struct rt_event *e) | |
78 | return HRTIMER_NORESTART; | 72 | { |
73 | do_release(container_of(e, struct release_heap, event)); | ||
74 | } | ||
75 | #else | ||
76 | static enum hrtimer_restart on_release(struct hrtimer *timer) | ||
77 | { | ||
78 | do_release(container_of(timer, struct release_heap, timer)); | ||
79 | return HRTIMER_NORESTART; | ||
79 | } | 80 | } |
81 | #endif | ||
80 | 82 | ||
81 | /* allocated in litmus.c */ | 83 | /* allocated in litmus.c */ |
82 | struct kmem_cache * release_heap_cache; | 84 | struct kmem_cache * release_heap_cache; |
@@ -84,11 +86,16 @@ struct kmem_cache * release_heap_cache; | |||
84 | struct release_heap* release_heap_alloc(int gfp_flags) | 86 | struct release_heap* release_heap_alloc(int gfp_flags) |
85 | { | 87 | { |
86 | struct release_heap* rh; | 88 | struct release_heap* rh; |
87 | rh= kmem_cache_alloc(release_heap_cache, gfp_flags); | 89 | rh = kmem_cache_alloc(release_heap_cache, gfp_flags); |
88 | if (rh) { | 90 | if (rh) { |
91 | #ifdef CONFIG_MERGE_TIMERS | ||
92 | init_event(&rh->event, 0, on_release, | ||
93 | event_list_alloc(GFP_ATOMIC)); | ||
94 | #else | ||
89 | /* initialize timer */ | 95 | /* initialize timer */ |
90 | hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 96 | hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
91 | rh->timer.function = on_release_timer; | 97 | rh->timer.function = on_release; |
98 | #endif | ||
92 | } | 99 | } |
93 | return rh; | 100 | return rh; |
94 | } | 101 | } |
@@ -96,7 +103,12 @@ struct release_heap* release_heap_alloc(int gfp_flags) | |||
96 | void release_heap_free(struct release_heap* rh) | 103 | void release_heap_free(struct release_heap* rh) |
97 | { | 104 | { |
98 | /* make sure timer is no longer in use */ | 105 | /* make sure timer is no longer in use */ |
106 | #ifdef CONFIG_MERGE_TIMERS | ||
107 | if (rh->dom) | ||
108 | cancel_event(&rh->event); | ||
109 | #else | ||
99 | hrtimer_cancel(&rh->timer); | 110 | hrtimer_cancel(&rh->timer); |
111 | #endif | ||
100 | kmem_cache_free(release_heap_cache, rh); | 112 | kmem_cache_free(release_heap_cache, rh); |
101 | } | 113 | } |
102 | 114 | ||
@@ -145,13 +157,17 @@ static struct release_heap* get_release_heap(rt_domain_t *rt, | |||
145 | return heap; | 157 | return heap; |
146 | } | 158 | } |
147 | 159 | ||
148 | static void reinit_release_heap(struct task_struct* t) | 160 | static void reinit_release_heap(rt_domain_t *rt, struct task_struct* t) |
149 | { | 161 | { |
150 | struct release_heap* rh; | 162 | struct release_heap* rh; |
151 | 163 | ||
152 | /* use pre-allocated release heap */ | 164 | /* use pre-allocated release heap */ |
153 | rh = tsk_rt(t)->rel_heap; | 165 | rh = tsk_rt(t)->rel_heap; |
154 | 166 | ||
167 | #ifdef CONFIG_MERGE_TIMERS | ||
168 | rh->event.prio = rt->prio; | ||
169 | cancel_event(&rh->event); | ||
170 | #else | ||
155 | /* Make sure it is safe to use. The timer callback could still | 171 | /* Make sure it is safe to use. The timer callback could still |
156 | * be executing on another CPU; hrtimer_cancel() will wait | 172 | * be executing on another CPU; hrtimer_cancel() will wait |
157 | * until the timer callback has completed. However, under no | 173 | * until the timer callback has completed. However, under no |
@@ -163,13 +179,47 @@ static void reinit_release_heap(struct task_struct* t) | |||
163 | */ | 179 | */ |
164 | BUG_ON(hrtimer_cancel(&rh->timer)); | 180 | BUG_ON(hrtimer_cancel(&rh->timer)); |
165 | 181 | ||
182 | #ifdef CONFIG_RELEASE_MASTER | ||
183 | atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE); | ||
184 | #endif | ||
185 | #endif | ||
166 | /* initialize */ | 186 | /* initialize */ |
167 | bheap_init(&rh->heap); | 187 | bheap_init(&rh->heap); |
188 | |||
189 | } | ||
190 | |||
191 | static void arm_release_timer(struct release_heap *rh) | ||
192 | { | ||
193 | #ifdef CONFIG_MERGE_TIMERS | ||
194 | add_event(rh->dom->event_group, &rh->event, rh->release_time); | ||
195 | #else | ||
196 | VTRACE("arming timer 0x%p\n", &rh->timer); | ||
197 | /* we cannot arm the timer using hrtimer_start() | ||
198 | * as it may deadlock on rq->lock | ||
199 | * PINNED mode is ok on both local and remote CPU | ||
200 | */ | ||
201 | |||
168 | #ifdef CONFIG_RELEASE_MASTER | 202 | #ifdef CONFIG_RELEASE_MASTER |
169 | atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE); | 203 | if (rt->release_master == NO_CPU && |
204 | target_cpu == NO_CPU) | ||
205 | #endif | ||
206 | __hrtimer_start_range_ns(&rh->timer, | ||
207 | ns_to_ktime(rh->release_time), | ||
208 | 0, HRTIMER_MODE_ABS_PINNED, 0); | ||
209 | #ifdef CONFIG_RELEASE_MASTER | ||
210 | else | ||
211 | hrtimer_start_on(/* target_cpu overrides release master */ | ||
212 | (target_cpu != NO_CPU ? | ||
213 | target_cpu : rt->release_master), | ||
214 | &rh->info, &rh->timer, | ||
215 | ns_to_ktime(rh->release_time), | ||
216 | HRTIMER_MODE_ABS_PINNED); | ||
217 | #endif | ||
170 | #endif | 218 | #endif |
171 | } | 219 | } |
172 | /* arm_release_timer() - start local release timer or trigger | 220 | |
221 | |||
222 | /* setup_release() - start local release timer or trigger | ||
173 | * remote timer (pull timer) | 223 | * remote timer (pull timer) |
174 | * | 224 | * |
175 | * Called by add_release() with: | 225 | * Called by add_release() with: |
@@ -177,10 +227,10 @@ static void reinit_release_heap(struct task_struct* t) | |||
177 | * - IRQ disabled | 227 | * - IRQ disabled |
178 | */ | 228 | */ |
179 | #ifdef CONFIG_RELEASE_MASTER | 229 | #ifdef CONFIG_RELEASE_MASTER |
180 | #define arm_release_timer(t) arm_release_timer_on((t), NO_CPU) | 230 | #define setup_release(t) setup_release_on((t), NO_CPU) |
181 | static void arm_release_timer_on(rt_domain_t *_rt , int target_cpu) | 231 | static void setup_release_on(rt_domain_t *_rt , int target_cpu) |
182 | #else | 232 | #else |
183 | static void arm_release_timer(rt_domain_t *_rt) | 233 | static void setup_release(rt_domain_t *_rt) |
184 | #endif | 234 | #endif |
185 | { | 235 | { |
186 | rt_domain_t *rt = _rt; | 236 | rt_domain_t *rt = _rt; |
@@ -189,14 +239,14 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
189 | struct task_struct* t; | 239 | struct task_struct* t; |
190 | struct release_heap* rh; | 240 | struct release_heap* rh; |
191 | 241 | ||
192 | VTRACE("arm_release_timer() at %llu\n", litmus_clock()); | 242 | VTRACE("setup_release() at %llu\n", litmus_clock()); |
193 | list_replace_init(&rt->tobe_released, &list); | 243 | list_replace_init(&rt->tobe_released, &list); |
194 | 244 | ||
195 | list_for_each_safe(pos, safe, &list) { | 245 | list_for_each_safe(pos, safe, &list) { |
196 | /* pick task of work list */ | 246 | /* pick task of work list */ |
197 | t = list_entry(pos, struct task_struct, rt_param.list); | 247 | t = list_entry(pos, struct task_struct, rt_param.list); |
198 | sched_trace_task_release(t); | 248 | sched_trace_task_release(t); |
199 | list_del(pos); | 249 | list_del_init(pos); |
200 | 250 | ||
201 | /* put into release heap while holding release_lock */ | 251 | /* put into release heap while holding release_lock */ |
202 | raw_spin_lock(&rt->release_lock); | 252 | raw_spin_lock(&rt->release_lock); |
@@ -209,7 +259,7 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
209 | VTRACE_TASK(t, "Dropped release_lock 0x%p\n", | 259 | VTRACE_TASK(t, "Dropped release_lock 0x%p\n", |
210 | &rt->release_lock); | 260 | &rt->release_lock); |
211 | 261 | ||
212 | reinit_release_heap(t); | 262 | reinit_release_heap(rt, t); |
213 | VTRACE_TASK(t, "release_heap ready\n"); | 263 | VTRACE_TASK(t, "release_heap ready\n"); |
214 | 264 | ||
215 | raw_spin_lock(&rt->release_lock); | 265 | raw_spin_lock(&rt->release_lock); |
@@ -219,7 +269,7 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
219 | rh = get_release_heap(rt, t, 1); | 269 | rh = get_release_heap(rt, t, 1); |
220 | } | 270 | } |
221 | bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); | 271 | bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); |
222 | VTRACE_TASK(t, "arm_release_timer(): added to release heap\n"); | 272 | VTRACE_TASK(t, "setup_release(): added to release heap\n"); |
223 | 273 | ||
224 | raw_spin_unlock(&rt->release_lock); | 274 | raw_spin_unlock(&rt->release_lock); |
225 | VTRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); | 275 | VTRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); |
@@ -228,31 +278,9 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
228 | * owner do the arming (which is the "first" task to reference | 278 | * owner do the arming (which is the "first" task to reference |
229 | * this release_heap anyway). | 279 | * this release_heap anyway). |
230 | */ | 280 | */ |
231 | if (rh == tsk_rt(t)->rel_heap) { | 281 | if (rh == tsk_rt(t)->rel_heap) |
232 | VTRACE_TASK(t, "arming timer 0x%p\n", &rh->timer); | 282 | arm_release_timer(rh); |
233 | /* we cannot arm the timer using hrtimer_start() | 283 | else |
234 | * as it may deadlock on rq->lock | ||
235 | * | ||
236 | * PINNED mode is ok on both local and remote CPU | ||
237 | */ | ||
238 | #ifdef CONFIG_RELEASE_MASTER | ||
239 | if (rt->release_master == NO_CPU && | ||
240 | target_cpu == NO_CPU) | ||
241 | #endif | ||
242 | __hrtimer_start_range_ns(&rh->timer, | ||
243 | ns_to_ktime(rh->release_time), | ||
244 | 0, HRTIMER_MODE_ABS_PINNED, 0); | ||
245 | #ifdef CONFIG_RELEASE_MASTER | ||
246 | else | ||
247 | hrtimer_start_on( | ||
248 | /* target_cpu overrides release master */ | ||
249 | (target_cpu != NO_CPU ? | ||
250 | target_cpu : rt->release_master), | ||
251 | &rh->info, &rh->timer, | ||
252 | ns_to_ktime(rh->release_time), | ||
253 | HRTIMER_MODE_ABS_PINNED); | ||
254 | #endif | ||
255 | } else | ||
256 | VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer); | 284 | VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer); |
257 | } | 285 | } |
258 | } | 286 | } |
@@ -260,8 +288,7 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
260 | void rt_domain_init(rt_domain_t *rt, | 288 | void rt_domain_init(rt_domain_t *rt, |
261 | bheap_prio_t order, | 289 | bheap_prio_t order, |
262 | check_resched_needed_t check, | 290 | check_resched_needed_t check, |
263 | release_jobs_t release | 291 | release_jobs_t release) |
264 | ) | ||
265 | { | 292 | { |
266 | int i; | 293 | int i; |
267 | 294 | ||
@@ -273,7 +300,7 @@ void rt_domain_init(rt_domain_t *rt, | |||
273 | if (!order) | 300 | if (!order) |
274 | order = dummy_order; | 301 | order = dummy_order; |
275 | 302 | ||
276 | #ifdef CONFIG_RELEASE_MASTER | 303 | #if defined(CONFIG_RELEASE_MASTER) && !defined(CONFIG_MERGE_TIMERS) |
277 | rt->release_master = NO_CPU; | 304 | rt->release_master = NO_CPU; |
278 | #endif | 305 | #endif |
279 | 306 | ||
@@ -329,7 +356,7 @@ void __add_release_on(rt_domain_t* rt, struct task_struct *task, | |||
329 | /* start release timer */ | 356 | /* start release timer */ |
330 | TS_SCHED2_START(task); | 357 | TS_SCHED2_START(task); |
331 | 358 | ||
332 | arm_release_timer_on(rt, target_cpu); | 359 | setup_release_on(rt, target_cpu); |
333 | 360 | ||
334 | TS_SCHED2_END(task); | 361 | TS_SCHED2_END(task); |
335 | } | 362 | } |
@@ -347,7 +374,7 @@ void __add_release(rt_domain_t* rt, struct task_struct *task) | |||
347 | /* start release timer */ | 374 | /* start release timer */ |
348 | TS_SCHED2_START(task); | 375 | TS_SCHED2_START(task); |
349 | 376 | ||
350 | arm_release_timer(rt); | 377 | setup_release(rt); |
351 | 378 | ||
352 | TS_SCHED2_END(task); | 379 | TS_SCHED2_END(task); |
353 | } | 380 | } |
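Under CONFIG_MERGE_TIMERS the release path above relies on the domain's event_group and prio fields being filled in by the plugin; a sketch of what that setup presumably looks like (the callback names, the group pointer, and the priority value are placeholders, not taken from this patch):

rt_domain_init(&dom, my_ready_order, my_check_resched, my_release_jobs);
#ifdef CONFIG_MERGE_TIMERS
/* Release timers for this domain merge into this group, and each
 * release_heap's rt_event inherits dom.prio (lower is better). */
dom.event_group = my_event_group;
dom.prio = 2;
#endif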
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 577e7d36faf5..30898246ea38 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -22,6 +22,7 @@ | |||
22 | #include <litmus/sched_trace.h> | 22 | #include <litmus/sched_trace.h> |
23 | #include <litmus/domain.h> | 23 | #include <litmus/domain.h> |
24 | #include <litmus/bheap.h> | 24 | #include <litmus/bheap.h> |
25 | #include <litmus/event_group.h> | ||
25 | 26 | ||
26 | #include <litmus/sched_mc.h> | 27 | #include <litmus/sched_mc.h> |
27 | 28 | ||
@@ -34,85 +35,84 @@ | |||
34 | * @timer For ghost task budget enforcement | 35 | * @timer For ghost task budget enforcement |
35 | * @node Used to sort crit_entries by preemptability in global domains | 36 | * @node Used to sort crit_entries by preemptability in global domains |
36 | */ | 37 | */ |
37 | typedef struct { | 38 | struct crit_entry { |
38 | enum crit_level level; | 39 | enum crit_level level; |
39 | struct task_struct* linked; | 40 | struct task_struct* linked; |
40 | domain_t* domain; | 41 | struct domain* domain; |
41 | int usable; | 42 | int usable; |
43 | #ifdef CONFIG_MERGE_TIMERS | ||
44 | struct rt_event event; | ||
45 | #else | ||
42 | struct hrtimer timer; | 46 | struct hrtimer timer; |
47 | #endif | ||
43 | struct bheap_node* node; | 48 | struct bheap_node* node; |
44 | atomic_t dirty; | 49 | }; |
45 | } crit_entry_t; | ||
46 | 50 | ||
47 | /** | 51 | /** |
48 | * cpu_entry_t - State of a CPU for the entire MC system | 52 | * struct cpu_entry - State of a CPU for the entire MC system |
49 | * @cpu CPU id | 53 | * @cpu CPU id |
50 | * @scheduled Task that is physically running | 54 | * @scheduled Task that is physically running |
51 | * @linked Task that should be running / is logically running | 55 | * @linked Task that should be running / is logically running |
52 | * @lock For serialization | 56 | * @lock For serialization |
53 | * @crit_entries Array of CPU state per criticality level | 57 | * @crit_entries Array of CPU state per criticality level |
54 | */ | 58 | */ |
55 | typedef struct { | 59 | struct cpu_entry { |
56 | int cpu; | 60 | int cpu; |
57 | struct task_struct* scheduled; | 61 | struct task_struct* scheduled; |
58 | struct task_struct* linked; | 62 | struct task_struct* linked; |
59 | raw_spinlock_t lock; | 63 | raw_spinlock_t lock; |
60 | crit_entry_t crit_entries[NUM_CRIT_LEVELS]; | 64 | struct crit_entry crit_entries[NUM_CRIT_LEVELS]; |
61 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 65 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
62 | struct list_head redir; | 66 | struct list_head redir; |
63 | raw_spinlock_t redir_lock; | 67 | raw_spinlock_t redir_lock; |
64 | #endif | 68 | #endif |
65 | } cpu_entry_t; | 69 | #ifdef CONFIG_MERGE_TIMERS |
70 | struct event_group* event_group; | ||
71 | #endif | ||
72 | }; | ||
66 | 73 | ||
67 | /** | 74 | /** |
68 | * domain_data_t - Wrap domains with related CPU state | 75 | * struct domain_data - Wrap domains with related CPU state |
69 | * @domain A domain for a criticality level | 76 | * @domain A domain for a criticality level |
70 | * @heap The preemptable heap of crit entries (for global domains) | 77 | * @heap The preemptable heap of crit entries (for global domains) |
71 | * @crit_entry The crit entry for this domain (for partitioned domains) | 78 | * @crit_entry The crit entry for this domain (for partitioned domains) |
72 | */ | 79 | */ |
73 | typedef struct { | 80 | struct domain_data { |
74 | domain_t domain; | 81 | struct domain domain; |
75 | struct bheap* heap; | 82 | struct bheap* heap; |
76 | crit_entry_t* crit_entry; | 83 | struct crit_entry* crit_entry; |
77 | } domain_data_t; | 84 | }; |
78 | 85 | ||
79 | static cpu_entry_t* cpus[NR_CPUS]; | 86 | DEFINE_PER_CPU(struct cpu_entry, cpus); |
80 | #ifdef CONFIG_RELEASE_MASTER | 87 | #ifdef CONFIG_RELEASE_MASTER |
81 | static int interrupt_cpu; | 88 | static int interrupt_cpu; |
82 | #endif | 89 | #endif |
90 | #ifdef CONFIG_MERGE_TIMERS | ||
91 | static struct event_group* global_group; | ||
92 | #endif | ||
83 | 93 | ||
84 | #define domain_data(dom) (container_of(dom, domain_data_t, domain)) | 94 | #define domain_data(dom) (container_of(dom, struct domain_data, domain)) |
85 | #define is_global(dom) (domain_data(dom)->heap) | 95 | #define is_global(dom) (domain_data(dom)->heap) |
86 | #define is_global_task(t) (is_global(get_task_domain(t))) | 96 | #define is_global_task(t) (is_global(get_task_domain(t))) |
87 | #define is_in_list(t) (tsk_rt(t)->list.next != tsk_rt(t)->list) | ||
88 | #define can_requeue(t) \ | 97 | #define can_requeue(t) \ |
89 | (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU) | 98 | (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU) |
90 | #define entry_level(e) \ | 99 | #define entry_level(e) \ |
91 | (((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1) | 100 | (((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1) |
92 | #define crit_cpu(ce) \ | 101 | #define crit_cpu(ce) \ |
93 | (container_of((void*)((ce) - (ce)->level), cpu_entry_t, crit_entries)) | 102 | (container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries)) |
94 | /* Useful debug macros */ | ||
95 | #define TS "(%s/%d:%d:%s)" | ||
96 | #define TA(t) (t) ? (is_ghost(t)) ? "ghost" : t->comm : "NULL", (t) ? t->pid : 1, \ | ||
97 | (t) ? t->rt_param.job_params.job_no : 1, \ | ||
98 | (t && get_task_domain(t)) ? get_task_domain(t)->name : "" | ||
99 | #define TRACE_ENTRY(e, fmt, args...) \ | 103 | #define TRACE_ENTRY(e, fmt, args...) \ |
100 | TRACE("P%d, linked=" TS " " fmt "\n", \ | 104 | TRACE("P%d, linked=" TS " " fmt "\n", e->cpu, TA(e->linked), ##args) |
101 | e->cpu, TA(e->linked), ##args) | ||
102 | #define TRACE_CRIT_ENTRY(ce, fmt, args...) \ | 105 | #define TRACE_CRIT_ENTRY(ce, fmt, args...) \ |
103 | TRACE("%s P%d, linked=" TS " " fmt "\n", \ | 106 | TRACE("%s P%d, linked=" TS " " fmt "\n", \ |
104 | (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args) | 107 | (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args) |
105 | #undef TRACE_TASK | ||
106 | #define TRACE_TASK(t, fmt, args...) \ | ||
107 | TRACE(TS " " fmt "\n", TA(t), ##args) | ||
108 | 108 | ||
109 | /* | 109 | /* |
110 | * Sort CPUs within a global domain by the domain's priority function. | 110 | * Sort CPUs within a global domain by the domain's priority function. |
111 | */ | 111 | */ |
112 | static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) | 112 | static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) |
113 | { | 113 | { |
114 | domain_t *domain; | 114 | struct domain *domain; |
115 | crit_entry_t *first, *second; | 115 | struct crit_entry *first, *second; |
116 | struct task_struct *first_link, *second_link; | 116 | struct task_struct *first_link, *second_link; |
117 | 117 | ||
118 | first = a->value; | 118 | first = a->value; |
@@ -134,7 +134,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b) | |||
134 | * Return true if the domain has a higher priority ready task. The curr | 134 | * Return true if the domain has a higher priority ready task. The curr |
135 | * task must belong to the domain. | 135 | * task must belong to the domain. |
136 | */ | 136 | */ |
137 | static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr) | 137 | static int mc_preempt_needed(struct domain *dom, struct task_struct* curr) |
138 | { | 138 | { |
139 | struct task_struct *next = dom->peek_ready(dom); | 139 | struct task_struct *next = dom->peek_ready(dom); |
140 | if (!next || !curr) { | 140 | if (!next || !curr) { |
@@ -149,15 +149,45 @@ static noinline int mc_preempt_needed(domain_t *dom, struct task_struct* curr) | |||
149 | * Return next CPU which should preempted or NULL if the domain has no | 149 | * Return next CPU which should preempted or NULL if the domain has no |
150 | * preemptable CPUs. | 150 | * preemptable CPUs. |
151 | */ | 151 | */ |
152 | static inline crit_entry_t* lowest_prio_cpu(domain_t *dom) | 152 | static inline struct crit_entry* lowest_prio_cpu(struct domain *dom) |
153 | { | 153 | { |
154 | struct bheap *heap = domain_data(dom)->heap; | 154 | struct bheap *heap = domain_data(dom)->heap; |
155 | struct bheap_node* hn = bheap_peek(cpu_lower_prio, heap); | 155 | struct bheap_node* hn = bheap_peek(cpu_lower_prio, heap); |
156 | return (hn) ? hn->value : NULL; | 156 | return (hn) ? hn->value : NULL; |
157 | } | 157 | } |
158 | 158 | ||
159 | /** | 159 | /* |
160 | * update_ghost_time() - Time accounting for ghost tasks. | 160 | * Cancel ghost timer. |
161 | */ | ||
162 | static inline void cancel_ghost(struct crit_entry *ce) | ||
163 | { | ||
164 | #ifdef CONFIG_MERGE_TIMERS | ||
165 | cancel_event(&ce->event); | ||
166 | #else | ||
167 | hrtimer_try_to_cancel(&ce->timer); | ||
168 | #endif | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Arm ghost timer. Will merge timers if the option is specified. | ||
173 | */ | ||
174 | static inline void arm_ghost(struct crit_entry *ce, lt_t fire) | ||
175 | { | ||
176 | #ifdef CONFIG_MERGE_TIMERS | ||
177 | struct event_group* group = (is_global(ce->domain)) ? | ||
178 | global_group : crit_cpu(ce)->event_group; | ||
179 | add_event(group, &ce->event, fire); | ||
180 | #else | ||
181 | __hrtimer_start_range_ns(&ce->timer, | ||
182 | ns_to_ktime(when_to_fire), | ||
183 | 0 /* delta */, | ||
184 | HRTIMER_MODE_ABS_PINNED, | ||
185 | 0 /* no wakeup */); | ||
186 | #endif | ||
187 | } | ||
188 | |||
189 | /* | ||
190 | * Time accounting for ghost tasks. | ||
161 | * Must be called before a decision is made involving the task's budget. | 191 | * Must be called before a decision is made involving the task's budget. |
162 | */ | 192 | */ |
163 | static void update_ghost_time(struct task_struct *p) | 193 | static void update_ghost_time(struct task_struct *p) |
@@ -167,14 +197,14 @@ static void update_ghost_time(struct task_struct *p) | |||
167 | BUG_ON(!is_ghost(p)); | 197 | BUG_ON(!is_ghost(p)); |
168 | if (unlikely ((s64)delta < 0)) { | 198 | if (unlikely ((s64)delta < 0)) { |
169 | delta = 0; | 199 | delta = 0; |
170 | TRACE_TASK(p, "WARNING: negative time delta"); | 200 | TRACE_MC_TASK(p, "WARNING: negative time delta"); |
171 | } | 201 | } |
172 | if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) { | 202 | if (tsk_mc_data(p)->mc_job.ghost_budget <= delta) { |
173 | TRACE_TASK(p, "Ghost job could have ended"); | 203 | TRACE_MC_TASK(p, "Ghost job could have ended"); |
174 | tsk_mc_data(p)->mc_job.ghost_budget = 0; | 204 | tsk_mc_data(p)->mc_job.ghost_budget = 0; |
175 | p->se.exec_start = clock; | 205 | p->se.exec_start = clock; |
176 | } else { | 206 | } else { |
177 | TRACE_TASK(p, "Ghost job updated, but didn't finish"); | 207 | TRACE_MC_TASK(p, "Ghost job updated, but didn't finish"); |
178 | tsk_mc_data(p)->mc_job.ghost_budget -= delta; | 208 | tsk_mc_data(p)->mc_job.ghost_budget -= delta; |
179 | p->se.exec_start = clock; | 209 | p->se.exec_start = clock; |
180 | } | 210 | } |
@@ -184,7 +214,7 @@ static void update_ghost_time(struct task_struct *p) | |||
184 | * link_task_to_crit() - Logically run a task at a criticality level. | 214 | * link_task_to_crit() - Logically run a task at a criticality level. |
185 | * Caller must hold @ce's domain's lock. | 215 | * Caller must hold @ce's domain's lock. |
186 | */ | 216 | */ |
187 | static void link_task_to_crit(crit_entry_t *ce, | 217 | static void link_task_to_crit(struct crit_entry *ce, |
188 | struct task_struct *task) | 218 | struct task_struct *task) |
189 | { | 219 | { |
190 | lt_t when_to_fire; | 220 | lt_t when_to_fire; |
@@ -198,10 +228,10 @@ static void link_task_to_crit(crit_entry_t *ce, | |||
198 | 228 | ||
199 | /* Unlink last task */ | 229 | /* Unlink last task */ |
200 | if (ce->linked) { | 230 | if (ce->linked) { |
201 | TRACE_TASK(ce->linked, "Unlinking"); | 231 | TRACE_MC_TASK(ce->linked, "Unlinking"); |
202 | ce->linked->rt_param.linked_on = NO_CPU; | 232 | ce->linked->rt_param.linked_on = NO_CPU; |
203 | if (is_ghost(ce->linked)) { | 233 | if (is_ghost(ce->linked)) { |
204 | hrtimer_try_to_cancel(&ce->timer); | 234 | cancel_ghost(ce); |
205 | if (tsk_mc_data(ce->linked)->mc_job.ghost_budget > 0) { | 235 | if (tsk_mc_data(ce->linked)->mc_job.ghost_budget > 0) { |
206 | /* Job isn't finished, so do accounting */ | 236 | /* Job isn't finished, so do accounting */ |
207 | update_ghost_time(ce->linked); | 237 | update_ghost_time(ce->linked); |
@@ -218,11 +248,7 @@ static void link_task_to_crit(crit_entry_t *ce, | |||
218 | task->se.exec_start = litmus_clock(); | 248 | task->se.exec_start = litmus_clock(); |
219 | when_to_fire = litmus_clock() + | 249 | when_to_fire = litmus_clock() + |
220 | tsk_mc_data(task)->mc_job.ghost_budget; | 250 | tsk_mc_data(task)->mc_job.ghost_budget; |
221 | __hrtimer_start_range_ns(&ce->timer, | 251 | arm_ghost(ce, when_to_fire); |
222 | ns_to_ktime(when_to_fire), | ||
223 | 0 /* delta */, | ||
224 | HRTIMER_MODE_ABS_PINNED, | ||
225 | 0 /* no wakeup */); | ||
226 | } | 252 | } |
227 | } | 253 | } |
228 | 254 | ||
@@ -234,17 +260,18 @@ static void link_task_to_crit(crit_entry_t *ce, | |||
234 | } | 260 | } |
235 | } | 261 | } |
236 | 262 | ||
237 | static void check_for_preempt(domain_t*); | 263 | static void check_for_preempt(struct domain*); |
238 | /** | 264 | /** |
239 | * job_arrival() - Called when a task re-enters the system. | 265 | * job_arrival() - Called when a task re-enters the system. |
240 | * Caller must hold no locks. | 266 | * Caller must hold no locks. |
241 | */ | 267 | */ |
242 | static void job_arrival(struct task_struct *task) | 268 | static void job_arrival(struct task_struct *task) |
243 | { | 269 | { |
244 | domain_t *dom = get_task_domain(task); | 270 | struct domain *dom = get_task_domain(task); |
245 | 271 | ||
246 | TRACE_TASK(task, "Job arriving"); | 272 | TRACE_MC_TASK(task, "Job arriving"); |
247 | BUG_ON(!task); | 273 | BUG_ON(!task); |
274 | |||
248 | if (can_requeue(task)) { | 275 | if (can_requeue(task)) { |
249 | raw_spin_lock(dom->lock); | 276 | raw_spin_lock(dom->lock); |
250 | dom->requeue(dom, task); | 277 | dom->requeue(dom, task); |
@@ -257,7 +284,7 @@ static void job_arrival(struct task_struct *task) | |||
257 | * causing the system to crash when the task is scheduled | 284 | * causing the system to crash when the task is scheduled |
258 | * in two places simultaneously. | 285 | * in two places simultaneously. |
259 | */ | 286 | */ |
260 | TRACE_TASK(task, "Delayed arrival of scheduled task"); | 287 | TRACE_MC_TASK(task, "Delayed arrival of scheduled task"); |
261 | } | 288 | } |
262 | } | 289 | } |
263 | 290 | ||
@@ -267,7 +294,7 @@ static void job_arrival(struct task_struct *task) | |||
267 | */ | 294 | */ |
268 | static void low_prio_arrival(struct task_struct *task) | 295 | static void low_prio_arrival(struct task_struct *task) |
269 | { | 296 | { |
270 | cpu_entry_t *entry; | 297 | struct cpu_entry *entry; |
271 | 298 | ||
272 | /* Race conditions! */ | 299 | /* Race conditions! */ |
273 | if (!can_requeue(task)) return; | 300 | if (!can_requeue(task)) return; |
@@ -278,9 +305,9 @@ static void low_prio_arrival(struct task_struct *task) | |||
278 | goto arrive; | 305 | goto arrive; |
279 | #endif | 306 | #endif |
280 | if (smp_processor_id() != interrupt_cpu) { | 307 | if (smp_processor_id() != interrupt_cpu) { |
281 | entry = cpus[smp_processor_id()]; | 308 | entry = &__get_cpu_var(cpus); |
282 | raw_spin_lock(&entry->redir_lock); | 309 | raw_spin_lock(&entry->redir_lock); |
283 | TRACE_TASK(task, "Adding to redirect queue"); | 310 | TRACE_MC_TASK(task, "Adding to redirect queue"); |
284 | list_add(&tsk_rt(task)->list, &entry->redir); | 311 | list_add(&tsk_rt(task)->list, &entry->redir); |
285 | raw_spin_unlock(&entry->redir_lock); | 312 | raw_spin_unlock(&entry->redir_lock); |
286 | litmus_reschedule(interrupt_cpu); | 313 | litmus_reschedule(interrupt_cpu); |
@@ -299,18 +326,18 @@ static void low_prio_arrival(struct task_struct *task) | |||
299 | static void fix_global_levels(void) | 326 | static void fix_global_levels(void) |
300 | { | 327 | { |
301 | int c; | 328 | int c; |
302 | cpu_entry_t *e; | 329 | struct cpu_entry *e; |
303 | struct list_head *pos, *safe; | 330 | struct list_head *pos, *safe; |
304 | struct task_struct *t; | 331 | struct task_struct *t; |
305 | 332 | ||
306 | TRACE("Fixing global levels\n"); | 333 | TRACE("Fixing global levels\n"); |
307 | for_each_online_cpu(c) { | 334 | for_each_online_cpu(c) { |
308 | e = cpus[c]; | 335 | e = &per_cpu(cpus, c); |
309 | raw_spin_lock(&e->redir_lock); | 336 | raw_spin_lock(&e->redir_lock); |
310 | list_for_each_safe(pos, safe, &e->redir) { | 337 | list_for_each_safe(pos, safe, &e->redir) { |
311 | t = list_entry(pos, struct task_struct, rt_param.list); | 338 | t = list_entry(pos, struct task_struct, rt_param.list); |
312 | TRACE_TASK(t, "Dequeued redirected job"); | 339 | BUG_ON(!t); |
313 | BUG_ON(is_queued(t)); | 340 | TRACE_MC_TASK(t, "Dequeued redirected job"); |
314 | list_del_init(pos); | 341 | list_del_init(pos); |
315 | job_arrival(t); | 342 | job_arrival(t); |
316 | } | 343 | } |
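
fix_global_levels() drains the per-CPU redirect lists on the designated interrupt CPU: remote CPUs only enqueue under redir_lock, and the interrupt CPU later dequeues each entry and re-runs job_arrival(). A simplified user-space model of that add-then-drain pattern, using a mutex and a singly linked list in place of the kernel primitives (it detaches the whole list before processing, which is a simplification of the locking discipline above, not a copy of it):

    #include <pthread.h>
    #include <stdio.h>

    struct node { struct node *next; int job_id; };

    static struct node *redir_head;          /* one such list per cpu in the kernel */
    static pthread_mutex_t redir_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Remote CPU: hand the job to the interrupt CPU instead of admitting it here. */
    static void redirect(struct node *n)
    {
            pthread_mutex_lock(&redir_lock);
            n->next = redir_head;
            redir_head = n;
            pthread_mutex_unlock(&redir_lock);
    }

    /* Interrupt CPU: detach the list, then re-admit every queued job. */
    static void drain(void)
    {
            struct node *n, *next;

            pthread_mutex_lock(&redir_lock);
            n = redir_head;
            redir_head = NULL;
            pthread_mutex_unlock(&redir_lock);

            for (; n; n = next) {
                    next = n->next;
                    printf("re-admitting job %d\n", n->job_id);
            }
    }

    int main(void)
    {
            struct node a = { .job_id = 1 }, b = { .job_id = 2 };
            redirect(&a);
            redirect(&b);
            drain();
            return 0;
    }
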
@@ -324,10 +351,10 @@ static void fix_global_levels(void) | |||
324 | * The task must first have been linked to one of the CPU's crit_entries. | 351 | * The task must first have been linked to one of the CPU's crit_entries. |
325 | * Caller must hold the entry lock. | 352 | * Caller must hold the entry lock. |
326 | */ | 353 | */ |
327 | static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task) | 354 | static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task) |
328 | { | 355 | { |
329 | int i = entry_level(entry); | 356 | int i = entry_level(entry); |
330 | TRACE_TASK(task, "Linking to P%d", entry->cpu); | 357 | TRACE_MC_TASK(task, "Linking to P%d", entry->cpu); |
331 | BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu); | 358 | BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu); |
332 | BUG_ON(task && is_ghost(task)); | 359 | BUG_ON(task && is_ghost(task)); |
333 | 360 | ||
@@ -348,10 +375,10 @@ static void link_task_to_cpu(cpu_entry_t *entry, struct task_struct *task) | |||
348 | * | 375 | * |
349 | * Caller must hold the lock for @dom and @ce's CPU lock. | 376 | * Caller must hold the lock for @dom and @ce's CPU lock. |
350 | */ | 377 | */ |
351 | static void preempt(domain_t *dom, crit_entry_t *ce) | 378 | static void preempt(struct domain *dom, struct crit_entry *ce) |
352 | { | 379 | { |
353 | struct task_struct *task = dom->take_ready(dom); | 380 | struct task_struct *task = dom->take_ready(dom); |
354 | cpu_entry_t *entry = crit_cpu(ce); | 381 | struct cpu_entry *entry = crit_cpu(ce); |
355 | 382 | ||
356 | BUG_ON(!task); | 383 | BUG_ON(!task); |
357 | TRACE_CRIT_ENTRY(ce, "Preempted by " TS, TA(task)); | 384 | TRACE_CRIT_ENTRY(ce, "Preempted by " TS, TA(task)); |
@@ -373,17 +400,25 @@ static void preempt(domain_t *dom, crit_entry_t *ce) | |||
373 | * This should be called after a new task has been linked to @entry. | 400 | * This should be called after a new task has been linked to @entry. |
374 | * The caller must hold the @entry->lock, but this method will release it. | 401 | * The caller must hold the @entry->lock, but this method will release it. |
375 | */ | 402 | */ |
376 | static void update_crit_levels(cpu_entry_t *entry) | 403 | static void update_crit_levels(struct cpu_entry *entry) |
377 | { | 404 | { |
378 | int i; | 405 | int i, global_preempted; |
379 | crit_entry_t *ce; | 406 | struct crit_entry *ce; |
380 | struct task_struct *tasks[NUM_CRIT_LEVELS]; | 407 | struct task_struct *readmit[NUM_CRIT_LEVELS]; |
381 | enum crit_level level = entry_level(entry); | 408 | enum crit_level level = entry_level(entry); |
382 | 409 | ||
383 | /* Remove lower priority tasks from the entry */ | 410 | /* Remove lower priority tasks from the entry */ |
384 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { | 411 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { |
385 | ce = &entry->crit_entries[i]; | 412 | ce = &entry->crit_entries[i]; |
386 | tasks[i] = ce->linked; | 413 | |
414 | global_preempted = entry->scheduled == ce->linked && | ||
415 | ce->linked && entry->linked && | ||
416 | !is_ghost(ce->linked) && is_global(ce->domain); | ||
417 | /* Do not readmit global tasks that were preempted! They cannot | ||
418 | * be re-admitted until they are descheduled, for the reasons | ||
419 | * explained in job_arrival. | ||
420 | */ | ||
421 | readmit[i] = (!global_preempted) ? ce->linked : NULL; | ||
387 | ce->usable = 0; | 422 | ce->usable = 0; |
388 | if (ce->linked) | 423 | if (ce->linked) |
389 | link_task_to_crit(ce, NULL); | 424 | link_task_to_crit(ce, NULL); |
@@ -394,8 +429,8 @@ static void update_crit_levels(cpu_entry_t *entry) | |||
394 | /* Re-admit tasks to the system */ | 429 | /* Re-admit tasks to the system */ |
395 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { | 430 | for (i = level + 1; i < NUM_CRIT_LEVELS; i++) { |
396 | ce = &entry->crit_entries[i]; | 431 | ce = &entry->crit_entries[i]; |
397 | if (tasks[i]) | 432 | if (readmit[i]) |
398 | low_prio_arrival(tasks[i]); | 433 | low_prio_arrival(readmit[i]); |
399 | } | 434 | } |
400 | } | 435 | } |
401 | 436 | ||
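
update_crit_levels() now refuses to re-admit a task that was globally scheduled, non-ghost, actually running on this CPU, and has just been preempted; everything else recorded in readmit[] goes back through low_prio_arrival(). A standalone sketch of that predicate, with stubbed struct types (fake_*) that only model the fields the decision reads:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_task { bool ghost; };
    struct fake_crit_entry {
            struct fake_task *linked;        /* task linked at this criticality   */
            bool global_domain;              /* is this level scheduled globally? */
    };
    struct fake_cpu {
            struct fake_task *scheduled;     /* task physically on the cpu   */
            struct fake_task *linked;        /* task the cpu should run next */
    };

    /* Mirrors the readmit decision: a globally scheduled, non-ghost task that
     * was running here and just lost the cpu must not be re-admitted yet. */
    static bool should_readmit(const struct fake_cpu *e,
                               const struct fake_crit_entry *ce)
    {
            bool global_preempted = ce->linked && e->linked &&
                    e->scheduled == ce->linked &&
                    !ce->linked->ghost && ce->global_domain;
            return !global_preempted;
    }

    int main(void)
    {
            struct fake_task lo = { .ghost = false }, hi = { .ghost = false };
            struct fake_crit_entry ce = { .linked = &lo, .global_domain = true };
            struct fake_cpu cpu = { .scheduled = &lo, .linked = &hi };

            printf("readmit? %d\n", should_readmit(&cpu, &ce));  /* 0: hold back */
            return 0;
    }
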
@@ -405,11 +440,11 @@ static void update_crit_levels(cpu_entry_t *entry) | |||
405 | * Makes gigantic nasty assumption that there is 1 global criticality level, | 440 | * Makes gigantic nasty assumption that there is 1 global criticality level, |
406 | * and it is the last one in each list, so it doesn't call update_crit.. | 441 | * and it is the last one in each list, so it doesn't call update_crit.. |
407 | */ | 442 | */ |
408 | static void check_for_preempt(domain_t *dom) | 443 | static void check_for_preempt(struct domain *dom) |
409 | { | 444 | { |
410 | int preempted = 1; | 445 | int preempted = 1; |
411 | cpu_entry_t *entry; | 446 | struct cpu_entry *entry; |
412 | crit_entry_t *ce; | 447 | struct crit_entry *ce; |
413 | 448 | ||
414 | if (is_global(dom)) { | 449 | if (is_global(dom)) { |
415 | /* Loop until we find a non-preemptable CPU */ | 450 | /* Loop until we find a non-preemptable CPU */ |
@@ -443,17 +478,17 @@ static void check_for_preempt(domain_t *dom) | |||
443 | static void remove_from_all(struct task_struct* task) | 478 | static void remove_from_all(struct task_struct* task) |
444 | { | 479 | { |
445 | int update = 0; | 480 | int update = 0; |
446 | cpu_entry_t *entry; | 481 | struct cpu_entry *entry; |
447 | crit_entry_t *ce; | 482 | struct crit_entry *ce; |
448 | domain_t *dom = get_task_domain(task); | 483 | struct domain *dom = get_task_domain(task); |
449 | 484 | ||
450 | TRACE_TASK(task, "Removing from everything"); | 485 | TRACE_MC_TASK(task, "Removing from everything"); |
451 | BUG_ON(!task); | 486 | BUG_ON(!task); |
452 | 487 | ||
453 | raw_spin_lock(dom->lock); | 488 | raw_spin_lock(dom->lock); |
454 | 489 | ||
455 | if (task->rt_param.linked_on != NO_CPU) { | 490 | if (task->rt_param.linked_on != NO_CPU) { |
456 | entry = cpus[task->rt_param.linked_on]; | 491 | entry = &per_cpu(cpus, task->rt_param.linked_on); |
457 | raw_spin_lock(&entry->lock); | 492 | raw_spin_lock(&entry->lock); |
458 | 493 | ||
459 | /* Unlink only if task is still linked post lock */ | 494 | /* Unlink only if task is still linked post lock */ |
@@ -492,7 +527,7 @@ static void remove_from_all(struct task_struct* task) | |||
492 | */ | 527 | */ |
493 | static void job_completion(struct task_struct *task, int forced) | 528 | static void job_completion(struct task_struct *task, int forced) |
494 | { | 529 | { |
495 | TRACE_TASK(task, "Completed"); | 530 | TRACE_MC_TASK(task, "Completed"); |
496 | sched_trace_task_completion(task, forced); | 531 | sched_trace_task_completion(task, forced); |
497 | BUG_ON(!task); | 532 | BUG_ON(!task); |
498 | 533 | ||
@@ -525,11 +560,19 @@ static void job_completion(struct task_struct *task, int forced) | |||
525 | /** | 560 | /** |
526 | * mc_ghost_exhausted() - Complete logically running ghost task. | 561 | * mc_ghost_exhausted() - Complete logically running ghost task. |
527 | */ | 562 | */ |
563 | #ifdef CONFIG_MERGE_TIMERS | ||
564 | static void mc_ghost_exhausted(struct rt_event *e) | ||
565 | { | ||
566 | struct crit_entry *ce = container_of(e, struct crit_entry, event); | ||
567 | #else | ||
528 | static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) | 568 | static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) |
529 | { | 569 | { |
570 | struct crit_entry *ce = container_of(timer, struct crit_entry, timer); | ||
571 | #endif | ||
572 | |||
530 | unsigned long flags; | 573 | unsigned long flags; |
531 | struct task_struct *tmp = NULL; | 574 | struct task_struct *tmp = NULL; |
532 | crit_entry_t *ce = container_of(timer, crit_entry_t, timer);; | 575 | |
533 | 576 | ||
534 | local_irq_save(flags); | 577 | local_irq_save(flags); |
535 | TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing"); | 578 | TRACE_CRIT_ENTRY(ce, "Ghost exhausted firing"); |
@@ -553,7 +596,9 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) | |||
553 | job_completion(tmp, 0); | 596 | job_completion(tmp, 0); |
554 | 597 | ||
555 | local_irq_restore(flags); | 598 | local_irq_restore(flags); |
599 | #ifndef CONFIG_MERGE_TIMERS | ||
556 | return HRTIMER_NORESTART; | 600 | return HRTIMER_NORESTART; |
601 | #endif | ||
557 | } | 602 | } |
558 | 603 | ||
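
With CONFIG_MERGE_TIMERS the callback receives a struct rt_event, otherwise a struct hrtimer; in both cases mc_ghost_exhausted() recovers its crit_entry with container_of() on the embedded member. A user-space model of that pattern (container_of is re-derived locally and the struct names are illustrative, not the plugin's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int armed; };
    struct event { int prio; };

    struct crit_entry_model {
            int level;
            struct timer timer;              /* embedded: plain hrtimer case  */
            struct event event;              /* embedded: merged-timer case   */
    };

    /* Either callback recovers the enclosing entry from its own member. */
    static void fired_timer(struct timer *t)
    {
            struct crit_entry_model *ce =
                    container_of(t, struct crit_entry_model, timer);
            printf("timer fired for level %d\n", ce->level);
    }

    static void fired_event(struct event *e)
    {
            struct crit_entry_model *ce =
                    container_of(e, struct crit_entry_model, event);
            printf("event fired for level %d\n", ce->level);
    }

    int main(void)
    {
            struct crit_entry_model ce = { .level = 2 };
            fired_timer(&ce.timer);
            fired_event(&ce.event);
            return 0;
    }
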
559 | /** | 604 | /** |
@@ -563,10 +608,10 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
563 | { | 608 | { |
564 | unsigned long flags; | 609 | unsigned long flags; |
565 | struct task_struct *first = bheap_peek(rt->order, tasks)->value; | 610 | struct task_struct *first = bheap_peek(rt->order, tasks)->value; |
566 | domain_t *dom = get_task_domain(first); | 611 | struct domain *dom = get_task_domain(first); |
567 | 612 | ||
568 | raw_spin_lock_irqsave(dom->lock, flags); | 613 | raw_spin_lock_irqsave(dom->lock, flags); |
569 | TRACE_TASK(first, "Jobs released"); | 614 | TRACE_MC_TASK(first, "Jobs released"); |
570 | __merge_ready(rt, tasks); | 615 | __merge_ready(rt, tasks); |
571 | check_for_preempt(dom); | 616 | check_for_preempt(dom); |
572 | raw_spin_unlock_irqrestore(dom->lock, flags); | 617 | raw_spin_unlock_irqrestore(dom->lock, flags); |
@@ -579,7 +624,7 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
579 | static void mc_task_new(struct task_struct *t, int on_rq, int running) | 624 | static void mc_task_new(struct task_struct *t, int on_rq, int running) |
580 | { | 625 | { |
581 | unsigned long flags; | 626 | unsigned long flags; |
582 | cpu_entry_t* entry; | 627 | struct cpu_entry* entry; |
583 | enum crit_level level = tsk_mc_crit(t); | 628 | enum crit_level level = tsk_mc_crit(t); |
584 | 629 | ||
585 | local_irq_save(flags); | 630 | local_irq_save(flags); |
@@ -587,9 +632,9 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running) | |||
587 | 632 | ||
588 | /* Assign domain */ | 633 | /* Assign domain */ |
589 | if (level < CRIT_LEVEL_C) | 634 | if (level < CRIT_LEVEL_C) |
590 | entry = cpus[get_partition(t)]; | 635 | entry = &per_cpu(cpus, get_partition(t)); |
591 | else | 636 | else |
592 | entry = cpus[task_cpu(t)]; | 637 | entry = &per_cpu(cpus, task_cpu(t)); |
593 | t->rt_param._domain = entry->crit_entries[level].domain; | 638 | t->rt_param._domain = entry->crit_entries[level].domain; |
594 | 639 | ||
595 | /* Setup job params */ | 640 | /* Setup job params */ |
@@ -619,7 +664,7 @@ static void mc_task_wake_up(struct task_struct *task) | |||
619 | lt_t now = litmus_clock(); | 664 | lt_t now = litmus_clock(); |
620 | local_irq_save(flags); | 665 | local_irq_save(flags); |
621 | 666 | ||
622 | TRACE_TASK(task, "Wakes up"); | 667 | TRACE_MC_TASK(task, "Wakes up"); |
623 | if (is_tardy(task, now)) { | 668 | if (is_tardy(task, now)) { |
624 | /* Task missed its last release */ | 669 | /* Task missed its last release */ |
625 | release_at(task, now); | 670 | release_at(task, now); |
@@ -638,7 +683,7 @@ static void mc_task_block(struct task_struct *task) | |||
638 | { | 683 | { |
639 | unsigned long flags; | 684 | unsigned long flags; |
640 | local_irq_save(flags); | 685 | local_irq_save(flags); |
641 | TRACE_TASK(task, "Block at %llu", litmus_clock()); | 686 | TRACE_MC_TASK(task, "Block at %llu", litmus_clock()); |
642 | remove_from_all(task); | 687 | remove_from_all(task); |
643 | local_irq_restore(flags); | 688 | local_irq_restore(flags); |
644 | } | 689 | } |
@@ -651,11 +696,11 @@ static void mc_task_exit(struct task_struct *task) | |||
651 | unsigned long flags; | 696 | unsigned long flags; |
652 | local_irq_save(flags); | 697 | local_irq_save(flags); |
653 | BUG_ON(!is_realtime(task)); | 698 | BUG_ON(!is_realtime(task)); |
654 | TRACE_TASK(task, "RIP"); | 699 | TRACE_MC_TASK(task, "RIP"); |
655 | 700 | ||
656 | remove_from_all(task); | 701 | remove_from_all(task); |
657 | if (tsk_rt(task)->scheduled_on != NO_CPU) { | 702 | if (tsk_rt(task)->scheduled_on != NO_CPU) { |
658 | cpus[tsk_rt(task)->scheduled_on]->scheduled = NULL; | 703 | per_cpu(cpus, tsk_rt(task)->scheduled_on).scheduled = NULL; |
659 | tsk_rt(task)->scheduled_on = NO_CPU; | 704 | tsk_rt(task)->scheduled_on = NO_CPU; |
660 | } | 705 | } |
661 | 706 | ||
@@ -689,9 +734,9 @@ static long mc_admit_task(struct task_struct* task) | |||
689 | static struct task_struct* mc_schedule(struct task_struct * prev) | 734 | static struct task_struct* mc_schedule(struct task_struct * prev) |
690 | { | 735 | { |
691 | unsigned long flags; | 736 | unsigned long flags; |
692 | domain_t *dom; | 737 | struct domain *dom; |
693 | crit_entry_t *ce; | 738 | struct crit_entry *ce; |
694 | cpu_entry_t* entry = cpus[smp_processor_id()]; | 739 | struct cpu_entry* entry = &__get_cpu_var(cpus); |
695 | int i, out_of_time, sleep, preempt, exists, blocks, global, lower; | 740 | int i, out_of_time, sleep, preempt, exists, blocks, global, lower; |
696 | struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL; | 741 | struct task_struct *dtask = NULL, *ready_task = NULL, *next = NULL; |
697 | 742 | ||
@@ -703,6 +748,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev) | |||
703 | BUG_ON(is_realtime(prev) && !entry->scheduled); | 748 | BUG_ON(is_realtime(prev) && !entry->scheduled); |
704 | 749 | ||
705 | /* Determine state */ | 750 | /* Determine state */ |
751 | raw_spin_lock(&entry->lock); | ||
706 | exists = entry->scheduled != NULL; | 752 | exists = entry->scheduled != NULL; |
707 | blocks = exists && !is_running(entry->scheduled); | 753 | blocks = exists && !is_running(entry->scheduled); |
708 | out_of_time = exists && budget_enforced(entry->scheduled) && | 754 | out_of_time = exists && budget_enforced(entry->scheduled) && |
@@ -715,17 +761,20 @@ static struct task_struct* mc_schedule(struct task_struct * prev) | |||
715 | 761 | ||
716 | if (exists) { | 762 | if (exists) { |
717 | entry->scheduled->rt_param.scheduled_on = NO_CPU; | 763 | entry->scheduled->rt_param.scheduled_on = NO_CPU; |
718 | TRACE_TASK(prev, | 764 | TRACE_MC_TASK(prev, |
719 | "blocks:%d out_of_time:%d sleep:%d preempt:%d " | 765 | "blocks:%d out_of_time:%d sleep:%d preempt:%d " |
720 | "state:%d sig:%d global:%d", | 766 | "state:%d sig:%d global:%d", |
721 | blocks, out_of_time, sleep, preempt, | 767 | blocks, out_of_time, sleep, preempt, |
722 | prev->state, signal_pending(prev), global); | 768 | prev->state, signal_pending(prev), global); |
723 | } | 769 | } |
770 | raw_spin_unlock(&entry->lock); | ||
771 | |||
724 | 772 | ||
725 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 773 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
726 | if (smp_processor_id() == interrupt_cpu) | 774 | if (smp_processor_id() == interrupt_cpu) |
727 | fix_global_levels(); | 775 | fix_global_levels(); |
728 | #endif | 776 | #endif |
777 | |||
729 | /* If a task blocks we have no choice but to reschedule */ | 778 | /* If a task blocks we have no choice but to reschedule */ |
730 | if (blocks) | 779 | if (blocks) |
731 | remove_from_all(entry->scheduled); | 780 | remove_from_all(entry->scheduled); |
@@ -769,6 +818,7 @@ static struct task_struct* mc_schedule(struct task_struct * prev) | |||
769 | raw_spin_unlock(dom->lock); | 818 | raw_spin_unlock(dom->lock); |
770 | update_crit_levels(entry); | 819 | update_crit_levels(entry); |
771 | raw_spin_lock(&entry->lock); | 820 | raw_spin_lock(&entry->lock); |
821 | continue; | ||
772 | } | 822 | } |
773 | } | 823 | } |
774 | raw_spin_unlock(dom->lock); | 824 | raw_spin_unlock(dom->lock); |
@@ -784,22 +834,12 @@ static struct task_struct* mc_schedule(struct task_struct * prev) | |||
784 | raw_spin_unlock(&entry->lock); | 834 | raw_spin_unlock(&entry->lock); |
785 | local_irq_restore(flags); | 835 | local_irq_restore(flags); |
786 | if (next) | 836 | if (next) |
787 | TRACE_TASK(next, "Scheduled at %llu", litmus_clock()); | 837 | TRACE_MC_TASK(next, "Scheduled at %llu", litmus_clock()); |
788 | else if (exists && !next) | 838 | else if (exists && !next) |
789 | TRACE("Becomes idle at %llu\n", litmus_clock()); | 839 | TRACE("Becomes idle at %llu\n", litmus_clock()); |
790 | return next; | 840 | return next; |
791 | } | 841 | } |
792 | 842 | ||
793 | static long mc_activate_plugin(void) | ||
794 | { | ||
795 | #ifdef CONFIG_RELEASE_MASTER | ||
796 | interrupt_cpu = atomic_read(&release_master_cpu); | ||
797 | if (interrupt_cpu == NO_CPU) | ||
798 | interrupt_cpu = 0; | ||
799 | #endif | ||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | /* ************************************************************************** | 843 | /* ************************************************************************** |
804 | * Initialization | 844 | * Initialization |
805 | * ************************************************************************** */ | 845 | * ************************************************************************** */ |
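
Throughout this change the separate cpus[] pointer table is dropped: the per-CPU state is declared with DEFINE_PER_CPU and reached directly through per_cpu() / __get_cpu_var(). A minimal user-space model of the access pattern, where a plain array indexed by a cpu id stands in for the kernel's per-CPU sections:

    #include <stdio.h>

    #define NR_CPUS 4

    struct cpu_state { int cpu; int has_scheduled; };

    /* Kernel: DEFINE_PER_CPU(struct cpu_entry, cpus). Here a flat array
     * stands in for the per-cpu storage. */
    static struct cpu_state cpus_storage[NR_CPUS];

    /* per_cpu(cpus, cpu) ~ direct access to that cpu's copy. */
    #define per_cpu_model(cpu)      (&cpus_storage[(cpu)])

    int main(void)
    {
            int cpu;

            /* No pointer table to fill in at init time; each copy is
             * addressed directly, as the hunks below now do. */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    per_cpu_model(cpu)->cpu = cpu;

            printf("cpu 2 id: %d\n", per_cpu_model(2)->cpu);
            return 0;
    }
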
@@ -807,23 +847,36 @@ static long mc_activate_plugin(void) | |||
807 | /* Initialize values here so that they are allocated with the module | 847 | /* Initialize values here so that they are allocated with the module |
808 | * and destroyed when the module is unloaded. | 848 | * and destroyed when the module is unloaded. |
809 | */ | 849 | */ |
810 | DEFINE_PER_CPU(cpu_entry_t, _mc_cpus); | 850 | |
811 | /* LVL-A */ | 851 | /* LVL-A */ |
812 | DEFINE_PER_CPU(domain_data_t, _mc_crit_a); | 852 | DEFINE_PER_CPU(struct domain_data, _mc_crit_a); |
813 | DEFINE_PER_CPU(rt_domain_t, _mc_crit_a_rt); | 853 | DEFINE_PER_CPU(rt_domain_t, _mc_crit_a_rt); |
814 | /* LVL-B */ | 854 | /* LVL-B */ |
815 | DEFINE_PER_CPU(domain_data_t, _mc_crit_b); | 855 | DEFINE_PER_CPU(struct domain_data, _mc_crit_b); |
816 | DEFINE_PER_CPU(rt_domain_t, _mc_crit_b_rt); | 856 | DEFINE_PER_CPU(rt_domain_t, _mc_crit_b_rt); |
817 | /* LVL-C */ | 857 | /* LVL-C */ |
818 | static domain_data_t _mc_crit_c; | 858 | static struct domain_data _mc_crit_c; |
819 | static rt_domain_t _mc_crit_c_rt; | 859 | static rt_domain_t _mc_crit_c_rt; |
820 | struct bheap _mc_heap_c; | 860 | struct bheap _mc_heap_c; |
821 | struct bheap_node _mc_nodes_c[NR_CPUS]; | 861 | struct bheap_node _mc_nodes_c[NR_CPUS]; |
822 | 862 | ||
823 | /* | 863 | #ifdef CONFIG_MERGE_TIMERS |
824 | * XXX commented out because I think this was an obvious typo | 864 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER |
825 | */ | 865 | struct event_group _mc_group; |
826 | /* release_at)_ */ | 866 | #else |
867 | DEFINE_PER_CPU(struct event_group, _mc_groups); | ||
868 | #endif | ||
869 | #endif | ||
870 | |||
871 | static long mc_activate_plugin(void) | ||
872 | { | ||
873 | #ifdef CONFIG_RELEASE_MASTER | ||
874 | interrupt_cpu = atomic_read(&release_master_cpu); | ||
875 | if (interrupt_cpu == NO_CPU) | ||
876 | interrupt_cpu = 0; | ||
877 | #endif | ||
878 | return 0; | ||
879 | } | ||
827 | 880 | ||
828 | static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { | 881 | static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { |
829 | .plugin_name = "MC", | 882 | .plugin_name = "MC", |
@@ -837,8 +890,8 @@ static struct sched_plugin mc_plugin __cacheline_aligned_in_smp = { | |||
837 | .activate_plugin = mc_activate_plugin, | 890 | .activate_plugin = mc_activate_plugin, |
838 | }; | 891 | }; |
839 | 892 | ||
840 | static void init_crit_entry(crit_entry_t *ce, enum crit_level level, | 893 | static void init_crit_entry(struct crit_entry *ce, enum crit_level level, |
841 | domain_data_t *dom_data, | 894 | struct domain_data *dom_data, |
842 | struct bheap_node *node) | 895 | struct bheap_node *node) |
843 | { | 896 | { |
844 | ce->level = level; | 897 | ce->level = level; |
@@ -846,12 +899,17 @@ static void init_crit_entry(crit_entry_t *ce, enum crit_level level, | |||
846 | ce->node = node; | 899 | ce->node = node; |
847 | ce->domain = &dom_data->domain; | 900 | ce->domain = &dom_data->domain; |
848 | ce->usable = 1; | 901 | ce->usable = 1; |
849 | atomic_set(&ce->dirty, 1); | 902 | #ifdef CONFIG_MERGE_TIMERS |
903 | init_event(&ce->event, level, mc_ghost_exhausted, | ||
904 | event_list_alloc(GFP_ATOMIC)); | ||
905 | #else | ||
850 | hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 906 | hrtimer_init(&ce->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
851 | ce->timer.function = mc_ghost_exhausted; | 907 | ce->timer.function = mc_ghost_exhausted; |
908 | #endif | ||
909 | |||
852 | } | 910 | } |
853 | 911 | ||
854 | static void init_local_domain(cpu_entry_t *entry, domain_data_t *dom_data, | 912 | static void init_local_domain(struct cpu_entry *entry, struct domain_data *dom_data, |
855 | enum crit_level level) | 913 | enum crit_level level) |
856 | { | 914 | { |
857 | dom_data->heap = NULL; | 915 | dom_data->heap = NULL; |
@@ -859,12 +917,12 @@ static void init_local_domain(cpu_entry_t *entry, domain_data_t *dom_data, | |||
859 | init_crit_entry(dom_data->crit_entry, level, dom_data, NULL); | 917 | init_crit_entry(dom_data->crit_entry, level, dom_data, NULL); |
860 | } | 918 | } |
861 | 919 | ||
862 | static void init_global_domain(domain_data_t *dom_data, enum crit_level level, | 920 | static void init_global_domain(struct domain_data *dom_data, enum crit_level level, |
863 | struct bheap *heap, struct bheap_node *nodes) | 921 | struct bheap *heap, struct bheap_node *nodes) |
864 | { | 922 | { |
865 | int cpu; | 923 | int cpu; |
866 | cpu_entry_t *entry; | 924 | struct cpu_entry *entry; |
867 | crit_entry_t *ce; | 925 | struct crit_entry *ce; |
868 | struct bheap_node *node; | 926 | struct bheap_node *node; |
869 | 927 | ||
870 | dom_data->crit_entry = NULL; | 928 | dom_data->crit_entry = NULL; |
@@ -872,7 +930,7 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level, | |||
872 | bheap_init(heap); | 930 | bheap_init(heap); |
873 | 931 | ||
874 | for_each_online_cpu(cpu) { | 932 | for_each_online_cpu(cpu) { |
875 | entry = cpus[cpu]; | 933 | entry = &per_cpu(cpus, cpu); |
876 | node = &nodes[cpu]; | 934 | node = &nodes[cpu]; |
877 | ce = &entry->crit_entries[level]; | 935 | ce = &entry->crit_entries[level]; |
878 | init_crit_entry(ce, level, dom_data, node); | 936 | init_crit_entry(ce, level, dom_data, node); |
@@ -881,40 +939,62 @@ static void init_global_domain(domain_data_t *dom_data, enum crit_level level, | |||
881 | } | 939 | } |
882 | } | 940 | } |
883 | 941 | ||
884 | static inline void init_edf_domain(domain_t *dom, rt_domain_t *rt) | 942 | static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt, |
943 | int timer_cpu, int prio) | ||
885 | { | 944 | { |
886 | pd_domain_init(dom, rt, edf_ready_order, NULL, | 945 | pd_domain_init(dom, rt, edf_ready_order, NULL, |
887 | mc_release_jobs, mc_preempt_needed, | 946 | mc_release_jobs, mc_preempt_needed, |
888 | edf_higher_prio); | 947 | edf_higher_prio); |
948 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | ||
949 | #ifdef CONFIG_MERGE_TIMERS | ||
950 | rt->event_group = &_mc_group; | ||
951 | rt->prio = prio; | ||
952 | #else | ||
953 | rt->release_master = interrupt_cpu; | ||
954 | #endif | ||
955 | #elif CONFIG_MERGE_TIMERS | ||
956 | rt->event_group = &_mc_groups[timer_cpu]; | ||
957 | rt->prio = prio; | ||
958 | #endif | ||
889 | } | 959 | } |
890 | 960 | ||
891 | static int __init init_mc(void) | 961 | static int __init init_mc(void) |
892 | { | 962 | { |
893 | int cpu; | 963 | int cpu; |
894 | cpu_entry_t *entry; | 964 | struct cpu_entry *entry; |
965 | struct domain_data *dom_data; | ||
895 | rt_domain_t *rt; | 966 | rt_domain_t *rt; |
896 | domain_data_t *dom_data; | ||
897 | raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */ | 967 | raw_spinlock_t *a_dom, *b_dom, *c_dom; /* For lock debugger */ |
898 | 968 | ||
899 | for_each_online_cpu(cpu) { | 969 | for_each_online_cpu(cpu) { |
900 | entry = &per_cpu(_mc_cpus, cpu); | 970 | entry = &per_cpu(cpus, cpu); |
901 | cpus[cpu] = entry; | ||
902 | 971 | ||
903 | /* CPU */ | 972 | /* CPU */ |
904 | entry->cpu = cpu; | 973 | entry->cpu = cpu; |
905 | entry->scheduled = NULL; | 974 | entry->scheduled = NULL; |
906 | entry->linked = NULL; | 975 | entry->linked = NULL; |
907 | raw_spin_lock_init(&entry->lock); | 976 | raw_spin_lock_init(&entry->lock); |
977 | |||
908 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 978 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
909 | raw_spin_lock_init(&entry->redir_lock); | 979 | raw_spin_lock_init(&entry->redir_lock); |
910 | INIT_LIST_HEAD(&entry->redir); | 980 | INIT_LIST_HEAD(&entry->redir); |
911 | #endif | 981 | #endif |
912 | 982 | ||
983 | #ifdef CONFIG_MERGE_TIMERS | ||
984 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | ||
985 | entry->event_group = &_mc_group; | ||
986 | #else | ||
987 | init_event_group(&_mc_groups[cpu], | ||
988 | CONFIG_MERGE_TIMERS_WINDOW, cpu); | ||
989 | entry->event_group = &_mc_groups[cpu]; | ||
990 | #endif | ||
991 | #endif | ||
992 | |||
913 | /* CRIT_LEVEL_A */ | 993 | /* CRIT_LEVEL_A */ |
914 | dom_data = &per_cpu(_mc_crit_a, cpu); | 994 | dom_data = &per_cpu(_mc_crit_a, cpu); |
915 | rt = &per_cpu(_mc_crit_a_rt, cpu); | 995 | rt = &per_cpu(_mc_crit_a_rt, cpu); |
916 | init_local_domain(entry, dom_data, CRIT_LEVEL_A); | 996 | init_local_domain(entry, dom_data, CRIT_LEVEL_A); |
917 | init_edf_domain(&dom_data->domain, rt); | 997 | init_edf_domain(&dom_data->domain, rt, cpu, CRIT_LEVEL_A); |
918 | a_dom = dom_data->domain.lock; | 998 | a_dom = dom_data->domain.lock; |
919 | raw_spin_lock_init(a_dom); | 999 | raw_spin_lock_init(a_dom); |
920 | dom_data->domain.name = "LVL-A"; | 1000 | dom_data->domain.name = "LVL-A"; |
@@ -923,16 +1003,25 @@ static int __init init_mc(void) | |||
923 | dom_data = &per_cpu(_mc_crit_b, cpu); | 1003 | dom_data = &per_cpu(_mc_crit_b, cpu); |
924 | rt = &per_cpu(_mc_crit_b_rt, cpu); | 1004 | rt = &per_cpu(_mc_crit_b_rt, cpu); |
925 | init_local_domain(entry, dom_data, CRIT_LEVEL_B); | 1005 | init_local_domain(entry, dom_data, CRIT_LEVEL_B); |
926 | init_edf_domain(&dom_data->domain, rt); | 1006 | init_edf_domain(&dom_data->domain, rt, cpu, CRIT_LEVEL_B); |
927 | b_dom = dom_data->domain.lock; | 1007 | b_dom = dom_data->domain.lock; |
928 | raw_spin_lock_init(b_dom); | 1008 | raw_spin_lock_init(b_dom); |
929 | dom_data->domain.name = "LVL-B"; | 1009 | dom_data->domain.name = "LVL-B"; |
930 | } | 1010 | } |
931 | 1011 | ||
1012 | #ifdef CONFIG_MERGE_TIMERS | ||
1013 | #ifdef CONFIG_PLUGIN_MC_RELEASE_MASTER | ||
1014 | init_event_group(&_mc_group, CONFIG_MERGE_TIMERS_WINDOW, interrupt_cpu); | ||
1015 | global_group = &_mc_group; | ||
1016 | #else | ||
1017 | global_group = &_mc_groups[0]; | ||
1018 | #endif | ||
1019 | #endif | ||
1020 | |||
932 | /* CRIT_LEVEL_C */ | 1021 | /* CRIT_LEVEL_C */ |
933 | init_global_domain(&_mc_crit_c, CRIT_LEVEL_C, | 1022 | init_global_domain(&_mc_crit_c, CRIT_LEVEL_C, |
934 | &_mc_heap_c, _mc_nodes_c); | 1023 | &_mc_heap_c, _mc_nodes_c); |
935 | init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt); | 1024 | init_edf_domain(&_mc_crit_c.domain, &_mc_crit_c_rt, 0, CRIT_LEVEL_C); |
936 | c_dom = _mc_crit_c.domain.lock; | 1025 | c_dom = _mc_crit_c.domain.lock; |
937 | raw_spin_lock_init(c_dom); | 1026 | raw_spin_lock_init(c_dom); |
938 | _mc_crit_c.domain.name = "LVL-C"; | 1027 | _mc_crit_c.domain.name = "LVL-C"; |