 include/linux/interrupt.h       |   4
 include/litmus/litmus_softirq.h |  92
 include/litmus/preempt.h        |   3
 include/litmus/rt_param.h       |   7
 include/litmus/sched_plugin.h   |   7
 kernel/softirq.c                |   4
 litmus/Kconfig                  |   6
 litmus/edf_common.c             |   6
 litmus/litmus.c                 |  56
 litmus/litmus_softirq.c         | 506
 litmus/sched_gsn_edf.c          | 102
 litmus/sched_plugin.c           |  12
 12 files changed, 644 insertions(+), 161 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a0384a4d1e6f..7a9c1857bf0d 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -459,6 +459,10 @@ struct tasklet_struct
 	atomic_t count;
 	void (*func)(unsigned long);
 	unsigned long data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	struct task_struct *owner;
+#endif
 };
 
 #define DECLARE_TASKLET(name, func, data) \
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
index 37804fcfe8a3..6d1f85c2e093 100644
--- a/include/litmus/litmus_softirq.h
+++ b/include/litmus/litmus_softirq.h
@@ -1,11 +1,99 @@
-
 #include <linux/interrupt.h>
 
+/*
+   Threaded tasklet handling for Litmus. Tasklets
+   are scheduled with the priority of the tasklet's
+   owner -- that is, the RT task on whose behalf the
+   tasklet runs.
+
+   Tasklets are currently scheduled in FIFO order with
+   NO priority inheritance for "blocked" tasklets.
+
+   klitirqd assumes the priority of the owner of the
+   tasklet when the tasklet is next to execute.
+
+   Currently, hi-tasklets are scheduled before
+   low-tasklets, regardless of the priority of the
+   low-tasklets. This priority inversion probably needs
+   to be fixed, though it is not an issue in our work
+   with GPUs, as GPUs (and their associated klitirqds)
+   are owned for exclusive time periods, so no
+   inversions can occur.
+
+   FIXME: Let low-tasklets with higher Litmus priority
+   be scheduled before hi-tasklets of lower Litmus
+   priority.
+
+   TODO: Decide if tasklets should really be scheduled
+   FIFO. If not, we should probably ensure tasklets with
+   the same owner still execute in FIFO order, lest we
+   confuse drivers with out-of-order execution (though
+   they probably should still be able to handle it by
+   tasklet processing design).
+*/
+
+
+
 #define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD
 
-void trigger_litirqs(struct task_struct*);
 
+//void trigger_litirqs(struct task_struct*);
+
+/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
+   The actual launch of the threads is deferred to kworker's
+   workqueue, so the daemons will likely not yet be running
+   when this function returns, though the required
+   data will be initialized. */
 void spawn_klitirqd(void);
 
+
+/* Raises a flag telling the klitirqds to terminate.
+   Termination is asynchronous, so some threads may still be
+   running after this function returns. */
 void kill_klitirqd(void);
 
+
+/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqds are ready
+   to handle tasklets; 0 otherwise. */
+int klitirqd_is_ready(void);
+
+
+void __litmus_tasklet_schedule(
+		struct tasklet_struct *t,
+		unsigned int k_id);
+
+/* schedule a tasklet on klitirqd #k_id */
+static inline void litmus_tasklet_schedule(
+		struct tasklet_struct *t,
+		unsigned int k_id)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__litmus_tasklet_schedule(t, k_id);
+}
+
+
+extern void __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+		unsigned int k_id);
+
+/* schedule a hi tasklet on klitirqd #k_id */
+static inline void litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+		unsigned int k_id)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__litmus_tasklet_hi_schedule(t, k_id);
+}
+
+
+extern void __litmus_tasklet_hi_schedule_first(
+		struct tasklet_struct *t,
+		unsigned int k_id);
+
+/* schedule a hi tasklet on klitirqd #k_id on the next go-around */
+/* PRECONDITION: Interrupts must be disabled. */
+static inline void litmus_tasklet_hi_schedule_first(
+		struct tasklet_struct *t,
+		unsigned int k_id)
+{
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		__litmus_tasklet_hi_schedule_first(t, k_id);
+}
\ No newline at end of file
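
For orientation, here is a minimal caller-side sketch of the API declared above (hypothetical driver code, not part of this patch; it follows the same pattern as the #if 0 test code added to litmus/litmus.c below). The caller sets the tasklet's owner to the real-time task on whose behalf the work runs, then dispatches it to a specific klitirqd thread:

	/* Hypothetical usage sketch -- not part of this patch. */
	#include <linux/interrupt.h>
	#include <litmus/litmus_softirq.h>

	static struct tasklet_struct my_tasklet;

	static void my_bottom_half(unsigned long data)
	{
		/* executed by klitirqd_th<k_id>, which runs at the
		   priority of my_tasklet.owner */
	}

	static void dispatch_bottom_half(struct task_struct* rt_owner,
					 unsigned int k_id)
	{
		tasklet_init(&my_tasklet, my_bottom_half, 0);
		my_tasklet.owner = rt_owner; /* klitirqd inherits this task's priority */
		litmus_tasklet_schedule(&my_tasklet, k_id);
	}
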
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index 260c6fe17986..cb221f33310e 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -26,10 +26,13 @@ const char* sched_state_name(int s);
 		    (x), #x, __FUNCTION__);				\
 	} while (0);
 
+#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) /* ignore */
+/*
 #define TRACE_SCHED_STATE_CHANGE(x, y, cpu)				\
 	TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n",			\
 		    cpu, (x), sched_state_name(x),			\
 		    (y), sched_state_name(y))
+*/
 
 
 typedef enum scheduling_state {
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 5de422c742f6..26ee27bf98ff 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -108,6 +108,11 @@ struct rt_param {
 	/* is the task present? (true if it can be scheduled) */
 	unsigned int		present:1;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	/* proxy threads have minimum priority by default */
+	unsigned int		is_proxy_thread:1;
+#endif
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Is the task being priority-boosted by a locking protocol? */
 	unsigned int		priority_boosted:1;
@@ -128,7 +133,7 @@ struct rt_param {
 	 * an increased task priority.
 	 */
 	struct task_struct*	inh_task;
-
+	
 #ifdef CONFIG_NP_SECTION
 	/* For the FMLP under PSN-EDF, it is required to make the task
 	 * non-preemptive from kernel space. In order not to interfere with
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 6e7cabdddae8..a28b9eff5f27 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -58,6 +58,10 @@ typedef void (*task_exit_t) (struct task_struct *);
 typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
				 void* __user config);
 
+/* Called to change inheritance levels of given task */
+typedef void (*set_prio_inh_t)(struct task_struct* t,
+			       struct task_struct* prio_inh);
+typedef void (*clear_prio_inh_t)(struct task_struct* t);
 
 /********************* sys call backends ********************/
 /* This function causes the caller to sleep until the next release */
@@ -96,6 +100,9 @@ struct sched_plugin {
 #ifdef CONFIG_LITMUS_LOCKING
 	/* locking protocols */
 	allocate_lock_t		allocate_lock;
+
+	set_prio_inh_t		set_prio_inh;
+	clear_prio_inh_t	clear_prio_inh;
 #endif
 } __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 07b4f1b1a73a..266cea2b9721 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -475,6 +475,10 @@ void tasklet_init(struct tasklet_struct *t,
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
diff --git a/litmus/Kconfig b/litmus/Kconfig
index a354e3dce19f..58137f3e374e 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -185,13 +185,13 @@ endmenu
 menu "Interrupt Handling"
 
 config LITMUS_SOFTIRQD
-	bool "Spawn ksoftirqlitmusd interrupt handling threads."
+	bool "Spawn klitirqd interrupt handling threads."
	depends on LITMUS_LOCKING
	default n
	help
-	  Create ksoftirqlitmusd interrupt handling threads. Work must be
+	  Create klitirqd interrupt handling threads. Work must be
	  specifically dispatched to these workers. (Softirqs for
-	  Litmus tasks are not magically redirected to ksoftirqlitmusd.)
+	  Litmus tasks are not magically redirected to klitirqd.)
 
	  G-EDF ONLY for now!
 
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 9b44dc2d8d1e..fbd67ab5f467 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -65,6 +65,12 @@ int edf_higher_prio(struct task_struct* first,
 
 
	return !is_realtime(second_task) ||
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	/* proxy threads always lose w/o inheritance. */
+	(first_task->rt_param.is_proxy_thread <
+	 second_task->rt_param.is_proxy_thread) ||
+#endif
 
	/* is the deadline of the first task earlier?
	 * Then it has higher priority.
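
Since is_proxy_thread is a one-bit field, the comparison above is an unsigned 0/1 compare: it is true only when first is a normal task and second is a proxy, so an uninherited proxy thread never beats a normal real-time task through this clause, while equal flags fall through to the deadline comparison. A standalone sketch of the four cases (plain C, hypothetical, outside the kernel):

	/* Sketch of the proxy-thread clause in edf_higher_prio,
	   assuming 0/1 bitfield values. */
	#include <assert.h>

	static int proxy_clause(unsigned int first_is_proxy,
				unsigned int second_is_proxy)
	{
		return first_is_proxy < second_is_proxy;
	}

	int main(void)
	{
		assert(proxy_clause(0, 1) == 1); /* normal task beats proxy outright */
		assert(proxy_clause(1, 0) == 0); /* proxy never wins by this clause */
		assert(proxy_clause(0, 0) == 0); /* equal flags: fall through to */
		assert(proxy_clause(1, 1) == 0); /*   the deadline comparison    */
		return 0;
	}
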
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 11ccaafd50de..f461f07d9511 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -17,6 +17,8 @@
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
 
+//#include <litmus/litmus_softirq.h>
+
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -123,6 +125,11 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
 
	target->rt_param.task_params = tp;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	/* proxy thread off by default */
+	target->rt_param.is_proxy_thread = 0;
+#endif
+
	retval = 0;
      out_unlock:
	read_unlock_irq(&tasklist_lock);
@@ -257,6 +264,34 @@ asmlinkage long sys_query_job_no(unsigned int __user *job)
	return retval;
 }
 
+
+#if 0
+static int tasklet_count = 0;
+static struct tasklet_struct tasklet;
+
+static void test_tasklet(unsigned long data)
+{
+	TRACE_CUR("HELLO TASKLET!!! -- %x\n", data);
+}
+
+struct blarg
+{
+	struct work_struct work;
+	struct task_struct* tsk;
+};
+
+static void trigger_tasklet(struct work_struct *work)
+{
+	struct blarg* b = container_of(work, struct blarg, work);
+
+	tasklet.owner = b->tsk;
+	litmus_tasklet_schedule(&tasklet, tasklet_count);
+	tasklet_count = (tasklet_count + 1) % NR_LITMUS_SOFTIRQD;
+
+	kfree(b);
+}
+#endif
+
 /* sys_null_call() is only used for determining raw system call
  * overheads (kernel entry, kernel exit). It has no useful side effects.
  * If ts is non-NULL, then the current Feather-Trace time is recorded.
@@ -270,6 +305,27 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
		now = get_cycles();
		ret = put_user(now, ts);
	}
+
+#if 0
+	// TESTING-- REMOVE THIS
+	if(!current->rt_param.is_proxy_thread)
+	{
+		struct blarg* delayed_launch;
+
+		static int first = 1;
+		if(first)
+		{
+			tasklet_init(&tasklet, test_tasklet, (unsigned long)0xcafebabe);
+			first = 0;
+		}
+
+		delayed_launch = kmalloc(sizeof(struct blarg), GFP_ATOMIC);
+		INIT_WORK(&(delayed_launch->work), trigger_tasklet);
+		delayed_launch->tsk = current;
+		schedule_work(&(delayed_launch->work));
+	}
+	//
+#endif
 
	return ret;
 }
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index 4517a94823e7..d1ea833dc8c4 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -15,6 +15,7 @@
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/tick.h>
+#include <linux/slab.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
@@ -26,6 +27,16 @@
 #include <litmus/sched_plugin.h>
 #include <litmus/litmus_softirq.h>
 
+
+/* counts number of daemons ready to handle litmus irqs. */
+static atomic_t num_ready_klitirqds = ATOMIC_INIT(0);
+
+enum pending_flags
+{
+	LIT_TASKLET_LOW = 0x1,
+	LIT_TASKLET_HI = LIT_TASKLET_LOW<<1
+};
+
 /* only support tasklet processing for now. */
 struct tasklet_head
 {
@@ -33,38 +44,28 @@ struct tasklet_head
	struct tasklet_struct **tail;
 };
 
-struct tasklet_owner
-{
-	struct tasklet_owner* next;
-	struct task_struct* job;
-};
-
-/* used to create a parallel list to lit_tasklet_*_vec to
-   associate a litmus priority with the tasklet */
-struct tasklet_owner_head
-{
-	struct tasklet_owner *head;
-	struct tasklet_owner **tail;
-};
-
-static struct task_struct* klitirqd[NR_LITMUS_SOFTIRQD];
+// sys_sched_setscheduler(current->pid, SCHED_FIFO, &param);
+// TODO: current->extra_flags |= PFE_SOFTIRQ;
+
+struct klitirqd_info
+{
+	struct task_struct*	klitirqd;
+	raw_spinlock_t		lock;
+	struct tasklet_head	pending_tasklets;
+	struct tasklet_head	pending_tasklets_hi;
+};
 
 /* one list for each klitirqd */
-static raw_spinlock_t litirq_locks[NR_LITMUS_SOFTIRQD];
-
-static struct tasklet_head lit_tasklet_vec[NR_LITMUS_SOFTIRQD];
-static struct tasklet_owner_head lit_tasklet_owner_vec[NR_LITMUS_SOFTIRQD];
+static struct klitirqd_info klitirqds[NR_LITMUS_SOFTIRQD];
 
-static struct tasklet_head lit_tasklet_hi_vec[NR_LITMUS_SOFTIRQD];
-static struct tasklet_owner_head lit_tasklet_hi_owner_vec[NR_LITMUS_SOFTIRQD];
 
 
-inline int klitirqd_id(struct task_struct* tsk)
+inline unsigned int klitirqd_id(struct task_struct* tsk)
 {
	int i;
	for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
	{
-		if(klitirqd[i] == tsk)
+		if(klitirqds[i].klitirqd == tsk)
		{
			return i;
		}
@@ -72,23 +73,15 @@ inline int klitirqd_id(struct task_struct* tsk)
 
	BUG();
 
-	return -1;
+	return 0;
 }
 
-enum pending_flags
-{
-	LIT_TASKLET_LOW = 0x1,
-	LIT_TASKLET_HI = 0x2
-};
-
 
-inline static u32 litirq_pending_hi_irqoff(struct task_struct* which)
+inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which)
 {
	u32 pending = 0;
 
-	int offset = klitirqd_id(which);
-
-	if(lit_tasklet_hi_vec[offset].tail != &lit_tasklet_hi_vec[offset].head)
+	if(which->pending_tasklets_hi.head != NULL)
	{
		pending = LIT_TASKLET_HI;
	}
@@ -96,13 +89,11 @@ inline static u32 litirq_pending_hi_irqoff(struct task_struct* which)
	return pending;
 };
 
-inline static u32 litirq_pending_low_irqoff(struct task_struct* which)
+inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which)
 {
	u32 pending = 0;
 
-	int offset = klitirqd_id(which);
-
-	if(lit_tasklet_vec[offset].tail != &lit_tasklet_vec[offset].head)
+	if(which->pending_tasklets.head != NULL)
	{
		pending = LIT_TASKLET_LOW;
	}
@@ -111,7 +102,7 @@ inline static u32 litirq_pending_low_irqoff(struct task_struct* which)
 };
 
 
-static u32 litirq_pending_irqoff(struct task_struct* which)
+static u32 litirq_pending_irqoff(struct klitirqd_info* which)
 {
	u32 pending = 0;
 
@@ -122,74 +113,71 @@ static u32 litirq_pending_irqoff(struct task_struct* which)
 };
 
 
-static u32 litirq_pending(struct task_struct* which)
+static u32 litirq_pending(struct klitirqd_info* which)
 {
	unsigned long flags;
	u32 pending;
 
-	int offset = klitirqd_id(which);
-
-	raw_spin_lock_irqsave(&litirq_locks[offset], flags);
+	raw_spin_lock_irqsave(&which->lock, flags);
	pending = litirq_pending_irqoff(which);
-	raw_spin_unlock_irqrestore(&litirq_locks[offset], flags);
+	raw_spin_unlock_irqrestore(&which->lock, flags);
 
	return pending;
 };
 
 
-static int needs_prio_change(struct task_struct* tsk,
-			     struct tasklet_head* tasklet_vec)
+static int needs_prio_change(struct klitirqd_info* which,
+			     struct tasklet_head* pending_tasklets)
 {
	unsigned long flags;
	int ret = 0;
+
+	raw_spin_lock_irqsave(&which->lock, flags);
 
-	int offset = klitirqd_id(tsk);
-	raw_spin_lock_irqsave(&litirq_locks[offset], flags);
-
-	if((tasklet_vec == lit_tasklet_hi_vec) && litirq_pending_hi_irqoff(tsk))
+	if((pending_tasklets == &which->pending_tasklets_hi) && litirq_pending_hi_irqoff(which))
	{
-		if(lit_tasklet_hi_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
+		if(which->pending_tasklets_hi.head->owner != tsk_rt(which->klitirqd)->inh_task)
		{
			ret = 1;
		}
	}
-	else if((tasklet_vec == lit_tasklet_vec) && litirq_pending_irqoff(tsk))
+	else if((pending_tasklets == &which->pending_tasklets) && litirq_pending_irqoff(which))
	{
-		if(lit_tasklet_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
+		if(which->pending_tasklets.head->owner != tsk_rt(which->klitirqd)->inh_task)
		{
			ret = 1;
		}
	}
 
-	raw_spin_unlock_irqrestore(&litirq_locks[offset], flags);
+	raw_spin_unlock_irqrestore(&which->lock, flags);
+
+	TRACE_TASK(which->klitirqd, "priority change needed: %d\n", ret);
 
	return ret;
 }
 
 
-static void reeval_prio(struct task_struct* tsk)
+static void __reeval_prio(struct klitirqd_info* which)
 {
-	unsigned long flags;
	u32 pending = 0;
+	struct task_struct* tsk = which->klitirqd;
	struct task_struct* new_prio = tsk_rt(tsk)->inh_task;
 
-	int offset = klitirqd_id(tsk);
-
-	raw_spin_lock_irqsave(&litirq_locks[offset], flags);
-
-	if(pending |= litirq_pending_irqoff(tsk))
+	if(litirq_pending_irqoff(which))
	{
-		if(lit_tasklet_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
+		pending = 1;
+		if(which->pending_tasklets.head->owner != tsk_rt(tsk)->inh_task)
		{
-			new_prio = lit_tasklet_owner_vec[offset].head->job;
+			new_prio = which->pending_tasklets.head->owner;
		}
	}
 
-	if(pending |= litirq_pending_hi_irqoff(tsk))
+	if(litirq_pending_hi_irqoff(which))
	{
-		if(lit_tasklet_hi_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
+		pending = 1;
+		if(which->pending_tasklets_hi.head->owner != tsk_rt(tsk)->inh_task)
		{
-			new_prio = lit_tasklet_hi_owner_vec[offset].head->job;
+			new_prio = which->pending_tasklets_hi.head->owner;
		}
	}
 
@@ -200,60 +188,125 @@ static void reeval_prio(struct task_struct* tsk)
 
	if(new_prio != tsk_rt(tsk)->inh_task)
	{
-		/* Change priority!! */
+		if(new_prio != NULL)
+		{
+			if(!in_interrupt())
+			{
+				TRACE_CUR("%s: Priority change: %s/%d to %s/%d\n", __FUNCTION__,
+					((tsk_rt(tsk)->inh_task) ? tsk_rt(tsk)->inh_task : tsk)->comm,
+					((tsk_rt(tsk)->inh_task) ? tsk_rt(tsk)->inh_task : tsk)->pid,
+					new_prio->comm, new_prio->pid);
+			}
+			else
+			{
+				TRACE("%s: Priority change: %s/%d to %s/%d\n", __FUNCTION__,
+					((tsk_rt(tsk)->inh_task) ? tsk_rt(tsk)->inh_task : tsk)->comm,
+					((tsk_rt(tsk)->inh_task) ? tsk_rt(tsk)->inh_task : tsk)->pid,
+					new_prio->comm, new_prio->pid);
+			}
+			litmus->set_prio_inh(tsk, new_prio);
+		}
+		else
+		{
+			if(likely(!in_interrupt()))
+			{
+				TRACE_CUR("%s: Priority change: %s/%d to NULL (reverting)\n",
+					__FUNCTION__, tsk->comm, tsk->pid);
+			}
+			else
+			{
+				// is this a bug?
+				TRACE("%s: Priority change: %s/%d to NULL (reverting)\n",
+					__FUNCTION__, tsk->comm, tsk->pid);
+			}
+
+			litmus->clear_prio_inh(tsk);
+		}
	}
+}
+
+static void reeval_prio(struct klitirqd_info* which)
+{
+	unsigned long flags;
 
-	raw_spin_unlock_irqrestore(&litirq_locks[offset], flags);
+	raw_spin_lock_irqsave(&which->lock, flags);
+	__reeval_prio(which);
+	raw_spin_unlock_irqrestore(&which->lock, flags);
 }
 
 
-static void wakeup_litirqd(struct task_struct* which)
+static void wakeup_litirqd_locked(struct klitirqd_info* which)
 {
	/* Interrupts are disabled: no need to stop preemption */
-
-	if (which && which->state != TASK_RUNNING)
+	if (which && which->klitirqd && which->klitirqd->state != TASK_RUNNING)
+	{
+		__reeval_prio(which); /* configure the proper priority */
+		TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__,
+			which->klitirqd->comm, which->klitirqd->pid);
+		wake_up_process(which->klitirqd);
+	}
+}
+
+
+#if 0
+static void wakeup_litirqd(struct klitirqd_info* which)
+{
+	/* Interrupts are disabled: no need to stop preemption */
+	if (which && which->klitirqd && which->klitirqd->state != TASK_RUNNING)
	{
		reeval_prio(which); /* configure the proper priority */
-		wake_up_process(which);
+		TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__,
+			which->klitirqd->comm, which->klitirqd->pid);
+		wake_up_process(which->klitirqd);
	}
 }
 
 
-static void do_lit_tasklet(struct tasklet_head* tasklet_vec,
-			   struct tasklet_owner_head* owner_vec)
+void trigger_litirqs(struct task_struct* tsk)
+{
+	struct klitirqd_info* which = &klitirqds[klitirqd_id(tsk)];
+
+	TRACE("%s: entering, triggering %s/%d\n", __FUNCTION__, tsk->comm, tsk->pid);
+
+	while (litirq_pending(which))
+	{
+		wakeup_litirqd(which);
+	}
+	TRACE("%s: exiting, done triggering %s/%d\n", __FUNCTION__, tsk->comm, tsk->pid);
+}
+#endif
+
+
+static void do_lit_tasklet(struct klitirqd_info* which,
+			   struct tasklet_head* pending_tasklets)
 {
	unsigned long flags;
	struct tasklet_struct *list;
-	struct tasklet_owner *owner;
-	int id = klitirqd_id(current);
-
-	raw_spin_lock_irqsave(&litirq_locks[id], flags);
+
+	raw_spin_lock_irqsave(&which->lock, flags);
 
	/* copy out the tasklets for our private use. */
-	list = tasklet_vec[id].head;
-	tasklet_vec[id].head = NULL;
-	tasklet_vec[id].tail = &tasklet_vec[id].head;
-
-	owner = owner_vec[id].head;
-	owner_vec[id].head = NULL;
-	owner_vec[id].tail = &owner_vec[id].head;
+	list = pending_tasklets->head;
+	pending_tasklets->head = NULL;
+	pending_tasklets->tail = &pending_tasklets->head;
 
-	raw_spin_unlock_irqrestore(&litirq_locks[id], flags);
+	raw_spin_unlock_irqrestore(&which->lock, flags);
 
	while(list)
	{
		struct tasklet_struct *t = list;
-		struct tasklet_owner *t_owner = owner;
 
		/* advance, lest we forget */
		list = list->next;
-		owner = owner->next;
 
		/* execute tasklet if it has my priority and is free */
-		if ((t_owner->job == tsk_rt(current)->inh_task) && tasklet_trylock(t)) {
+		if ((t->owner == tsk_rt(current)->inh_task) && tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				{
					BUG();
+				}
+				TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__);
				t->func(t->data);
				tasklet_unlock(t);
				continue; /* process more tasklets */
@@ -262,65 +315,137 @@ static void do_lit_tasklet(struct tasklet_head* tasklet_vec,
		}
 
		/* couldn't process tasklet. put it back at the end of the main queue. */
-		t->next = NULL;
-		t_owner->next = NULL;
+		TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__);
 
-		raw_spin_lock_irqsave(&litirq_locks[id], flags);
+		t->next = NULL;
 
-		*(tasklet_vec[id].tail) = t;
-		tasklet_vec[id].tail = &(t->next);
+		raw_spin_lock_irqsave(&which->lock, flags);
 
-		*(owner_vec[id].tail) = t_owner;
-		owner_vec[id].tail = &(t_owner->next);
+		*(pending_tasklets->tail) = t;
+		pending_tasklets->tail = &t->next;
 
-		raw_spin_unlock_irqrestore(&litirq_locks[id], flags);
+		raw_spin_unlock_irqrestore(&which->lock, flags);
	}
 }
 
-static void do_litirq(void)
+static void do_litirq(struct klitirqd_info* which)
 {
	u32 pending;
	int resched = 0;
 
-	if (in_interrupt() || !is_realtime(current))
+	if(in_interrupt())
	{
+		TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__);
+		return;
+	}
+
+	if(which->klitirqd != current)
+	{
+		TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n",
+			__FUNCTION__, current->comm, current->pid,
+			which->klitirqd->comm, which->klitirqd->pid);
+		return;
+	}
+
+	if(!is_realtime(current))
+	{
+		TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n",
+			__FUNCTION__, current->policy);
		return;
	}
 
	/* since we only handle tasklets, no need for RCU triggers? */
 
-	pending = litirq_pending(current);
+	pending = litirq_pending(which);
	if(pending)
	{
		/* extract the work to do and do it! */
		if(pending & LIT_TASKLET_HI)
		{
-			do_lit_tasklet(lit_tasklet_hi_vec, lit_tasklet_hi_owner_vec);
-			resched = needs_prio_change(current, lit_tasklet_hi_vec);
+			TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__);
+			do_lit_tasklet(which, &which->pending_tasklets_hi);
+			resched = needs_prio_change(which, &which->pending_tasklets_hi);
+
+			if(resched)
+			{
+				TRACE_CUR("%s: HI tasklets of another priority remain. Skipping LOW tasklets.\n", __FUNCTION__);
+			}
		}
 
		if(!resched && (pending & LIT_TASKLET_LOW))
		{
-			do_lit_tasklet(lit_tasklet_vec, lit_tasklet_owner_vec);
-			resched = needs_prio_change(current, lit_tasklet_vec);
+			TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__);
+			do_lit_tasklet(which, &which->pending_tasklets);
+			resched = needs_prio_change(which, &which->pending_tasklets);
		}
	}
 }
 
 
-/* TODO: WHAT'S THE DEAL WITH BOTTOM HALVES? */
+int set_litmus_daemon_sched(struct klitirqd_info* which)
+{
+	/* set up a daemon job that will never complete.
+	   it should only ever run on behalf of another
+	   real-time task.
+
+	   TODO: Transition to a new job whenever a
+	   new tasklet is handled */
+
+	int ret = 0;
+	struct task_struct* tsk = which->klitirqd;
+
+	struct rt_task tp = {
+		.exec_cost = 0,
+		.period = 1000000000, /* dummy 1 second period */
+		.phase = 0,
+		.cpu = 0,
+		.budget_policy = NO_ENFORCEMENT,
+		.cls = RT_CLASS_BEST_EFFORT
+	};
+
+	struct sched_param param = { .sched_priority = 0};
+
+
+	/* set task params and mark as proxy thread. */
+	tsk->rt_param.task_params = tp;
+	tsk->rt_param.is_proxy_thread = 1;
+
+	/* litmus_admit_task */
+	ret = litmus_admit_task(tsk);
+
+	/* inform the OS we're SCHED_LITMUS --
+	   must happen after litmus_admit_task() */
+	sched_setscheduler_nocheck(current, SCHED_LITMUS, &param);
+
+	return ret;
+}
+
+
+/* TODO: WHAT'S THE DEAL WITH ENABLE/DISABLE BOTTOM HALVES
+   IN ORIGINAL softirq.c? DO WE NEED IT HERE TOO? */
 
 /* main loop for klitsoftirqd */
 static int run_klitirqd(void* dummy)
 {
-	/* TODO: Set as best-effort Litmus thread. */
+	struct klitirqd_info* which = &klitirqds[klitirqd_id(current)];
+
+	int rt_status = set_litmus_daemon_sched(which);
+
+	if(rt_status != 0)
+	{
+		TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__);
+		goto rt_failed;
+	}
 
+	atomic_inc(&num_ready_klitirqds);
+
	set_current_state(TASK_INTERRUPTIBLE);
 
	while (!kthread_should_stop()) {
		preempt_disable();
-		if (!litirq_pending(current)) {
+		if (!litirq_pending(which)) {
			/* sleep for work */
+			TRACE_CUR("%s: No more tasklets. Going to sleep.\n", __FUNCTION__);
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
@@ -328,11 +453,12 @@ static int run_klitirqd(void* dummy)
 
		__set_current_state(TASK_RUNNING);
 
-		while (litirq_pending(current)) {
-
-			do_litirq();
+		while (litirq_pending(which)) {
+			TRACE_CUR("%s: Executing tasklets.\n", __FUNCTION__);
+			do_litirq(which);
 
-			reeval_prio(current); /* check if we need to change priority here */
+			TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__);
+			reeval_prio(which); /* check if we need to change priority here */
 
			preempt_enable_no_resched();
			cond_resched();
@@ -343,47 +469,185 @@ static int run_klitirqd(void* dummy)
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
-	return 0;
+
+	atomic_dec(&num_ready_klitirqds);
+
+rt_failed:
+	litmus_exit_task(current);
+
+	return rt_status;
 }
 
 
-void trigger_litirqs(struct task_struct* which)
+/* executed by a kworker from workqueues */
+static void launch_klitirqd(struct work_struct *unused)
 {
-	while (litirq_pending(which))
+	int i;
+
+	TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
+
+	/* create the daemon threads */
+	for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
	{
-		wakeup_litirqd(which);
-	}
+		klitirqds[i].klitirqd =
+			kthread_create(run_klitirqd, NULL, "klitirqd_th%d", i);
+	}
+
+	TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
+
+	/* unleash the daemons */
+	for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
+	{
+		wake_up_process(klitirqds[i].klitirqd);
+	}
+
+	kfree(unused);
 }
 
 
 void spawn_klitirqd(void)
 {
	int i;
+	struct work_struct* delayed_launch;
+
+	if(atomic_read(&num_ready_klitirqds) != 0)
+	{
+		TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n");
+		return;
+	}
+
+	/* init the tasklet queues */
	for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
	{
-		klitirqd[i] = kthread_create(run_klitirqd, NULL, "klitirqd_th%d", i);
+		klitirqds[i].pending_tasklets.head = NULL;
+		klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head;
+
+		klitirqds[i].pending_tasklets_hi.head = NULL;
+		klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head;
+
+		raw_spin_lock_init(&klitirqds[i].lock);
	}
 
-	for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
-	{
-		wake_up_process(klitirqd[i]);
-	}
+	/* wait to flush the initializations to memory since other threads
+	   will access it. */
+	mb();
+
+	/* tell a work queue to launch the threads. we can't make scheduling
+	   calls since we're in an atomic state. */
+	TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__);
+	delayed_launch = kmalloc(sizeof(struct work_struct), GFP_ATOMIC);
+	INIT_WORK(delayed_launch, launch_klitirqd);
+	schedule_work(delayed_launch);
 }
 
+
 void kill_klitirqd(void)
 {
	int i;
 
-	/* TODO: Put pending tasklets SOMEWHERE-- back to the OS? */
+	TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
+
	for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
	{
-		kthread_stop(klitirqd[i]);
+		kthread_stop(klitirqds[i].klitirqd);
	}
+
+	/* TODO: Put pending tasklets SOMEWHERE-- back to the OS? */
 }
 
 
+int klitirqd_is_ready(void)
+{
+	return(atomic_read(&num_ready_klitirqds) == NR_LITMUS_SOFTIRQD);
+}
 
 
 
+void __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
+{
+	unsigned long flags;
+
+	if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
+	{
+		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
+		BUG();
+	}
+
+	if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
+	{
+		TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
+		BUG();
+	}
+
+	raw_spin_lock_irqsave(&klitirqds[k_id].lock, flags);
+
+	t->next = NULL;
+	*klitirqds[k_id].pending_tasklets.tail = t;
+	klitirqds[k_id].pending_tasklets.tail = &t->next;
+
+	wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */
+
+	raw_spin_unlock_irqrestore(&klitirqds[k_id].lock, flags);
+}
+
+EXPORT_SYMBOL(__litmus_tasklet_schedule);
+
+
+void __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
+{
+	unsigned long flags;
+
+	if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
+	{
+		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
+		BUG();
+	}
+
+	if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
+	{
+		TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
+		BUG();
+	}
+
+	raw_spin_lock_irqsave(&klitirqds[k_id].lock, flags);
+
+	t->next = NULL;
+	*klitirqds[k_id].pending_tasklets_hi.tail = t;
+	klitirqds[k_id].pending_tasklets_hi.tail = &t->next;
+
+	wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */
+
+	raw_spin_unlock_irqrestore(&klitirqds[k_id].lock, flags);
+}
+
+EXPORT_SYMBOL(__litmus_tasklet_hi_schedule);
+
+
+void __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id)
+{
+	BUG_ON(!irqs_disabled());
+
+	if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
+	{
+		TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
+		BUG();
+	}
+
+	if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
+	{
+		TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
+		BUG();
+	}
+
+	raw_spin_lock(&klitirqds[k_id].lock);
+
+	t->next = klitirqds[k_id].pending_tasklets_hi.head;
+	klitirqds[k_id].pending_tasklets_hi.head = t;
+
+	wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */
+
+	raw_spin_unlock(&klitirqds[k_id].lock);
+}
 
+EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index d5d834cc411b..586b7c3f7de1 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -25,6 +25,10 @@
 
 #include <linux/module.h>
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <litmus/litmus_softirq.h>
+#endif
+
 /* Overview of GSN-EDF operations.
  *
  * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
@@ -277,7 +281,7 @@ static void check_for_preemptions(void)
 static noinline void gsnedf_job_arrival(struct task_struct* task)
 {
	BUG_ON(!task);
-
+	
	requeue(task);
	check_for_preemptions();
 }
@@ -298,7 +302,7 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 static noinline void job_completion(struct task_struct *t, int forced)
 {
	BUG_ON(!t);
-
+	
	sched_trace_task_completion(t, forced);
 
	TRACE_TASK(t, "job_completion().\n");
@@ -534,8 +538,8 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
 static void gsnedf_task_wake_up(struct task_struct *task)
 {
	unsigned long flags;
-	lt_t now;
-
+	lt_t now;	
+	
	TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
 
	raw_spin_lock_irqsave(&gsnedf_lock, flags);
@@ -606,43 +610,44 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 
 #include <litmus/fdso.h>
 
-/* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+inline static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
	int linked_on;
	int check_preempt = 0;
 
-	raw_spin_lock(&gsnedf_lock);
-
-	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	if(prio_inh != NULL)
+		TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
+	else
+		TRACE_TASK(t, "inherits priority from NULL\n");
+
	tsk_rt(t)->inh_task = prio_inh;
 
	linked_on = tsk_rt(t)->linked_on;
 
	/* If it is scheduled, then we need to reorder the CPU heap. */
	if (linked_on != NO_CPU) {
		TRACE_TASK(t, "%s: linked on %d\n",
			   __FUNCTION__, linked_on);
		/* Holder is scheduled; need to re-order CPUs.
		 * We can't use heap_decrease() here since
		 * the cpu_heap is ordered in reverse direction, so
		 * it is actually an increase. */
		bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
			     gsnedf_cpus[linked_on]->hn);
		bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
			     gsnedf_cpus[linked_on]->hn);
	} else {
		/* holder may be queued: first stop queue changes */
		raw_spin_lock(&gsnedf.release_lock);
		if (is_queued(t)) {
			TRACE_TASK(t, "%s: is queued\n",
				   __FUNCTION__);
			/* We need to update the position of holder in some
			 * heap. Note that this could be a release heap if we
			 * budget enforcement is used and this job overran. */
			check_preempt =
				!bheap_decrease(edf_ready_order,
						tsk_rt(t)->heap_node);
		} else {
			/* Nothing to do: if it is not queued and not linked
			 * then it is either sleeping or currently being moved
@@ -650,10 +655,10 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
			 * will use the correct priority when enqueuing the
			 * task. */
			TRACE_TASK(t, "%s: is NOT queued => Done.\n",
				   __FUNCTION__);
		}
		raw_spin_unlock(&gsnedf.release_lock);
-
+		
		/* If holder was enqueued in a release heap, then the following
		 * preemption check is pointless, but we can't easily detect
		 * that case. If you want to fix this, then consider that
@@ -665,30 +670,42 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
		 * sure preemption checks get the right task, not the
		 * potentially stale cache. */
			bheap_uncache_min(edf_ready_order,
					  &gsnedf.ready_queue);
			check_for_preemptions();
		}
	}
-
-	raw_spin_unlock(&gsnedf_lock);
 }
 
 /* called with IRQs off */
-static void clear_priority_inheritance(struct task_struct* t)
+static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
	raw_spin_lock(&gsnedf_lock);
 
-	/* A job only stops inheriting a priority when it releases a
-	 * resource. Thus we can make the following assumption.*/
-	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
+	__set_priority_inheritance(t, prio_inh);
 
-	TRACE_TASK(t, "priority restored\n");
-	tsk_rt(t)->inh_task = NULL;
+	raw_spin_unlock(&gsnedf_lock);
+}
 
-	/* Check if rescheduling is necessary. We can't use heap_decrease()
-	 * since the priority was effectively lowered. */
-	unlink(t);
-	gsnedf_job_arrival(t);
+/* called with IRQs off */
+static void clear_priority_inheritance(struct task_struct* t)
+{
+	raw_spin_lock(&gsnedf_lock);
+
+	TRACE_TASK(t, "priority restored\n");
+
+	if(tsk_rt(t)->scheduled_on != NO_CPU)
+	{
+		tsk_rt(t)->inh_task = NULL;
+
+		/* Check if rescheduling is necessary. We can't use heap_decrease()
+		 * since the priority was effectively lowered. */
+		unlink(t);
+		gsnedf_job_arrival(t);
+	}
+	else
+	{
+		__set_priority_inheritance(t, NULL);
+	}
 
	raw_spin_unlock(&gsnedf_lock);
 }
@@ -919,7 +936,6 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
 
 #endif
 
-
 static long gsnedf_activate_plugin(void)
 {
	int cpu;
@@ -946,10 +962,22 @@ static long gsnedf_activate_plugin(void)
		}
 #endif
	}
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	spawn_klitirqd();
+#endif
 
	return 0;
 }
 
+static long gsnedf_deactivate_plugin(void)
+{
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	kill_klitirqd();
+#endif
+	return 0;
+}
+
 /* Plugin object */
 static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "GSN-EDF",
@@ -966,6 +994,12 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
 #ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock		= gsnedf_allocate_lock,
 #endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	.set_prio_inh		= set_priority_inheritance,
+	.clear_prio_inh		= clear_priority_inheritance,
+
+	.deactivate_plugin	= gsnedf_deactivate_plugin,
+#endif
 };
 
 
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index d54886df1f57..9769b6040f8a 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -129,6 +129,14 @@ static long litmus_dummy_allocate_lock(struct litmus_lock **lock, int type,
	return -ENXIO;
 }
 
+static void litmus_dummy_set_prio_inh(struct task_struct* a, struct task_struct* b)
+{
+}
+
+static void litmus_dummy_clear_prio_inh(struct task_struct* t)
+{
+}
+
 #endif
 
 
@@ -149,6 +157,8 @@ struct sched_plugin linux_sched_plugin = {
	.deactivate_plugin = litmus_dummy_deactivate_plugin,
 #ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock = litmus_dummy_allocate_lock,
+	.set_prio_inh = litmus_dummy_set_prio_inh,
+	.clear_prio_inh = litmus_dummy_clear_prio_inh,
 #endif
	.admit_task = litmus_dummy_admit_task
 };
@@ -187,6 +197,8 @@ int register_sched_plugin(struct sched_plugin* plugin)
	CHECK(deactivate_plugin);
 #ifdef CONFIG_LITMUS_LOCKING
	CHECK(allocate_lock);
+	CHECK(set_prio_inh);
+	CHECK(clear_prio_inh);
 #endif
	CHECK(admit_task);
 