author     Glenn Elliott <gelliott@cs.unc.edu>    2011-03-03 00:17:19 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2011-03-03 00:17:19 -0500
commit     9765eaebc4ffb1f1bdf6998c0491d2e37de1a994
tree       31ff20e3b542b23790430a3af2b17d2a569ab44e
parent     0e6a634b0b02a86c9591c1bf8c73ff0cf68475fc
Kludge work-queue processing into klitirqd.  (wip-kernthreads)
NEEDS TESTING!
-rw-r--r--   include/linux/workqueue.h        |  17
-rw-r--r--   include/litmus/litmus_softirq.h  |  24
-rw-r--r--   litmus/litmus_softirq.c          | 151

3 files changed, 163 insertions, 29 deletions
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 25e02c941bac..a5c6cd3210be 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -83,6 +83,9 @@ struct work_struct {
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map lockdep_map;
 #endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	struct task_struct *owner;
+#endif
 };
 
 #define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
@@ -115,11 +118,25 @@ struct execute_work {
 #define __WORK_INIT_LOCKDEP_MAP(n, k)
 #endif
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#define __WORK_INIT_OWNER() \
+	.owner = NULL,
+
+#define PREPARE_OWNER(_work, _owner) \
+	do { \
+		(_work)->owner = (_owner); \
+	} while(0)
+#else
+#define __WORK_INIT_OWNER()
+#define PREPARE_OWNER(_work, _owner)
+#endif
+
 #define __WORK_INITIALIZER(n, f) {				\
 	.data = WORK_DATA_STATIC_INIT(),			\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
+	__WORK_INIT_OWNER()					\
 	}
 
 #define __DELAYED_WORK_INITIALIZER(n, f) {			\
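The two hunks above add an `owner` field to `struct work_struct` and a pair of helper macros. A minimal sketch of how a client would use them follows; the handler and helper names and the owner pointer are illustrative, not part of the patch, and with CONFIG_LITMUS_SOFTIRQD disabled both macros compile away:

```c
#include <linux/workqueue.h>
#include <linux/sched.h>

/* hypothetical deferred handler, run when the work item is processed */
static void my_bottom_half(struct work_struct *w)
{
	/* deferred device processing goes here */
}

/* static initialization picks up .owner = NULL via __WORK_INIT_OWNER() */
static DECLARE_WORK(my_work, my_bottom_half);

/* hypothetical helper: stamp the owning real-time task before queueing */
static void tag_owner(struct task_struct *rt_owner)
{
	PREPARE_OWNER(&my_work, rt_owner);
}
```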
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
index 712c8119ae15..e130a26685a2 100644
--- a/include/litmus/litmus_softirq.h
+++ b/include/litmus/litmus_softirq.h
@@ -2,6 +2,7 @@
 #define __LITMUS_SOFTIRQ_H
 
 #include <linux/interrupt.h>
+#include <linux/workqueue.h>
 
 /*
  Threaded tasklet handling for Litmus. Tasklets
@@ -105,4 +106,25 @@ static inline void litmus_tasklet_hi_schedule_first(
 	__litmus_tasklet_hi_schedule_first(t, k_id);
 }
 
-#endif
\ No newline at end of file
+//////////////
+
+extern void __litmus_schedule_work(
+	struct work_struct* w,
+	unsigned int k_id);
+
+static inline void litmus_schedule_work(
+	struct work_struct* w,
+	unsigned int k_id)
+{
+	__litmus_schedule_work(w, k_id);
+}
+
+#endif
+
+
+
+
+
+
+
+
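The header now exposes `litmus_schedule_work()`, mirroring the existing `litmus_tasklet_*` wrappers. A hedged caller-side sketch follows; the function and variable names are illustrative, `k_id` 0 is assumed to be a valid klitirqd instance, and the owner must be a real-time task or `__litmus_schedule_work()` will BUG, per the .c changes below:

```c
#include <litmus/litmus_softirq.h>

static struct work_struct deferred_work;

/* hypothetical handler executed by the klitirqd kernel thread */
static void deferred_handler(struct work_struct *w)
{
	/* runs while klitirqd inherits the owner's real-time priority */
}

/* hypothetical call site in a driver's bottom-half path */
static void defer_to_klitirqd(struct task_struct *rt_owner)
{
	INIT_WORK(&deferred_work, deferred_handler);
	PREPARE_OWNER(&deferred_work, rt_owner);  /* must be a real-time task */
	litmus_schedule_work(&deferred_work, 0);  /* 0 < NR_LITMUS_SOFTIRQD */
}
```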
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index dbefcb560063..734a7dcdc9ca 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -21,7 +21,8 @@ static atomic_t num_ready_klitirqds = ATOMIC_INIT(0);
 enum pending_flags
 {
     LIT_TASKLET_LOW = 0x1,
-    LIT_TASKLET_HI = LIT_TASKLET_LOW<<1
+    LIT_TASKLET_HI = LIT_TASKLET_LOW<<1,
+    LIT_WORK = LIT_TASKLET_HI<<1
 };
 
 /* only support tasklet processing for now. */
@@ -37,8 +38,11 @@ struct klitirqd_info
 	raw_spinlock_t lock;
 
 	u32 pending;
+
+	/* in order of priority */
+	struct tasklet_head pending_tasklets_hi;
 	struct tasklet_head pending_tasklets;
-	struct tasklet_head pending_tasklets_hi;
+	struct list_head worklist;
 };
 
 /* one list for each klitirqd */
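None of the hunks in this patch initialize the new `worklist` head; the (unshown) klitirqd setup code would have to call `INIT_LIST_HEAD()` on it before `___litmus_schedule_work()` can `list_add_tail()` to it. A sketch under that assumption, using a hypothetical per-thread init helper:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* hypothetical init helper; the real setup lives elsewhere in litmus_softirq.c */
static void klitirqd_info_init(struct klitirqd_info *which)
{
	raw_spin_lock_init(&which->lock);
	which->pending = 0;
	/* ... existing tasklet queue setup ... */
	INIT_LIST_HEAD(&which->worklist);  /* required by the new work path */
}
```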
@@ -66,18 +70,22 @@ inline unsigned int klitirqd_id(struct task_struct* tsk)
 inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which)
 {
 	return (which->pending & LIT_TASKLET_HI);
-};
+}
 
 inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which)
 {
 	return (which->pending & LIT_TASKLET_LOW);
-};
+}
 
+inline static u32 litirq_pending_work_irqoff(struct klitirqd_info* which)
+{
+	return (which->pending & LIT_WORK);
+}
 
 inline static u32 litirq_pending_irqoff(struct klitirqd_info* which)
 {
 	return(which->pending);
-};
+}
 
 
 inline static u32 litirq_pending(struct klitirqd_info* which)
@@ -93,9 +101,15 @@ inline static u32 litirq_pending(struct klitirqd_info* which)
 };
 
 
+/* returns true if thread needs to change priority to continue processing
+   pending tasklets.
+ */
 static int needs_prio_change(struct klitirqd_info* which,
 			     struct tasklet_head* pending_tasklets)
 {
+	/* this function doesn't have to look at work objects since they have
+	   priority below tasklets. */
+
 	unsigned long flags;
 	int ret = 0;
 
@@ -126,33 +140,41 @@ static int needs_prio_change(struct klitirqd_info* which,
 
 static void __reeval_prio(struct klitirqd_info* which)
 {
-	u32 pending = 0;
 	struct task_struct* tsk = which->klitirqd;
 	struct task_struct* new_prio = tsk_rt(tsk)->inh_task;
 
-	if(litirq_pending_irqoff(which))
-	{
-		pending = 1;
-		if(which->pending_tasklets.head->owner != tsk_rt(tsk)->inh_task)
-		{
-			new_prio = which->pending_tasklets.head->owner;
-		}
-	}
-
-	if(litirq_pending_hi_irqoff(which))
-	{
-		pending = 1;
-		if(which->pending_tasklets_hi.head->owner != tsk_rt(tsk)->inh_task)
-		{
-			new_prio = which->pending_tasklets_hi.head->owner;
-		}
-	}
-
-	if(!pending)
+	/* Check in prio-order */
+	u32 pending = litirq_pending_irqoff(which);
+	if(pending & LIT_TASKLET_HI)
+	{
+		if(which->pending_tasklets_hi.head->owner != tsk_rt(tsk)->inh_task)
+		{
+			new_prio = which->pending_tasklets_hi.head->owner;
+		}
+	}
+	else if(pending & LIT_TASKLET_LOW)
+	{
+		if(which->pending_tasklets.head->owner != tsk_rt(tsk)->inh_task)
+		{
+			new_prio = which->pending_tasklets.head->owner;
+		}
+	}
+	else if(pending & LIT_WORK)
+	{
+		struct work_struct* work =
+			list_entry(&which->worklist, struct work_struct, entry);
+		if(work->owner != tsk_rt(tsk)->inh_task)
+		{
+			new_prio = work->owner;
+		}
+	}
+	else
 	{
 		new_prio = NULL;
 	}
 
+
+	/* inherit the proper priority */
 	if(new_prio != tsk_rt(tsk)->inh_task)
 	{
 		if(new_prio != NULL)
@@ -301,6 +323,30 @@ static void do_lit_tasklet(struct klitirqd_info* which,
 	}
 }
 
+static void do_lit_work(struct klitirqd_info* which)
+{
+	unsigned long flags;
+	work_func_t f;
+	struct work_struct* work;
+
+	// only execute one work-queue item to yield to tasklets.
+	// ...is this a good idea?
+
+	raw_spin_lock_irqsave(&which->lock, flags);
+
+	work = list_entry(&which->worklist, struct work_struct, entry);
+	list_del_init(&work->entry);
+
+	raw_spin_unlock_irqrestore(&which->lock, flags);
+
+	// do the work!
+	work_clear_pending(work);
+	f = work->func;
+	f(work);  /* can't touch 'work' after this point,
+		     the user may have free'd it. */
+}
+
+
 static void do_litirq(struct klitirqd_info* which)
 {
 	u32 pending;
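Both `__reeval_prio()` and `do_lit_work()` call `list_entry(&which->worklist, ...)`, which converts the address of the list head itself rather than the first queued item; the usual idiom for peeking at or popping the oldest entry is `list_first_entry()` (or `which->worklist.next`). For comparison only, a sketch of a conventional dequeue; the helper name is illustrative and the caller is assumed to hold `which->lock`:

```c
#include <linux/list.h>
#include <linux/workqueue.h>

/* illustrative helper: pop the oldest pending work item, or NULL if empty */
static struct work_struct* dequeue_work(struct klitirqd_info *which)
{
	struct work_struct *work = NULL;

	if (!list_empty(&which->worklist)) {
		work = list_first_entry(&which->worklist,
					struct work_struct, entry);
		list_del_init(&work->entry);
	}
	return work;
}
```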
@@ -351,6 +397,15 @@ static void do_litirq(struct klitirqd_info* which)
 			do_lit_tasklet(which, &which->pending_tasklets);
 			resched = needs_prio_change(which, &which->pending_tasklets);
 		}
+
+		if(!resched && (pending & LIT_WORK))
+		{
+			TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__);
+			/* Let's pray this doesn't take a long time to execute and
+			   block tasklets.  TODO: TEST THIS POSSIBILITY
+			 */
+			do_lit_work(which);
+		}
 	}
 }
 
@@ -641,7 +696,7 @@ void __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k
 
 	if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
 	{
-		TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
+		TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
 		BUG();
 	}
 
@@ -659,3 +714,43 @@ void __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k
 
 EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
 
+
+static void ___litmus_schedule_work(struct work_struct *w,
+				    struct klitirqd_info *which,
+				    int wakeup)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&which->lock, flags);
+
+	work_pending(w);
+	list_add_tail(&w->entry, &which->worklist);
+
+	which->pending |= LIT_WORK;
+
+	if(wakeup)
+	{
+		wakeup_litirqd_locked(which); /* wakeup the klitirqd */
+	}
+
+	raw_spin_unlock_irqrestore(&which->lock, flags);
+}
+
+void __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
+{
+	if(unlikely(w->owner == NULL) || !is_realtime(w->owner))
+	{
+		TRACE("%s: No owner associated with this work object!\n", __FUNCTION__);
+		BUG();
+	}
+
+	if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
+	{
+		TRACE("%s: No klitirqd_th%u!\n", k_id);
+		BUG();
+	}
+
+	___litmus_schedule_work(w, &klitirqds[k_id], 1);
+}
+
+EXPORT_SYMBOL(__litmus_schedule_work);
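Two small things stand out in this enqueue path: the `TRACE()` in `__litmus_schedule_work()` passes only `k_id` for a "%s ... %u" format (the tasklet variants pass `__FUNCTION__` first), and `work_pending(w)` is a test macro whose result is discarded, so nothing actually marks the item pending before it is queued. For comparison, mainline `queue_work_on()` claims a work item with an atomic test-and-set of the pending bit; a hedged sketch of that idiom, assuming the cmwq-era `WORK_STRUCT_PENDING_BIT` / `work_data_bits()` definitions and an illustrative helper name:

```c
#include <linux/workqueue.h>

/* illustrative sketch only: claim the work item atomically before queueing,
 * the way mainline queue_work_on() does; returns 0 if it was already pending
 * (i.e. already queued somewhere).  Caller is assumed to hold which->lock. */
static int klitirqd_claim_and_queue(struct work_struct *w,
				    struct klitirqd_info *which)
{
	if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(w)))
		return 0;

	list_add_tail(&w->entry, &which->worklist);
	which->pending |= LIT_WORK;
	return 1;
}
```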