Diffstat (limited to 'litmus/litmus_softirq.c')
-rw-r--r-- | litmus/litmus_softirq.c | 151
1 file changed, 123 insertions, 28 deletions
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index dbefcb560063..734a7dcdc9ca 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -21,7 +21,8 @@ static atomic_t num_ready_klitirqds = ATOMIC_INIT(0);
 enum pending_flags
 {
     LIT_TASKLET_LOW = 0x1,
-    LIT_TASKLET_HI = LIT_TASKLET_LOW<<1
+    LIT_TASKLET_HI = LIT_TASKLET_LOW<<1,
+    LIT_WORK = LIT_TASKLET_HI<<1
 };
 
 /* only support tasklet processing for now. */
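The enum change above adds a third pending class, LIT_WORK, below the two tasklet classes, so one u32 bitmask can describe everything a klitirqd thread still has to do. A minimal, self-contained sketch of how such a mask is tested in priority order (the flag names mirror the patch; the helper itself is made up for illustration):

/* Sketch only, not part of the patch: priority-ordered test of the three
 * pending_flags bits. 'u32' stands in for the kernel type here. */
typedef unsigned int u32;

enum pending_flags {
    LIT_TASKLET_LOW = 0x1,
    LIT_TASKLET_HI  = LIT_TASKLET_LOW << 1,
    LIT_WORK        = LIT_TASKLET_HI << 1,
};

static const char* next_pending_class(u32 pending)
{
    if (pending & LIT_TASKLET_HI)  return "hi tasklet";   /* served first */
    if (pending & LIT_TASKLET_LOW) return "low tasklet";
    if (pending & LIT_WORK)        return "work item";    /* served last */
    return "idle";
}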
@@ -37,8 +38,11 @@ struct klitirqd_info
     raw_spinlock_t lock;
 
     u32 pending;
+
+    /* in order of priority */
+    struct tasklet_head pending_tasklets_hi;
     struct tasklet_head pending_tasklets;
-    struct tasklet_head pending_tasklets_hi;
+    struct list_head worklist;
 };
 
 /* one list for each klitirqd */
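The struct now carries, in priority order, the hi tasklet queue, the low tasklet queue, and a plain list_head of queued work_structs. The initialization of the new worklist member is not part of this diff; presumably the klitirqd setup path prepares it with the standard list API, along these lines (a sketch under that assumption; the function name is hypothetical, and the tasklet queues are initialized the way kernel/softirq.c does):

/* Sketch only: assumed initialization of the new field, not shown in this
 * diff. INIT_LIST_HEAD() leaves worklist as an empty circular list, which is
 * what list_add_tail()/list_empty() expect. */
static void init_klitirqd_queues(struct klitirqd_info* which)
{
    raw_spin_lock_init(&which->lock);
    which->pending = 0;
    which->pending_tasklets_hi.head = NULL;
    which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head;
    which->pending_tasklets.head = NULL;
    which->pending_tasklets.tail = &which->pending_tasklets.head;
    INIT_LIST_HEAD(&which->worklist);
}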
@@ -66,18 +70,22 @@ inline unsigned int klitirqd_id(struct task_struct* tsk)
 inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which)
 {
     return (which->pending & LIT_TASKLET_HI);
-};
+}
 
 inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which)
 {
     return (which->pending & LIT_TASKLET_LOW);
-};
+}
 
+inline static u32 litirq_pending_work_irqoff(struct klitirqd_info* which)
+{
+    return (which->pending & LIT_WORK);
+}
 
 inline static u32 litirq_pending_irqoff(struct klitirqd_info* which)
 {
     return(which->pending);
-};
+}
 
 
 inline static u32 litirq_pending(struct klitirqd_info* which)
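Besides adding litirq_pending_work_irqoff(), this hunk drops the stray semicolons after the function bodies (gcc tolerates the extra `;` at file scope but warns under -pedantic). The _irqoff suffix follows the file's convention that the caller already holds which->lock; the locked wrapper litirq_pending() begins right below the hunk boundary, and presumably has roughly this shape (a guess for orientation, not the actual body):

/* Sketch: assumed shape of the locked wrapper whose body is cut off by the
 * hunk above. */
inline static u32 litirq_pending(struct klitirqd_info* which)
{
    unsigned long flags;
    u32 pending;

    raw_spin_lock_irqsave(&which->lock, flags);
    pending = litirq_pending_irqoff(which);
    raw_spin_unlock_irqrestore(&which->lock, flags);

    return pending;
}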
@@ -93,9 +101,15 @@ inline static u32 litirq_pending(struct klitirqd_info* which)
 };
 
 
+/* returns true if thread needs to change priority to continue processing
+   pending tasklets.
+*/
 static int needs_prio_change(struct klitirqd_info* which,
                              struct tasklet_head* pending_tasklets)
 {
+    /* this function doesn't have to look at work objects since they have
+       priority below tasklets. */
+
     unsigned long flags;
     int ret = 0;
 
@@ -126,33 +140,41 @@ static int needs_prio_change(struct klitirqd_info* which,
 
 static void __reeval_prio(struct klitirqd_info* which)
 {
-    u32 pending = 0;
     struct task_struct* tsk = which->klitirqd;
     struct task_struct* new_prio = tsk_rt(tsk)->inh_task;
 
-    if(litirq_pending_irqoff(which))
-    {
-        pending = 1;
-        if(which->pending_tasklets.head->owner != tsk_rt(tsk)->inh_task)
-        {
-            new_prio = which->pending_tasklets.head->owner;
-        }
-    }
-
-    if(litirq_pending_hi_irqoff(which))
-    {
-        pending = 1;
-        if(which->pending_tasklets_hi.head->owner != tsk_rt(tsk)->inh_task)
-        {
-            new_prio = which->pending_tasklets_hi.head->owner;
-        }
-    }
-
-    if(!pending)
+    /* Check in prio-order */
+    u32 pending = litirq_pending_irqoff(which);
+    if(pending & LIT_TASKLET_HI)
+    {
+        if(which->pending_tasklets_hi.head->owner != tsk_rt(tsk)->inh_task)
+        {
+            new_prio = which->pending_tasklets_hi.head->owner;
+        }
+    }
+    else if(pending & LIT_TASKLET_LOW)
+    {
+        if(which->pending_tasklets.head->owner != tsk_rt(tsk)->inh_task)
+        {
+            new_prio = which->pending_tasklets.head->owner;
+        }
+    }
+    else if(pending & LIT_WORK)
+    {
+        struct work_struct* work =
+            list_entry(&which->worklist, struct work_struct, entry);
+        if(work->owner != tsk_rt(tsk)->inh_task)
+        {
+            new_prio = work->owner;
+        }
+    }
+    else
     {
         new_prio = NULL;
     }
 
+
+    /* inherit the proper priority */
     if(new_prio != tsk_rt(tsk)->inh_task)
     {
         if(new_prio != NULL)
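__reeval_prio() now walks the pending classes highest-first and inherits the priority of the owner of the first item in the highest non-empty class. One detail worth flagging: in the LIT_WORK branch, list_entry() is applied to &which->worklist, i.e. to the sentinel list head itself rather than to its first element. If the intent is the owner of the head-of-queue work item, the usual idiom is list_first_entry(), as in this sketch (an alternative, not what the patch commits):

/* Sketch only: the conventional way to peek at the first queued work item in
 * the LIT_WORK branch. list_first_entry(head, type, member) expands to
 * list_entry(head->next, type, member), so it yields the first work_struct on
 * the list rather than the container of the list head itself. */
struct work_struct* first =
    list_first_entry(&which->worklist, struct work_struct, entry);
if (first->owner != tsk_rt(tsk)->inh_task)
    new_prio = first->owner;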
@@ -301,6 +323,30 @@ static void do_lit_tasklet(struct klitirqd_info* which,
     }
 }
 
+static void do_lit_work(struct klitirqd_info* which)
+{
+    unsigned long flags;
+    work_func_t f;
+    struct work_struct* work;
+
+    // only execute one work-queue item to yield to tasklets.
+    // ...is this a good idea?
+
+    raw_spin_lock_irqsave(&which->lock, flags);
+
+    work = list_entry(&which->worklist, struct work_struct, entry);
+    list_del_init(&work->entry);
+
+    raw_spin_unlock_irqrestore(&which->lock, flags);
+
+    // do the work!
+    work_clear_pending(work);
+    f = work->func;
+    f(work); /* can't touch 'work' after this point,
+                the user may have free'd it. */
+}
+
+
 static void do_litirq(struct klitirqd_info* which)
 {
     u32 pending;
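do_lit_work() deliberately pops a single item while holding which->lock, drops the lock, and only then runs the handler, so the callback never executes under the spinlock and tasklets regain the thread after each item. As in __reeval_prio(), the item is taken with list_entry() on the list head, and the hunk does not show where LIT_WORK is cleared once the list drains. A more defensive variant of the dequeue step, under the assumption that the bit should be dropped with the last item, might read (sketch only, not part of the patch):

/* Sketch only: dequeue of one work item with an explicit empty check and
 * LIT_WORK book-keeping. */
raw_spin_lock_irqsave(&which->lock, flags);

if (list_empty(&which->worklist)) {
    which->pending &= ~LIT_WORK;    /* nothing queued after all */
    raw_spin_unlock_irqrestore(&which->lock, flags);
    return;
}

work = list_first_entry(&which->worklist, struct work_struct, entry);
list_del_init(&work->entry);
if (list_empty(&which->worklist))
    which->pending &= ~LIT_WORK;    /* that was the last queued item */

raw_spin_unlock_irqrestore(&which->lock, flags);

work_clear_pending(work);
work->func(work);   /* 'work' may be freed by its handler; do not touch it afterwards */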
@@ -351,6 +397,15 @@ static void do_litirq(struct klitirqd_info* which)
             do_lit_tasklet(which, &which->pending_tasklets);
             resched = needs_prio_change(which, &which->pending_tasklets);
         }
+
+        if(!resched && (pending & LIT_WORK))
+        {
+            TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__);
+            /* Let's pray this doesn't take a long time to execute and
+               block tasklets. TODO: TEST THIS POSSIBILITY
+            */
+            do_lit_work(which);
+        }
     }
 }
 
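With this hunk, work items sit at the bottom of the processing order inside do_litirq(): one item runs per pass, and only when no higher-priority tasklet work forced a priority change (the in-code TODO acknowledges that a long-running handler can still hold off subsequent tasklets). The surrounding loop is not fully visible in the diff; its assumed overall shape, for orientation only, is roughly:

/* Sketch only: paraphrase of the assumed do_litirq() control flow around the
 * hunk above; the real body is longer and not fully shown in this diff. */
static void do_litirq_sketch(struct klitirqd_info* which)
{
    u32 pending;
    int resched = 0;

    while (!resched && (pending = litirq_pending(which))) {
        if (pending & LIT_TASKLET_HI) {
            do_lit_tasklet(which, &which->pending_tasklets_hi);
            resched = needs_prio_change(which, &which->pending_tasklets_hi);
        }
        if (!resched && (pending & LIT_TASKLET_LOW)) {
            do_lit_tasklet(which, &which->pending_tasklets);
            resched = needs_prio_change(which, &which->pending_tasklets);
        }
        if (!resched && (pending & LIT_WORK))
            do_lit_work(which);    /* at most one work item per pass */
    }
}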
@@ -641,7 +696,7 @@ void __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k
 
     if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
     {
-        TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
+        TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
         BUG();
     }
 
@@ -659,3 +714,43 @@ void __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k
 
 EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
 
+
+static void ___litmus_schedule_work(struct work_struct *w,
+                                    struct klitirqd_info *which,
+                                    int wakeup)
+{
+    unsigned long flags;
+
+    raw_spin_lock_irqsave(&which->lock, flags);
+
+    work_pending(w);
+    list_add_tail(&w->entry, &which->worklist);
+
+    which->pending |= LIT_WORK;
+
+    if(wakeup)
+    {
+        wakeup_litirqd_locked(which); /* wakeup the klitirqd */
+    }
+
+    raw_spin_unlock_irqrestore(&which->lock, flags);
+}
+
+void __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
+{
+    if(unlikely(w->owner == NULL) || !is_realtime(w->owner))
+    {
+        TRACE("%s: No owner associated with this work object!\n", __FUNCTION__);
+        BUG();
+    }
+
+    if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
+    {
+        TRACE("%s: No klitirqd_th%u!\n", k_id);
+        BUG();
+    }
+
+    ___litmus_schedule_work(w, &klitirqds[k_id], 1);
+}
+
+EXPORT_SYMBOL(__litmus_schedule_work);
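__litmus_schedule_work() is the work-queue counterpart of the existing tasklet entry points: it insists on a real-time owner, queues the item on klitirqd k_id, sets LIT_WORK, and wakes the thread. Two small observations: work_pending(w) in ___litmus_schedule_work() only tests the pending bit (its return value is discarded), and the TRACE in the k_id range check passes a single argument to a "%s ... %u" format, so __FUNCTION__ appears to have been dropped relative to the tasklet variant. A hypothetical caller, assuming a driver-side work item and handler (both names invented here), would look like:

/* Sketch only: hypothetical use of the new export. 'demo_work' and
 * 'demo_handler' are made up; 'owner' is the LITMUS extension of work_struct
 * that __litmus_schedule_work() checks above. */
static struct work_struct demo_work;

static void demo_handler(struct work_struct *w)
{
    /* deferred, priority-inherited processing runs in klitirqd context */
}

static void demo_defer(struct task_struct *rt_owner, unsigned int k_id)
{
    INIT_WORK(&demo_work, demo_handler);
    demo_work.owner = rt_owner;                /* must be a real-time task */
    __litmus_schedule_work(&demo_work, k_id);  /* queue and wake klitirqd k_id */
}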