author	Glenn Elliott <gelliott@cs.unc.edu>	2013-04-16 15:10:09 -0400
committer	Glenn Elliott <gelliott@cs.unc.edu>	2013-04-16 15:10:09 -0400
commit	1a7abf153a44781cedd7f529a70769c769f58029 (patch)
tree	4850c532224eb443c87d5dd741e8ad3672ddc0b4
parent	b7724735827a2e44bfea8863b66eba3d9c4a8d67 (diff)
DRAIN_SIMPLE_IO: drain on execution or I/O suspend
New budget drain policy: budget drains while the task is executing, or while it is suspended for any reason other than waiting on a Litmus lock (i.e., a non-real-time suspension such as I/O).
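For context, a minimal userspace sketch of how a task might opt into the new policy. This is an assumption-laden illustration, not part of the patch: it presumes a liblitmus-style setup in which struct rt_task carries the drain_policy field backed by the budget_drain_policy_t enum extended in rt_param.h below, and it relies on the usual liblitmus helpers (init_rt_task_param, ms2ns, set_rt_task_param, gettid, task_mode).

/* Hypothetical sketch: opt the calling thread into DRAIN_SIMPLE_IO.
 * Field and helper names are assumptions based on this patch and the
 * liblitmus API, not definitions introduced by the patch itself. */
#include <litmus.h>

int become_io_draining_rt_task(void)
{
	struct rt_task param;

	init_rt_task_param(&param);
	param.exec_cost     = ms2ns(10);   /* 10 ms budget per job */
	param.period        = ms2ns(100);  /* 100 ms period */
	/* drain policies are ignored unless enforcement or signalling is on
	 * (see the rt_param.h comment below) */
	param.budget_policy = PRECISE_ENFORCEMENT;
	/* drain while running or while suspended for non-lock (I/O) reasons */
	param.drain_policy  = DRAIN_SIMPLE_IO;

	if (set_rt_task_param(gettid(), &param) != 0)
		return -1;

	return task_mode(LITMUS_RT_TASK);
}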
-rw-r--r--	include/litmus/budget.h		14
-rw-r--r--	include/litmus/jobs.h		2
-rw-r--r--	include/litmus/rt_param.h	1
-rw-r--r--	litmus/budget.c			65
-rw-r--r--	litmus/sched_cedf.c		168
-rw-r--r--	litmus/sched_task_trace.c	9
6 files changed, 249 insertions(+), 10 deletions(-)
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index b70364e53582..d5d4d61eb06a 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -75,6 +75,18 @@ void simple_on_sleep(struct task_struct* t);
 void simple_on_exit(struct task_struct* t);
 
 
+/* Functions for DRAIN_SIMPLE_IO policy common
+ * to every scheduler. Scheduler must provide
+ * implementation for simple_io_on_exhausted().
+ */
+#define simple_io_on_scheduled simple_on_scheduled
+void simple_io_on_blocked(struct task_struct* t);
+void simple_io_on_wakeup(struct task_struct* t);
+#define simple_io_on_preempt simple_on_preempt
+#define simple_io_on_sleep simple_on_sleep
+#define simple_io_on_exit simple_on_exit
+
+
 /* Functions for DRAIN_SOBLIV policy common
  * to every scheduler. Scheduler must provide
  * implementation for sobliv_on_exhausted().
@@ -89,7 +101,7 @@ void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh);
 void sobliv_on_enter_top_m(struct task_struct* t);
 void sobliv_on_exit_top_m(struct task_struct* t);
 
-void sobliv_revaluate_task(struct task_struct* t);
+void reevaluate_inheritance(struct task_struct* t);
 
 #define budget_state_machine(t, evt) \
 	do { \
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
index 4f311a4a2a4b..ec73cc6ae7f8 100644
--- a/include/litmus/jobs.h
+++ b/include/litmus/jobs.h
@@ -22,7 +22,7 @@ typedef struct
 #define DECLARE_WORKER_VIS_FLAGS(symb) \
 	worker_visibility_t symb = {0, 0, 0, 0}
 #else
-#define DECLARE_WORKER_VIS_FLAGS(var) \
+#define DECLARE_WORKER_VIS_FLAGS(symb) \
 	worker_visibility_t symb = {0, 0}
 #endif
 
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 7828117a61a8..c480d95690f8 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -39,6 +39,7 @@ typedef enum {
 	/* all drain mechanisms are ignored if budget enforcement or signalling
 	   is not in use. */
 	DRAIN_SIMPLE, /* drains while task is linked */
+	DRAIN_SIMPLE_IO, /* drains while task is linked or blocked (not waiting for Litmus lock) */
 	DRAIN_SAWARE, /* drains according to suspension-aware analysis */
 	DRAIN_SOBLIV, /* drains according to suspension-obliv analysis */
 } budget_drain_policy_t;
diff --git a/litmus/budget.c b/litmus/budget.c
index 4b464c4d8507..9ae530ad3b2b 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -157,6 +157,68 @@ void simple_on_exit(struct task_struct* t)
 }
 
 /*
+ * DRAIN_SIMPLE_IO
+ */
+
+void simple_io_on_blocked(struct task_struct* t)
+{
+	// hiding is turned on by locking protocols, so if there isn't any
+	// hiding, then we're blocking for some other reason. assume it's I/O.
+
+	int for_io = !tsk_rt(t)->blocked_lock || (0
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		|| (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks)
+#endif
+#ifdef CONFIG_LITMUS_NVIDIA
+		|| (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
+#endif
+		);
+
+	/* we drain budget for io-based suspensions */
+	if (for_io) {
+		/* there is a fraction of time where we're double-counting the
+		 * time tracked by the rq and suspension time.
+		 * TODO: Do this recording closer to suspension time. */
+		tsk_rt(t)->budget.suspend_timestamp = litmus_clock();
+
+		TRACE_TASK(t, "blocking for I/O.\n");
+
+		if (!tsk_rt(t)->budget.timer.armed) {
+			bt_flag_clear(t, BTF_BUDGET_EXHAUSTED);
+
+			if (likely(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) {
+				TRACE_TASK(t, "budget timer not armed. "
+					"Raced with exhaustion-resched? Re-arming.\n");
+				arm_enforcement_timer(t, 1);
+			}
+			else {
+				TRACE_TASK(t, "not arming timer because task is waiting "
+					"for release.\n");
+			}
+		}
+	}
+	else {
+		TRACE_TASK(t, "blocking for litmus lock. stop draining.\n");
+		simple_on_blocked(t);
+	}
+}
+
+void simple_io_on_wakeup(struct task_struct* t)
+{
+	/* we're waking up from an io-based suspension */
+	if (tsk_rt(t)->budget.suspend_timestamp) {
+		lt_t suspend_cost = litmus_clock() - tsk_rt(t)->budget.suspend_timestamp;
+		tsk_rt(t)->budget.suspend_timestamp = 0;
+		TRACE_TASK(t, "budget consumed while io-suspended: %llu\n", suspend_cost);
+		get_exec_time(t) += suspend_cost;
+	}
+	else {
+		TRACE_TASK(t, "waking from non-io blocking\n");
+	}
+}
+
+
+/*
  * DRAIN_SOBLIV
  */
 
@@ -208,6 +270,7 @@ void sobliv_on_wakeup(struct task_struct* t)
 	 * blocked. */
 	if (tsk_rt(t)->budget.suspend_timestamp) {
 		lt_t suspend_cost = litmus_clock() - tsk_rt(t)->budget.suspend_timestamp;
+		tsk_rt(t)->budget.suspend_timestamp = 0;
 		TRACE_TASK(t, "budget consumed while suspended: %llu\n", suspend_cost);
 		get_exec_time(t) += suspend_cost;
 	}
@@ -290,7 +353,7 @@ void sobliv_on_exit_top_m(struct task_struct* t)
 }
 
 
-void sobliv_revaluate_task(struct task_struct* t)
+void reevaluate_inheritance(struct task_struct* t)
 {
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	struct litmus_lock *blocked_lock = NULL;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index aab30a786515..50eb4d446303 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -703,6 +703,150 @@ static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t)
 }
 
 
+static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t)
+{
+	enum hrtimer_restart restart = HRTIMER_NORESTART;
+
+	/* t may or may not be scheduled */
+
+	if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
+		/* signal exhaustion */
+
+		/* Tasks should block SIG_BUDGET if they cannot gracefully respond to
+		 * the signal while suspended. SIG_BUDGET is an rt-signal, so it will
+		 * be queued and received when SIG_BUDGET is unblocked */
+		send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
+	}
+
+	if (budget_enforced(t) && !bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) {
+		int cpu = (tsk_rt(t)->linked_on != NO_CPU) ?
+			tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on;
+
+		if (is_np(t) && is_user_np(t)) {
+			TRACE_TASK(t, "is non-preemptable, preemption delayed.\n");
+			bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+			request_exit_np(t);
+		}
+		/* where do we need to call resched? */
+		else if (cpu == smp_processor_id()) {
+			TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n");
+			bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+			litmus_reschedule_local();
+			set_will_schedule();
+		}
+		else if (cpu != NO_CPU) {
+			TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu);
+			bt_flag_set(t, BTF_BUDGET_EXHAUSTED);
+			litmus_reschedule(cpu);
+		}
+		else if (unlikely(tsk_rt(t)->blocked_lock)) {
+			/* we shouldn't be draining while waiting for a litmus lock, but we
+			 * could have raced with the budget timer (?). */
+			WARN_ON(1);
+		}
+		else {
+			lt_t remaining;
+			cedf_domain_t *cluster;
+			unsigned long flags;
+
+			cluster = task_cpu_cluster(t);
+
+			// 1) refresh budget through job completion
+			// 2) if holds locks, tell the locking protocol to re-eval priority
+			// 3) -- the LP must undo any inheritance relations if appropriate
+
+			/* force job completion */
+			TRACE_TASK(t, "blocked, postponing deadline\n");
+
+			/* Outermost lock of the cluster. Recursive lock calls are
+			 * possible on this code path. This should be the _ONLY_
+			 * scenario where recursive calls are made. */
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+			/* Unfortunately, we _might_ need to grab the DGL lock, so we
+			 * must grab it every time since it must be taken before the
+			 * cluster lock. */
+			raw_spin_lock_irqsave(&cluster->dgl_lock, flags);
+			raw_readyq_lock(&cluster->cluster_lock);
+#else
+			raw_readyq_lock_irqsave(&cluster->cluster_lock, flags);
+#endif
+
+			job_completion(t, 1); /* refreshes budget and pushes out deadline */
+
+#ifdef CONFIG_LITMUS_LOCKING
+			{
+				int i;
+				/* any linked task that inherits from 't' needs to have their
+				 * cpu-position re-evaluated. we have to do this in two passes.
+				 * pass 1: remove nodes from heap s.t. heap is in known good state.
+				 * pass 2: re-add nodes.
+				 *
+				 */
+				for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(&tsk_rt(t)->used_linkback_slots));
+					i < BITS_PER_LONG;
+					i = find_next_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(&tsk_rt(t)->used_linkback_slots), i+1))
+				{
+					struct task_struct *to_update = tsk_rt(t)->inh_task_linkbacks[i];
+					BUG_ON(!to_update);
+					if (tsk_rt(to_update)->linked_on != NO_CPU) {
+						cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, tsk_rt(to_update)->linked_on);
+						BUG_ON(!binheap_is_in_heap(&entry->hn));
+						binheap_delete(&entry->hn, &cluster->cpu_heap);
+					}
+				}
+				for (i = find_first_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(&tsk_rt(t)->used_linkback_slots));
+					i < BITS_PER_LONG;
+					i = find_next_bit(&tsk_rt(t)->used_linkback_slots, BITS_PER_BYTE*sizeof(&tsk_rt(t)->used_linkback_slots), i+1))
+				{
+					struct task_struct *to_update = tsk_rt(t)->inh_task_linkbacks[i];
+					BUG_ON(!to_update);
+					if (tsk_rt(to_update)->linked_on != NO_CPU) {
+						cpu_entry_t *entry = &per_cpu(cedf_cpu_entries, tsk_rt(to_update)->linked_on);
+						binheap_add(&entry->hn, &cluster->cpu_heap, cpu_entry_t, hn);
+					}
+				}
+			}
+
+			/* Check our inheritance and propagate any changes forward. */
+			reevaluate_inheritance(t);
+#endif
+			/* No need to recheck priority of AUX tasks. They will always
+			 * inherit from 't' if they are enabled. Their prio change was
+			 * captured by the cpu-heap operations above. */
+
+#ifdef CONFIG_LITMUS_NVIDIA
+			/* Re-eval priority of GPU interrupt threads. */
+			if(tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
+				gpu_owner_decrease_priority(t);
+#endif
+
+#ifdef CONFIG_LITMUS_LOCKING
+			/* double-check that everything is okay */
+			check_for_preemptions(cluster);
+#endif
+
+			/* should be the outermost unlock call */
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+			raw_readyq_unlock(&cluster->cluster_lock);
+			raw_spin_unlock_irqrestore(&cluster->dgl_lock, flags);
+#else
+			raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
+#endif
+
+			/* we need to set up the budget timer since we're within the callback. */
+			hrtimer_forward_now(&get_budget_timer(t).timer.timer,
+				ns_to_ktime(budget_remaining(t)));
+			remaining = hrtimer_get_expires_ns(&get_budget_timer(t).timer.timer);
+
+			TRACE_TASK(t, "rearmed timer to %ld\n", remaining);
+			restart = HRTIMER_RESTART;
+		}
+	}
+
+	return restart;
+}
+
+
 #ifdef CONFIG_LITMUS_LOCKING
 static void __cedf_trigger_vunlock(struct task_struct *t)
 {
@@ -841,7 +985,7 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 			}
 
 			/* Check our inheritance and propagate any changes forward. */
-			sobliv_revaluate_task(t);
+			reevaluate_inheritance(t);
 
 			if (tsk_rt(t)->outermost_lock && tsk_rt(t)->outermost_lock->ops->is_omlp_family)
 				__cedf_trigger_vunlock(t);
@@ -854,7 +998,6 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t)
 			/* Re-eval priority of GPU interrupt threads. */
 			if(tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
 				gpu_owner_decrease_priority(t);
-//			recheck_gpu_owner(t);
 #endif
 
 #ifdef CONFIG_LITMUS_LOCKING
@@ -1647,6 +1790,23 @@ static struct budget_tracker_ops cedf_drain_simple_ops =
 	.on_exhausted = cedf_simple_on_exhausted,
 };
 
+static struct budget_tracker_ops cedf_drain_simple_io_ops =
+{
+	.on_scheduled = simple_io_on_scheduled,
+	.on_blocked = simple_io_on_blocked,
+	.on_preempt = simple_io_on_preempt,
+	.on_sleep = simple_io_on_sleep,
+	.on_exit = simple_io_on_exit,
+
+	.on_wakeup = simple_io_on_wakeup,
+	.on_inherit = NULL,
+	.on_disinherit = NULL,
+	.on_enter_top_m = NULL,
+	.on_exit_top_m = NULL,
+
+	.on_exhausted = cedf_simple_io_on_exhausted,
+};
+
 static struct budget_tracker_ops cedf_drain_sobliv_ops =
 {
 	.on_scheduled = NULL,
@@ -1678,6 +1838,9 @@ static long cedf_admit_task(struct task_struct* tsk)
 		case DRAIN_SIMPLE:
 			ops = &cedf_drain_simple_ops;
 			break;
+		case DRAIN_SIMPLE_IO:
+			ops = &cedf_drain_simple_io_ops;
+			break;
 		case DRAIN_SOBLIV:
 			/* budget_policy and budget_signal_policy cannot be quantum-based */
 			if (!budget_quantum_tracked(tsk) && budget_precisely_tracked(tsk)) {
@@ -1695,6 +1858,7 @@ static long cedf_admit_task(struct task_struct* tsk)
 		}
 	}
 
+	/* always init the budget tracker, even if we're not using timers */
 	init_budget_tracker(&tsk_rt(tsk)->budget, ops);
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index af318058f379..931028fcf4a4 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -211,15 +211,14 @@ feather_callback void do_sched_trace_task_block(unsigned long id,
 
 		// hiding is turned on by locking protocols, so if there isn't any
 		// hiding, then we're blocking for some other reason. assume it's I/O.
-		rec->data.block.for_io = 0
+		rec->data.block.for_io = !tsk_rt(t)->blocked_lock || (0
 #ifdef CONFIG_REALTIME_AUX_TASKS
 			|| (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks)
 #endif
 #ifdef CONFIG_LITMUS_NVIDIA
 			|| (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
 #endif
-			;
-
+			);
 		put_record(rec);
 	}
 }