author | Glenn Elliott <gelliott@cs.unc.edu> | 2013-04-05 17:19:15 -0400
---|---|---
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2013-04-05 18:12:00 -0400
commit | 472e2944b12226d9d4407fb702ce98bba76b1b7d |
tree | f0f3ff3def3204cf47063f25ce837a0aee4faf7e |
parent | bf7f892f22a6a6804f09168256226cc6c2bc230c |
CONFIG: Allow ready queue lock to be recursive.
Adds a configuration option that allows the ready-queue lock
to be a recursive (raw) spinlock. This is useful for
implementing priority inheritance triggered by nested locking
and by budget-enforcement actions.
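The new raw_readyq_* wrappers introduced in rt_domain.h below map either onto the stock raw_spinlock_t primitives or, when CONFIG_RECURSIVE_READYQ_LOCK is enabled, onto recursive counterparts declared in litmus/rspinlock.h. That header is not part of this diff, so the following is only a minimal sketch of what such a recursive raw spinlock could look like; the struct layout and helper bodies are assumptions for illustration, not the actual LITMUS^RT implementation.

```c
/*
 * Sketch only: litmus/rspinlock.h is not shown in this commit.  The
 * fields and bodies below are assumptions, using just the names that
 * the new raw_readyq_* macros reference.  Assumes callers run with
 * interrupts/preemption disabled (as the _irqsave variants would
 * guarantee), so smp_processor_id() is stable.
 */
#include <linux/spinlock.h>
#include <linux/smp.h>

typedef struct {
        raw_spinlock_t lock;
        int            owner;  /* CPU currently holding the lock, or -1 */
        unsigned int   count;  /* nesting depth on the owning CPU */
} raw_rspinlock_t;

static inline void raw_rspin_lock_init(raw_rspinlock_t *rlock)
{
        raw_spin_lock_init(&rlock->lock);
        rlock->owner = -1;
        rlock->count = 0;
}

static inline void raw_rspin_lock(raw_rspinlock_t *rlock)
{
        int cpu = smp_processor_id();

        if (rlock->owner == cpu) {
                /* re-entry on the owning CPU: just track nesting depth */
                rlock->count++;
        } else {
                raw_spin_lock(&rlock->lock);
                rlock->owner = cpu;
                rlock->count = 1;
        }
}

static inline void raw_rspin_unlock(raw_rspinlock_t *rlock)
{
        if (--rlock->count == 0) {
                rlock->owner = -1;
                raw_spin_unlock(&rlock->lock);
        }
}
```

A real header would additionally provide the _irqsave/_irqrestore variants the macros expect and would want READ_ONCE/WRITE_ONCE-style accessors around `owner`; the sketch omits those details.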
-rw-r--r-- | include/litmus/rt_domain.h | 39
-rw-r--r-- | litmus/Kconfig | 9
-rw-r--r-- | litmus/rt_domain.c | 2
-rw-r--r-- | litmus/sched_cedf.c | 5
-rw-r--r-- | litmus/sched_pfair.c | 39
5 files changed, 69 insertions, 25 deletions
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index 6d3c4672dca9..182314afac7c 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -7,6 +7,10 @@
 
 #include <litmus/bheap.h>
 
+#ifdef CONFIG_RECURSIVE_READYQ_LOCK
+#include <litmus/rspinlock.h>
+#endif
+
 #define RELEASE_QUEUE_SLOTS 127 /* prime */
 
 struct _rt_domain;
@@ -20,9 +24,25 @@ struct release_queue {
 	struct list_head slot[RELEASE_QUEUE_SLOTS];
 };
 
+#ifdef CONFIG_RECURSIVE_READYQ_LOCK
+#define raw_readyq_spinlock_t		raw_rspinlock_t
+#define raw_readyq_lock_irqsave		raw_rspin_lock_irqsave
+#define raw_readyq_lock			raw_rspin_lock
+#define raw_readyq_unlock_irqrestore	raw_rspin_unlock_irqrestore
+#define raw_readyq_unlock		raw_rspin_unlock
+#define raw_readyq_lock_init		raw_rspin_lock_init
+#else
+#define raw_readyq_spinlock_t		raw_spinlock_t
+#define raw_readyq_lock_irqsave		raw_spin_lock_irqsave
+#define raw_readyq_lock			raw_spin_lock
+#define raw_readyq_unlock_irqrestore	raw_spin_unlock_irqrestore
+#define raw_readyq_unlock		raw_spin_unlock
+#define raw_readyq_lock_init		raw_spin_lock_init
+#endif
+
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
-	raw_spinlock_t			ready_lock;
+	raw_readyq_spinlock_t		ready_lock;
 	struct bheap			ready_queue;
 
 	/* real-time tasks waiting for release are in here */
@@ -65,6 +85,7 @@ struct release_heap {
 };
 
 
+
 static inline struct task_struct* __next_ready(rt_domain_t* rt)
 {
 	struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue);
@@ -125,17 +146,17 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	raw_spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_readyq_lock_irqsave(&rt->ready_lock, flags);
 	__add_ready(rt, new);
-	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_readyq_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
-	raw_spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_readyq_lock_irqsave(&rt->ready_lock, flags);
 	__merge_ready(rt, tasks);
-	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_readyq_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline struct task_struct* take_ready(rt_domain_t* rt)
@@ -143,9 +164,9 @@ static inline struct task_struct* take_ready(rt_domain_t* rt)
 	unsigned long flags;
 	struct task_struct* ret;
 	/* first we need the write lock for rt_ready_queue */
-	raw_spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_readyq_lock_irqsave(&rt->ready_lock, flags);
 	ret = __take_ready(rt);
-	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_readyq_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 
@@ -183,9 +204,9 @@ static inline int jobs_pending(rt_domain_t* rt)
 	unsigned long flags;
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
-	raw_spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_readyq_lock_irqsave(&rt->ready_lock, flags);
 	ret = !bheap_empty(&rt->ready_queue);
-	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_readyq_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 
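Because every helper above now goes through the raw_readyq_* macros, a code path that already holds the ready-queue lock can call back into these helpers without self-deadlocking, provided the recursive option is enabled. The fragment below only illustrates that nesting pattern; budget_overrun_handler() and the inheritance decision are hypothetical and do not appear in this patch.

```c
/*
 * Hypothetical example (not part of this patch) of the nesting that
 * the recursive ready-queue lock is meant to permit.
 */
static void budget_overrun_handler(rt_domain_t *rt, struct task_struct *t)
{
        unsigned long flags;

        raw_readyq_lock_irqsave(&rt->ready_lock, flags);

        /* ...decide that t should inherit a higher priority... */

        /*
         * add_ready() internally takes rt->ready_lock again.  With
         * CONFIG_RECURSIVE_READYQ_LOCK=y this nests on the same CPU;
         * with a plain raw_spinlock_t it would self-deadlock.  (The
         * recursive _irqsave variant is assumed to tolerate the
         * nested save/restore of the inner flags.)
         */
        add_ready(rt, t);

        raw_readyq_unlock_irqrestore(&rt->ready_lock, flags);
}
```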
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 437ce82f97e9..a1a6cc699348 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -12,6 +12,15 @@ config PLUGIN_CEDF
 	  On smaller platforms (e.g., ARM PB11MPCore), using C-EDF
 	  makes little sense since there aren't any shared caches.
 
+config RECURSIVE_READYQ_LOCK
+	bool "Recursive Ready Queue Lock"
+	default n
+	help
+	  Protects ready queues with a raw recursive spinlock instead
+	  of a normal raw spinlock.
+
+	  If unsure, say No.
+
 config PLUGIN_PFAIR
 	bool "PFAIR"
 	depends on HIGH_RES_TIMERS && !NO_HZ
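The option defaults to off, so the recursive lock is strictly opt-in; a kernel that wants it selects the symbol explicitly, which would appear in the generated .config roughly as:

```
CONFIG_RECURSIVE_READYQ_LOCK=y
```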
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index 4528c4362ecf..abf54ad5c2f2 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -286,7 +286,7 @@ void rt_domain_init(rt_domain_t *rt,
 	for (i = 0; i < RELEASE_QUEUE_SLOTS; i++)
 		INIT_LIST_HEAD(&rt->release_queue.slot[i]);
 
-	raw_spin_lock_init(&rt->ready_lock);
+	raw_readyq_lock_init(&rt->ready_lock);
 	raw_spin_lock_init(&rt->release_lock);
 	raw_spin_lock_init(&rt->tobe_lock);
 
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 2837f12953ea..e8de5dd18b7f 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -2288,7 +2288,10 @@ UNSUPPORTED_AFF_OBS:
 	return err;
 }
 #endif
-#endif  // CONFIG_LITMUS_AFFINITY_LOCKING
+
+
+
+#endif  // CONFIG_LITMUS_NESTED_LOCKING
 
 
 #ifdef VERBOSE_INIT
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index c06326faf9ce..707e6b6f2483 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -120,13 +120,13 @@ static inline struct pfair_cluster* from_domain(rt_domain_t* rt)
 	return container_of(rt, struct pfair_cluster, pfair);
 }
 
-static inline raw_spinlock_t* cluster_lock(struct pfair_cluster* cluster)
+static inline raw_readyq_spinlock_t* cluster_lock(struct pfair_cluster* cluster)
 {
 	/* The ready_lock is used to serialize all scheduling events. */
 	return &cluster->pfair.ready_lock;
 }
 
-static inline raw_spinlock_t* cpu_lock(struct pfair_state* state)
+static inline raw_readyq_spinlock_t* cpu_lock(struct pfair_state* state)
 {
 	return cluster_lock(cpu_cluster(state));
 }
@@ -452,11 +452,12 @@ static void schedule_next_quantum(struct pfair_cluster *cluster, quanta_t time)
 {
 	struct pfair_state *cpu;
 	struct list_head* pos;
+	raw_readyq_spinlock_t* readyq_lock = cluster_lock(cluster);
 
 	/* called with interrupts disabled */
 	PTRACE("--- Q %lu at %llu PRE-SPIN\n",
 	       time, litmus_clock());
-	raw_spin_lock(cluster_lock(cluster));
+	raw_readyq_lock(readyq_lock);
 	PTRACE("<<< Q %lu at %llu\n",
 	       time, litmus_clock());
 
@@ -491,7 +492,7 @@ static void schedule_next_quantum(struct pfair_cluster *cluster, quanta_t time)
 	}
 	PTRACE(">>> Q %lu at %llu\n",
 	       time, litmus_clock());
-	raw_spin_unlock(cluster_lock(cluster));
+	raw_readyq_unlock(readyq_lock);
 }
 
 static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state)
@@ -607,6 +608,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	struct pfair_cluster* cluster = cpu_cluster(state);
 	int blocks, completion, out_of_time;
 	struct task_struct* next = NULL;
+	raw_readyq_spinlock_t* readyq_lock;
 
 #ifdef CONFIG_RELEASE_MASTER
 	/* Bail out early if we are the release master.
@@ -618,7 +620,8 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 	}
 #endif
 
-	raw_spin_lock(cpu_lock(state));
+	readyq_lock = cpu_lock(state);
+	raw_readyq_lock(readyq_lock);
 
 	blocks      = is_realtime(prev) && !is_running(prev);
 	completion  = is_realtime(prev) && is_completed(prev);
@@ -650,7 +653,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		tsk_rt(next)->scheduled_on = cpu_id(state);
 	}
 	sched_state_task_picked();
-	raw_spin_unlock(cpu_lock(state));
+	raw_readyq_unlock(readyq_lock);
 
 	if (next)
 		TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n",
@@ -665,12 +668,14 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 {
 	unsigned long flags;
 	struct pfair_cluster* cluster;
+	raw_readyq_spinlock_t* readyq_lock;
 
 	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);
 
 	cluster = tsk_pfair(t)->cluster;
+	readyq_lock = cluster_lock(cluster);
 
-	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
+	raw_readyq_lock_irqsave(readyq_lock, flags);
 
 	prepare_release(t, cluster->pfair_time + 1);
 
@@ -686,7 +691,7 @@ static void pfair_task_new(struct task_struct * t, int on_rq, int running)
 
 	check_preempt(t);
 
-	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
+	raw_readyq_unlock_irqrestore(readyq_lock, flags);
 }
 
 static void pfair_task_wake_up(struct task_struct *t)
@@ -695,13 +700,15 @@ static void pfair_task_wake_up(struct task_struct *t)
 	lt_t now;
 	int requeue = 0;
 	struct pfair_cluster* cluster;
+	raw_readyq_spinlock_t* readyq_lock;
 
 	cluster = tsk_pfair(t)->cluster;
+	readyq_lock = cluster_lock(cluster);
 
 	TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n",
 		   litmus_clock(), cur_release(t), cluster->pfair_time);
 
-	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
+	raw_readyq_lock_irqsave(readyq_lock, flags);
 
 	/* If a task blocks and wakes before its next job release,
 	 * then it may resume if it is currently linked somewhere
@@ -726,7 +733,7 @@ static void pfair_task_wake_up(struct task_struct *t)
 
 	check_preempt(t);
 
-	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
+	raw_readyq_unlock_irqrestore(readyq_lock, flags);
 	TRACE_TASK(t, "wake up done at %llu\n", litmus_clock());
 }
 
@@ -741,10 +748,12 @@ static void pfair_task_exit(struct task_struct * t)
 {
 	unsigned long flags;
 	struct pfair_cluster *cluster;
+	raw_readyq_spinlock_t* readyq_lock;
 
 	BUG_ON(!is_realtime(t));
 
 	cluster = tsk_pfair(t)->cluster;
+	readyq_lock = cluster_lock(cluster);
 
 	/* Remote task from release or ready queue, and ensure
 	 * that it is not the scheduled task for ANY CPU. We
@@ -753,12 +762,12 @@ static void pfair_task_exit(struct task_struct * t)
 	 * might not be the same as the CPU that the PFAIR scheduler
 	 * has chosen for it.
 	 */
-	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
+	raw_readyq_lock_irqsave(readyq_lock, flags);
 
 	TRACE_TASK(t, "RIP, state:%d\n", t->state);
 	drop_all_references(t);
 
-	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
+	raw_readyq_unlock_irqrestore(readyq_lock, flags);
 
 	kfree(t->rt_param.pfair);
 	t->rt_param.pfair = NULL;
@@ -771,12 +780,14 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	quanta_t release;
 
 	struct pfair_cluster *cluster;
+	raw_readyq_spinlock_t* readyq_lock;
 
 	cluster = tsk_pfair(task)->cluster;
+	readyq_lock = cluster_lock(cluster);
 
 	BUG_ON(!is_realtime(task));
 
-	raw_spin_lock_irqsave(cluster_lock(cluster), flags);
+	raw_readyq_lock_irqsave(readyq_lock, flags);
 	release_at(task, start);
 	release = time2quanta(start, CEIL);
 
@@ -786,7 +797,7 @@ static void pfair_release_at(struct task_struct* task, lt_t start)
 	prepare_release(task, release);
 	add_release(&cluster->pfair, task);
 
-	raw_spin_unlock_irqrestore(cluster_lock(cluster), flags);
+	raw_readyq_unlock_irqrestore(readyq_lock, flags);
 }
 
 static void init_subtask(struct subtask* sub, unsigned long i,