author    Glenn Elliott <gelliott@cs.unc.edu>  2013-05-14 19:05:27 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2013-05-14 19:05:27 -0400
commit    5f73f6b75bb8d37de13eea4278b36a3767e6ef5a (patch)
tree      c985e02ebb78abfdc0ad4c3a632e90f0c09202e6
parent    918fe97af91d4bee25b80fe058ae6ad333d2c86e (diff)
Hoist task wakeups out of DGL critical sections.

A very ugly kludge: wakeups are queued in a per-CPU buffer, and lock,
unlock, and budget operations that affect priority must then flush the
wake queue.
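The pattern, in outline: wake_up_for_lock() no longer calls
wake_up_process() directly; it only records the task in this CPU's
buffer. Callers disable interrupts around the whole critical section,
so the queueing CPU and the flushing CPU are the same, and they call
flush_pending_wakes() once their own locks are released. A minimal
sketch of the calling convention (mirroring the sys_litmus_lock() path
below; error handling omitted):

	unsigned long flags;

	local_irq_save(flags);   /* pin this CPU's wake queue; no preemption */
	err = l->ops->lock(l);   /* may call wake_up_for_lock(), which only
	                          * buffers the task to be woken */
	flush_pending_wakes();   /* lock-internal spinlocks are dropped;
	                          * now safe to call wake_up_process() */
	local_irq_restore(flags);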
-rw-r--r--  include/litmus/budget.h   |  2
-rw-r--r--  include/litmus/locking.h  |  2
-rw-r--r--  kernel/hrtimer.c          |  5
-rw-r--r--  litmus/ikglp_lock.c       |  4
-rw-r--r--  litmus/locking.c          | 79
-rw-r--r--  litmus/sched_cedf.c       | 14
-rw-r--r--  litmus/sched_crm.c        | 14
7 files changed, 110 insertions(+), 10 deletions(-)
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 0ae9c9f30023..44a667c94720 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -10,7 +10,7 @@ struct enforcement_timer
 {
 	raw_spinlock_t lock;
 	struct hrtimer timer;
-	int armed:1;
+	unsigned int armed:1;
 	unsigned int job_when_armed;
 };
 
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 39e32d3f48c7..b03e22658298 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -241,6 +241,8 @@ struct litmus_lock_ops {
 
 void suspend_for_lock(void);
 int wake_up_for_lock(struct task_struct* t);
+int flush_pending_wakes(void);
+void init_wake_queues(void);
 
 /* thread safe?? */
 #ifndef CONFIG_LITMUS_NESTED_LOCKING
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 11e896903828..b836a4357fc3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1317,8 +1317,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
 	 */
 	if (restart != HRTIMER_NORESTART) {
-		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
-		enqueue_hrtimer(timer, base);
+		if (likely(timer->state == HRTIMER_STATE_CALLBACK))
+			enqueue_hrtimer(timer, base);
+		// BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
 	}
 
 	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index d9316766c71b..71d3c3392b16 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -2823,6 +2823,7 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
 	else if(waitqueue_active(&fq->wait)) {
 		struct list_head *pos;
 
+		TRACE_CUR("searching fq %d for donee\n", ikglp_get_idx(sem, fq));
 
 //		TRACE_CUR("fq %d: owner: %s/%d, deadline = %d: (donor) = %s/%d "
 //			"(mth_highest != fq->owner) = %d "
@@ -2997,7 +2998,8 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
 		donee_node = default_donee;
 
 		TRACE_CUR("Could not find a donee. We have to steal one.\n");
-		WARN_ON(default_donee->donor_info == NULL);
+		// TODO: vv Is the below a bug when raised?
+		//WARN_ON(default_donee->donor_info == NULL);
 	}
 
 out:
diff --git a/litmus/locking.c b/litmus/locking.c
index 37aaab5fb7e8..f1673535f114 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -122,8 +122,11 @@ asmlinkage long sys_litmus_lock(int lock_od)
 
 	entry = get_entry_for_od(lock_od);
 	if (entry && is_lock(entry)) {
+		unsigned long flags;
 		l = get_lock(entry);
 		TRACE_CUR("Attempts to lock %d\n", l->ident);
+
+		local_irq_save(flags);
 		err = l->ops->lock(l);
 		if (!err) {
 			sched_trace_lock(current, l->ident, 1);
@@ -135,6 +138,8 @@ asmlinkage long sys_litmus_lock(int lock_od)
 				tsk_rt(current)->outermost_lock = l;
 			}
 		}
+		flush_pending_wakes();
+		local_irq_restore(flags);
 	}
 
 	/* Note: task my have been suspended or preempted in between! Take
@@ -160,6 +165,7 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 
 	entry = get_entry_for_od(lock_od);
 	if (entry && is_lock(entry)) {
+		unsigned long flags;
 		l = get_lock(entry);
 
 		if (l == tsk_rt(current)->outermost_lock) {
@@ -168,11 +174,14 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 		}
 
 		TRACE_CUR("Attempts to unlock %d\n", l->ident);
+		local_irq_save(flags);
 		err = l->ops->unlock(l);
 		if (!err) {
 			sched_trace_lock(current, l->ident, 0);
 			TRACE_CUR("Unlocked %d\n", l->ident);
 		}
+		flush_pending_wakes();
+		local_irq_restore(flags);
 	}
 
 	/* Note: task my have been preempted in between! Take this into
@@ -429,6 +438,7 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 {
 	int i;
 	unsigned long irqflags; //, dummyflags;
+	unsigned long kludge_flags;
 	raw_spinlock_t *dgl_lock;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
@@ -444,6 +454,8 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 	dgl_wait->nr_remaining = dgl_wait->size;
 
 	dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
+
+	local_irq_save(kludge_flags);
 	raw_spin_lock_irqsave(dgl_lock, irqflags);
 
 	// try to acquire each lock. enqueue (non-blocking) if it is unavailable.
@@ -463,6 +475,7 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 		// acquired entire group immediatly
 		TRACE_CUR("Acquired all locks in DGL immediatly!\n");
 		raw_spin_unlock_irqrestore(dgl_lock, irqflags);
+		local_irq_restore(kludge_flags);
 	}
 	else {
 		struct litmus_lock *first_primary;
@@ -484,6 +497,8 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 		TS_DGL_LOCK_SUSPEND;
 
 		raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
+		flush_pending_wakes();
+		local_irq_restore(kludge_flags);
 		suspend_for_lock();
 
 		TS_DGL_LOCK_RESUME;
@@ -508,6 +523,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 {
 	int i;
 	unsigned long irqflags; //, dummyflags;
+	unsigned long kludge_flags;
 	raw_spinlock_t *dgl_lock;
 	struct task_struct *t = current;
 
@@ -523,6 +539,7 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 
 	BUG_ON(dgl_wait->task != t);
 
+	local_irq_save(kludge_flags);
 	raw_spin_lock_irqsave(dgl_lock, irqflags);
 
 
@@ -556,6 +573,8 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 		TS_DGL_LOCK_SUSPEND;
 
 		raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
+		flush_pending_wakes();
+		local_irq_restore(kludge_flags);
 		suspend_for_lock(); // suspend!!!
 
 		TS_DGL_LOCK_RESUME;
@@ -565,6 +584,8 @@ static long do_litmus_dgl_atomic_lock(dgl_wait_state_t *dgl_wait)
 		goto all_acquired; // we should hold all locks when we wake up.
 	}
 	raw_spin_unlock_irqrestore(dgl_lock, irqflags);
+	flush_pending_wakes();
+	local_irq_restore(kludge_flags);
 
 all_acquired:
 
@@ -661,6 +682,7 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 {
 	int i;
 	long err = 0;
+	unsigned long flags;
 
 #ifdef CONFIG_SCHED_DEBUG_TRACE
 	{
@@ -670,6 +692,7 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 	}
 #endif
 
+	local_irq_save(flags);
 	for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order
 
 		struct litmus_lock *l = dgl_locks[i];
@@ -685,6 +708,8 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 			err = tmp_err;
 		}
 	}
+	flush_pending_wakes();
+	local_irq_restore(flags);
 
 	TRACE_CUR("DGL unlocked. err = %d\n", err);
 
@@ -878,15 +903,65 @@ void suspend_for_lock(void)
 #endif
 }
 
+#define WAKE_Q_SZ 32
+
+typedef struct wake_queue
+{
+	struct task_struct *to_wake[WAKE_Q_SZ];
+	int count;
+} wake_queue_t;
+
+DEFINE_PER_CPU(wake_queue_t, wqueues);
+
+void init_wake_queues()
+{
+	int cpu = 0;
+	for_each_online_cpu(cpu)
+	{
+		wake_queue_t *q = &per_cpu(wqueues, cpu);
+		memset(q, 0, sizeof(*q));
+	}
+}
+
 int wake_up_for_lock(struct task_struct* t)
 {
-	int ret;
+	int ret = 1; /* mimic success of wake_up_process() */
+	wake_queue_t *q;
 
-	ret = wake_up_process(t);
+	//ret = wake_up_process(t);
+
+	TRACE_TASK(t, "is queued for wakeup\n");
+	q = &per_cpu(wqueues, smp_processor_id());
+	q->to_wake[q->count] = t;
+	++(q->count);
+
+	BUG_ON(q->count >= WAKE_Q_SZ);
 
 	return ret;
 }
 
+int flush_pending_wakes()
+{
+	int count = 0, i;
+
+	wake_queue_t *q = &per_cpu(wqueues, smp_processor_id());
+	for(i = 0; i < q->count; ++i)
+	{
+		if (q->to_wake[i]) {
+			struct task_struct *t = q->to_wake[i];
+			q->to_wake[i] = NULL;
+
+			TRACE_TASK(t, "is being woken up\n");
+			wake_up_process(t);
+			++count;
+		}
+	}
+	WARN_ON(count != q->count);
+	q->count = 0;
+
+	return count;
+}
+
 
 #else // CONFIG_LITMUS_LOCKING
 
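For readers who want to poke at the queue semantics outside the kernel,
a self-contained user-space analogue follows. It is purely illustrative:
per-CPU storage, TRACE_TASK(), and wake_up_process() are stubbed out,
and every name in it is hypothetical.

	#include <assert.h>
	#include <stdio.h>

	#define WAKE_Q_SZ 32

	/* Fixed-size buffer of deferred "wakeups" (here: plain callbacks).
	 * The kernel patch keeps one of these per CPU. */
	struct wake_queue {
		void (*to_wake[WAKE_Q_SZ])(void);
		int count;
	};

	static struct wake_queue q;

	/* Analogue of wake_up_for_lock(): buffer the action, do not run it. */
	static void queue_wake(void (*fn)(void))
	{
		assert(q.count < WAKE_Q_SZ); /* mirrors the BUG_ON overflow check */
		q.to_wake[q.count++] = fn;
	}

	/* Analogue of flush_pending_wakes(): run and clear everything queued. */
	static int flush_wakes(void)
	{
		int i, flushed = q.count;
		for (i = 0; i < q.count; ++i) {
			q.to_wake[i]();          /* real code: wake_up_process(t) */
			q.to_wake[i] = NULL;
		}
		q.count = 0;
		return flushed;
	}

	static void wake_task_a(void) { puts("task A woken"); }

	int main(void)
	{
		/* ... inside the critical section: task A becomes runnable ... */
		queue_wake(wake_task_a);     /* deferred, nothing printed yet */
		/* ... critical section ends, locks released ... */
		flush_wakes();               /* prints "task A woken" */
		return 0;
	}

Note that the real queue needs no lock of its own: queueing and flushing
always happen on the same CPU with interrupts disabled, so no other
context can touch the buffer in between.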
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index e9422077f9dd..00dabbf65af7 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -779,7 +779,7 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t, i
 	else {
 		lt_t remaining;
 		cedf_domain_t *cluster;
-		unsigned long flags;
+		unsigned long flags, kludge_flags;
 
 		BUG_ON(in_schedule);
 
@@ -792,6 +792,8 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t, i
 		/* force job completion */
 		TRACE_TASK(t, "blocked, postponing deadline\n");
 
+		local_irq_save(kludge_flags);
+
 		/* Outermost lock of the cluster. Recursive lock calls are
 		 * possible on this code path. This should be the _ONLY_
 		 * scenario where recursive calls are made. */
@@ -866,6 +868,8 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t, i
 #else
 		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 #endif
+		flush_pending_wakes();
+		local_irq_restore(kludge_flags);
 
 		/* we need to set up the budget timer since we're within the callback. */
 		hrtimer_forward_now(&get_budget_timer(t).timer.timer,
@@ -971,7 +975,7 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t, int
 	else {
 		lt_t remaining;
 		cedf_domain_t *cluster;
-		unsigned long flags;
+		unsigned long flags, kludge_flags;
 
 		BUG_ON(in_schedule);
 
@@ -984,6 +988,8 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t, int
 		/* force job completion */
 		TRACE_TASK(t, "blocked, postponing deadline\n");
 
+		local_irq_save(kludge_flags);
+
 		/* Outermost lock of the cluster. Recursive lock calls are
 		 * possible on this code path. This should be the _ONLY_
 		 * scenario where recursive calls are made. */
@@ -1061,6 +1067,8 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t, int
 #else
 		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 #endif
+		flush_pending_wakes();
+		local_irq_restore(kludge_flags);
 
 		/* we need to set up the budget timer since we're within the callback. */
 		if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
@@ -2816,6 +2824,8 @@ static long cedf_activate_plugin(void)
 #ifdef CONFIG_LITMUS_NVIDIA
 	init_nvidia_info();
 #endif
+
+	init_wake_queues();
 
 	free_cpumask_var(mask);
 	clusters_allocated = 1;
diff --git a/litmus/sched_crm.c b/litmus/sched_crm.c
index b3f98f17ac0b..a60f819138d8 100644
--- a/litmus/sched_crm.c
+++ b/litmus/sched_crm.c
@@ -780,7 +780,7 @@ static enum hrtimer_restart crm_simple_io_on_exhausted(struct task_struct *t, in
 	else {
 		lt_t remaining;
 		crm_domain_t *cluster;
-		unsigned long flags;
+		unsigned long flags, kludge_flags;
 
 		BUG_ON(in_schedule);
 
@@ -793,6 +793,8 @@ static enum hrtimer_restart crm_simple_io_on_exhausted(struct task_struct *t, in
 		/* force job completion */
 		TRACE_TASK(t, "blocked, postponing deadline\n");
 
+		local_irq_save(kludge_flags);
+
 		/* Outermost lock of the cluster. Recursive lock calls are
 		 * possible on this code path. This should be the _ONLY_
 		 * scenario where recursive calls are made. */
@@ -867,6 +869,8 @@ static enum hrtimer_restart crm_simple_io_on_exhausted(struct task_struct *t, in
 #else
 		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 #endif
+		flush_pending_wakes();
+		local_irq_restore(kludge_flags);
 
 		/* we need to set up the budget timer since we're within the callback. */
 		hrtimer_forward_now(&get_budget_timer(t).timer.timer,
@@ -972,7 +976,7 @@ static enum hrtimer_restart crm_sobliv_on_exhausted(struct task_struct *t, int i
 	else {
 		lt_t remaining;
 		crm_domain_t *cluster;
-		unsigned long flags;
+		unsigned long flags, kludge_flags;
 
 		BUG_ON(in_schedule);
 
@@ -988,6 +992,7 @@ static enum hrtimer_restart crm_sobliv_on_exhausted(struct task_struct *t, int i
 		/* Outermost lock of the cluster. Recursive lock calls are
 		 * possible on this code path. This should be the _ONLY_
 		 * scenario where recursive calls are made. */
+		local_irq_save(kludge_flags);
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 		/* Unfortunately, we _might_ need to grab the DGL lock, so we
 		 * must grab it every time since it must be take before the
@@ -1062,6 +1067,8 @@ static enum hrtimer_restart crm_sobliv_on_exhausted(struct task_struct *t, int i
 #else
 		raw_readyq_unlock_irqrestore(&cluster->cluster_lock, flags);
 #endif
+		flush_pending_wakes();
+		local_irq_restore(kludge_flags);
 
 		/* we need to set up the budget timer since we're within the callback. */
 		if (bt_flag_is_set(t, BTF_IS_TOP_M)) {
@@ -2677,6 +2684,7 @@ static long crm_activate_plugin(void)
 
 	/* de-allocate old clusters, if any */
 	cleanup_crm();
+
 
 	printk(KERN_INFO "C-RM: Activate Plugin, cluster configuration = %d\n",
 		cluster_config);
@@ -2818,6 +2826,8 @@ static long crm_activate_plugin(void)
 	init_nvidia_info();
 #endif
 
+	init_wake_queues();
+
 	free_cpumask_var(mask);
 	clusters_allocated = 1;
 	return 0;