Diffstat (limited to 'litmus/sched_edf_hsb.c')
-rwxr-xr-x  litmus/sched_edf_hsb.c | 278
1 file changed, 174 insertions, 104 deletions
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index a00d1df8ba27..eed7f23352fd 100755
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -54,6 +54,7 @@ struct cpu_entry {
         int cpu;
         struct task_struct* scheduled;
         struct task_struct* linked;
+        struct bheap_node* hn;
 
         /* Used to execute timers on remote CPUs */
         struct hrtimer_start_on_info budget_timer_info;
@@ -65,10 +66,11 @@ struct cpu_entry {
 /*
  * Forward declarations
  */
-static void link_task_to_cpu(struct task_struct*, struct cpu_entry*);
 static struct task_struct* next_eligible_hrt(struct hrt_server*);
 static void preempt(struct task_struct*, struct cpu_entry*);
 static void requeue(struct task_struct*, rt_domain_t*);
+static struct cpu_entry* lowest_prio_cpu(void);
+static void link_task_to_cpu(struct task_struct*, struct cpu_entry*);
 static void unlink(struct task_struct*, rt_domain_t*);
 static enum hrtimer_restart budget_timer_fire(struct hrtimer*);
 static void timer_cancel(struct hrtimer *timer);
@@ -89,7 +91,9 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_entry, cpu_entries);
 /*
  * Global variables
  */
 static rt_domain_t srt_domain;
+static struct bheap cpu_heap;
+static struct bheap_node cpu_heap_node[NR_CPUS];
 
 /*
  * Forces a reschedule on the current cpu if there is an eligible hrt task and
@@ -124,23 +128,21 @@ static void check_for_hrt_preempt(void)
  */
 static void check_for_srt_preempt(void)
 {
         struct cpu_entry *entry;
         struct task_struct *next_srt;
-        int cpu;
-
-        for_each_online_cpu(cpu) {
-                entry = &per_cpu(cpu_entries, cpu);
 
-        if ((!entry->linked || is_srt(entry->linked)) &&
-            edf_preemption_needed(&srt_domain, entry->linked)) {
+        /* Ugly! Fix! Should probably create srt_preemption_needed() */
+        for (entry = lowest_prio_cpu();
+             (!entry->linked || is_srt(entry->linked)) &&
+             edf_preemption_needed(&srt_domain, entry->linked);
+             entry = lowest_prio_cpu()) {
 
                 next_srt = __take_ready(&srt_domain);
 
                 TRACE("forcing a reschedule with srt task %d at %llu\n",
                       next_srt->pid, litmus_clock());
 
                 preempt(next_srt, entry);
-        }
         }
 }
 
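
The FIXME in the new loop header suggests factoring the condition into a
helper. A minimal sketch of what srt_preemption_needed() could look like,
reusing only the predicates already present in this hunk (editorial sketch,
not part of the commit):

        /* True when this CPU runs nothing, or only an SRT task that the
         * global SRT ready queue can preempt. */
        static inline int srt_preemption_needed(struct cpu_entry *entry)
        {
                return (!entry->linked || is_srt(entry->linked)) &&
                        edf_preemption_needed(&srt_domain, entry->linked);
        }

The loop would then read: for (entry = lowest_prio_cpu();
srt_preemption_needed(entry); entry = lowest_prio_cpu()).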
@@ -224,7 +226,6 @@ static noinline void job_completion(struct task_struct *task,
         if (is_running(task))
                 job_arrival(domain, task);
 
-
         if (is_hrt(task)) {
                 BUG_ON(task_cpu(task) != entry->cpu);
 
@@ -233,60 +234,6 @@ static noinline void job_completion(struct task_struct *task,
         }
 }
 
-/*
- * Update the link of a CPU.
- * Handles the case where the to-be-linked task is already scheduled on
- * a different CPU.
- */
-static noinline void link_task_to_cpu(struct task_struct *linked,
-                                      struct cpu_entry *entry)
-{
-        struct cpu_entry *sched;
-        struct task_struct* tmp;
-        int on_cpu;
-
-        BUG_ON(linked && !is_realtime(linked));
-
-        /* Currently linked task is set to be unlinked */
-        if (entry->linked) {
-                entry->linked->rt_param.linked_on = NO_CPU;
-        }
-
-        /* Link new task to CPU */
-        if (linked) {
-                set_rt_flags(linked, RT_F_RUNNING);
-                /* Handle task is already scheduled somewhere! */
-                on_cpu = linked->rt_param.scheduled_on;
-                if (on_cpu != NO_CPU) {
-                        sched = &per_cpu(cpu_entries, on_cpu);
-                        /* This should only happen if not linked already */
-                        BUG_ON(sched->linked == linked);
-
-                        /* If we are already scheduled on the CPU to which we
-                         * wanted to link, we don't need to do the swap --
-                         * we just link ourselves to the CPU and depend on
-                         * the caller to get things right
-                         */
-                        if (entry != sched) {
-                                TRACE_TASK(linked,
-                                           "already scheduled on %d, updating link.\n",
-                                           sched->cpu);
-                                tmp = sched->linked;
-                                linked->rt_param.linked_on = sched->cpu;
-                                sched->linked = linked;
-                                linked = tmp;
-                        }
-                }
-                if (linked) /* Might be NULL due to swap */
-                        linked->rt_param.linked_on = entry->cpu;
-        }
-        entry->linked = linked;
-#ifdef WANT_ALL_SCHED_EVENTS
-        if (linked)
-                TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
-#endif
-}
-
 /* If the server is eligible, return the next eligible job. If
  * the server is ineligible or there are no eligible jobs,
  * returns NULL.
@@ -317,12 +264,15 @@ static struct task_struct* next_eligible_hrt(struct hrt_server *server)
         }
 }
 
+/*
+ * Suspend the current task and force a reschedule, if possible.
+ * If task is NULL, this will still force a reschedule if a preemption
+ * is possible.
+ */
 static void preempt(struct task_struct *task,
                     struct cpu_entry *entry)
 {
         struct task_struct *curr = entry->linked;
-        BUG_ON(!task);
-
 
         if (curr && is_hrt(curr))
                 requeue(curr, &entry->hrt_server.domain);
@@ -371,9 +321,11 @@ static noinline void requeue(struct task_struct *task, rt_domain_t *domain)
         BUG_ON(is_queued(task));
 
         if (is_released(task, litmus_clock())) {
+                TRACE_TASK(task, "requeuing on ready");
                 __add_ready(domain, task);
         } else {
                 /* Task needs to wait until it is released */
+                TRACE_TASK(task, "requeueing on release");
                 add_release(domain, task);
         }
 }
@@ -493,6 +445,115 @@ out:
         return rv;
 }
 
+
+/******************************************************************************
+ * CPU linkage methods
+ ******************************************************************************/
+
+static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b)
+{
+        int rv;
+        struct cpu_entry *first, *second;
+        struct task_struct *a, *b;
+
+        /* Clumsy, are casts allowed? */
+        first = _a->value;
+        second = _b->value;
+        a = first->linked;
+        b = second->linked;
+
+        /* Note that a and b are inverted: we want the lowest-priority CPU at
+         * the top of the heap.
+         */
+        if (a && b) {
+                if (is_hrt(a) && is_srt(b)) {
+                        rv = 0;
+                        goto out;
+                } else if (is_hrt(b) && is_srt(a)) {
+                        rv = 1;
+                        goto out;
+                }
+        }
+
+        rv = edf_higher_prio(b, a);
+out:
+        return rv;
+}
+
+/*
+ * Move the cpu entry to the correct place in the global cpu queue.
+ */
+static void update_cpu_position(struct cpu_entry* entry)
+{
+        if (bheap_node_in_heap(entry->hn))
+                bheap_delete(cpu_lower_prio, &cpu_heap, entry->hn);
+        bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn);
+}
+
+/* Caller must hold global lock lock */
+static struct cpu_entry* lowest_prio_cpu(void)
+{
+        struct bheap_node *hn;
+        hn = bheap_peek(cpu_lower_prio, &cpu_heap);
+        return hn->value;
+}
+
+/*
+ * Update the link of a CPU.
+ * Handles the case where the to-be-linked task is already scheduled on
+ * a different CPU.
+ */
+static noinline void link_task_to_cpu(struct task_struct *linked,
+                                      struct cpu_entry *entry)
+{
+        struct cpu_entry *sched;
+        struct task_struct* tmp;
+        int on_cpu;
+
+        BUG_ON(linked && !is_realtime(linked));
+
+        /* Currently linked task is set to be unlinked */
+        if (entry->linked) {
+                entry->linked->rt_param.linked_on = NO_CPU;
+        }
+
+        /* Link new task to CPU */
+        if (linked) {
+                set_rt_flags(linked, RT_F_RUNNING);
+                /* Handle task is already scheduled somewhere! */
+                on_cpu = linked->rt_param.scheduled_on;
+                if (on_cpu != NO_CPU) {
+                        sched = &per_cpu(cpu_entries, on_cpu);
+                        /* This should only happen if not linked already */
+                        BUG_ON(sched->linked == linked);
+
+                        /* If we are already scheduled on the CPU to which we
+                         * wanted to link, we don't need to do the swap --
+                         * we just link ourselves to the CPU and depend on
+                         * the caller to get things right
+                         */
+                        if (entry != sched) {
+                                TRACE_TASK(linked,
+                                           "already scheduled on %d, updating link.\n",
+                                           sched->cpu);
+                                tmp = sched->linked;
+                                linked->rt_param.linked_on = sched->cpu;
+                                sched->linked = linked;
+                                update_cpu_position(sched);
+                                linked = tmp;
+                        }
+                }
+                if (linked) /* Might be NULL due to swap */
+                        linked->rt_param.linked_on = entry->cpu;
+        }
+        entry->linked = linked;
+
+        if (linked)
+                TRACE_TASK(linked, "linked to %d.\n", entry->cpu);
+
+        update_cpu_position(entry);
+}
+
 static noinline void unlink(struct task_struct *task, rt_domain_t *domain)
 {
         struct cpu_entry *entry;
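
A note on the comparator above: bheap keeps whichever node wins the supplied
comparison at the top of the heap, so calling edf_higher_prio(b, a) with the
operands swapped turns the max-heap into a min-heap on task priority, and
bheap_peek() then returns the CPU whose linked task is cheapest to displace.
The same inversion trick in a runnable userspace sketch (editorial
illustration, not from the commit):

        #include <assert.h>

        /* A max-heap comparator and its operand-swapped inverse. */
        static int max_first(int a, int b) { return a > b; }
        static int min_first(int a, int b) { return max_first(b, a); }

        int main(void)
        {
                assert(max_first(5, 2)); /* 5 would sit atop a max-heap */
                assert(min_first(2, 5)); /* swap the operands and 2 wins */
                return 0;
        }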
@@ -523,7 +584,6 @@ static noinline void unlink(struct task_struct *task, rt_domain_t *domain)
         }
 }
 
-
 /******************************************************************************
  * Timer methods
  ******************************************************************************/
@@ -629,8 +689,11 @@ static enum hrtimer_restart budget_timer_fire(struct hrtimer *timer)
                 /* Need to preempt the currently running hrt task because
                  * its server has run out of time.
                  */
+                raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
                 next = take_next_ready(entry);
                 preempt(next, entry);
+                raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
+
         } else {
                 /* If the budget isn't 0, a task just finished executing */
                 litmus_reschedule_local();
@@ -677,7 +740,8 @@ static enum hrtimer_restart refill_timer_fire(struct hrtimer *timer)
         TRACE("refill timer fired on cpu %d at litmus time: %llu\n",
               entry->cpu, litmus_clock());
 
-        local_irq_save(flags);
+        /*local_irq_save(flags);*/
+        raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
 
         server->budget = server->wcet;
         server->deadline += server->period;
@@ -692,14 +756,15 @@ static enum hrtimer_restart refill_timer_fire(struct hrtimer *timer)
         hrtimer_set_expires(timer, ns_to_ktime(server->deadline));
 
         /* Activate scheduler, if necessary */
-        if (curr && is_hrt(curr) && is_eligible(curr, server)) {
+        if (curr && is_hrt(curr) && !is_eligible(curr, server)) {
                 next = take_next_ready(entry);
                 preempt(next, entry);
         } else {
                 check_for_hrt_preempt();
         }
 
-        local_irq_restore(flags);
+        /*local_irq_restore(flags);*/
+        raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
 
         return HRTIMER_RESTART;
 }
@@ -742,7 +807,9 @@ static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer)
         struct cpu_entry *entry = &__get_cpu_var(cpu_entries);
         struct hrt_server *server = &entry->hrt_server;
 
-        local_irq_save(flags);
+        /*local_irq_save(flags);*/
+        raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
+
         server->no_slack = 1;
 
         TRACE("slack timer fired at litmus time: %llu\n", litmus_clock());
@@ -750,7 +817,8 @@ static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer)
         /* activate scheduler, if necessary */
         check_for_hrt_preempt();
 
-        local_irq_restore(flags);
+        /*local_irq_restore(flags);*/
+        raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
 
         return HRTIMER_NORESTART;
 }
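
This hunk completes a pattern started in budget_timer_fire() and
refill_timer_fire(): local_irq_save()/local_irq_restore() only mask
interrupts on the local CPU, which no longer suffices once the timer
handlers touch srt_domain's ready queue and the new cpu_heap, both shared
across CPUs. The replacement pairs IRQ masking with cross-CPU mutual
exclusion; in sketch form (assuming the standard kernel spinlock API):

        unsigned long flags;

        /* Masks local IRQs *and* excludes other CPUs; local_irq_save()
         * alone provided only the former. */
        raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
        /* ... manipulate srt_domain / cpu_heap ... */
        raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);

Leaving the old calls commented out rather than deleted looks like
temporary scaffolding while the locking scheme settles.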
@@ -770,7 +838,7 @@ static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer)
 static long edf_hsb_activate_plugin(void)
 {
         lt_t slack_fire, common_server_start = litmus_clock();
-        struct cpu_entry *cpu_entry;
+        struct cpu_entry *entry;
         struct hrt_server* server;
         long rv = 0;
         int cpu;
@@ -782,10 +850,16 @@ static long edf_hsb_activate_plugin(void)
                 goto out;
         }
 
+        bheap_init(&cpu_heap);
+
         for_each_online_cpu(cpu) {
-                cpu_entry = &per_cpu(cpu_entries, cpu);
-                server = &cpu_entry->hrt_server;
+                entry = &per_cpu(cpu_entries, cpu);
+
+                entry->hn = &cpu_heap_node[cpu];
+                bheap_node_init(&entry->hn, entry);
+                update_cpu_position(entry);
 
+                server = &entry->hrt_server;
                 server->no_slack = 0;
                 server->deadline = common_server_start + server->period;
                 server->budget = server->wcet;
@@ -794,15 +868,15 @@ static long edf_hsb_activate_plugin(void)
 
                 TRACE("setting up cpu %d to have timer deadline %llu\n",
                       cpu, server->deadline);
-                hrtimer_start_on(cpu, &cpu_entry->budget_timer_info,
+                hrtimer_start_on(cpu, &entry->budget_timer_info,
                                  &server->refill_timer,
                                  ns_to_ktime(server->deadline),
                                  HRTIMER_MODE_ABS_PINNED);
 
-                hrtimer_start_on(cpu, &cpu_entry->slack_timer_info,
+                hrtimer_start_on(cpu, &entry->slack_timer_info,
                                  &server->slack_timer,
                                  ns_to_ktime(slack_fire),
                                  HRTIMER_MODE_ABS_PINNED);
 
         }
 out:
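
One detail worth flagging in the loop above: bheap_node_init() is passed
&entry->hn even though entry->hn is already a pointer. Assuming the usual
LITMUS^RT bheap API (an assumption; verify against this tree's bheap.h),
the initializer takes a struct bheap_node ** so it can record a
back-reference from the node to the pointer that owns it:

        /* Assumed signature, as used by other LITMUS^RT plugins:
         *   void bheap_node_init(struct bheap_node **h, void *value);
         */
        entry->hn = &cpu_heap_node[cpu];    /* static storage, no kmalloc  */
        bheap_node_init(&entry->hn, entry); /* node->value = entry         */
        update_cpu_position(entry);         /* seed the node into cpu_heap */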
@@ -974,15 +1048,14 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
  */
 static void edf_hsb_task_block(struct task_struct *task)
 {
-        struct cpu_entry *entry;
-        struct hrt_server *server = NULL;
-        rt_domain_t *domain;
+        struct cpu_entry *entry = &__get_cpu_var(cpu_entries);
+        struct hrt_server *server = &entry->hrt_server;
+        rt_domain_t *domain = NULL;
         unsigned long flags;
 
         TRACE_TASK(task, "block at %llu\n", litmus_clock());
         BUG_ON(!is_realtime(task));
 
-        entry = &__get_cpu_var(cpu_entries);
         /*
          * Which domain should we lock?
          */
@@ -993,7 +1066,7 @@ static void edf_hsb_task_block(struct task_struct *task)
         } else
                 domain = &srt_domain;
 
-        raw_spin_lock_irqsave(&domain->ready_lock, flags);
+        raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
 
         if (is_hrt(task)) {
                 /* If an hrt task is blocked, we must do the work of the
@@ -1006,7 +1079,7 @@ static void edf_hsb_task_block(struct task_struct *task)
         /* Unlink if the task is linked to a CPU */
         unlink(task, domain);
 
-        raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
+        raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
 }
 
 static void edf_hsb_task_exit(struct task_struct *task)
@@ -1028,9 +1101,9 @@ static void edf_hsb_task_exit(struct task_struct *task)
         }
 
         /* Unlink if the task is linked to a CPU */
-        raw_spin_lock_irqsave(&domain->ready_lock, flags);
+        raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
         unlink(task, domain);
-        raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
+        raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
 
         entry->scheduled = NULL;
 }
@@ -1040,17 +1113,14 @@ static void edf_hsb_task_exit(struct task_struct *task)
  */
 static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 {
-        struct cpu_entry *entry;
-        struct hrt_server *server;
-        rt_domain_t *domain;
         unsigned long flags;
+        rt_domain_t *domain;
+        struct cpu_entry *entry = &per_cpu(cpu_entries, task_cpu(task));
+        struct hrt_server *server = &entry->hrt_server;
 
         TRACE_TASK(task, "edf_hsb: task new, cpu = %d\n",
                    task->rt_param.task_params.cpu);
 
-        entry = &per_cpu(cpu_entries, task_cpu(task));
-        server = &entry->hrt_server;
-
         if (is_hrt(task)) {
                 BUG_ON(task_cpu(task) != entry->cpu);
                 domain = &server->domain;
@@ -1060,7 +1130,7 @@ static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
                 return;
         }
 
-        raw_spin_lock_irqsave(&domain->ready_lock, flags);
+        raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
 
         /* Setup job parameters */
         release_at(task, litmus_clock());
@@ -1079,7 +1149,7 @@ static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 
         job_arrival(domain, task);
 
-        raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
+        raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
 }
 
 static void edf_hsb_task_wake_up(struct task_struct *task)
@@ -1102,7 +1172,7 @@ static void edf_hsb_task_wake_up(struct task_struct *task)
                 domain = &srt_domain;
         }
 
-        raw_spin_lock_irqsave(&domain->ready_lock, flags);
+        raw_spin_lock_irqsave(&srt_domain.ready_lock, flags);
 
         /* If job resumes after being suspended due to acquiring a semaphore,
          * it should never be treated as a job release */
@@ -1126,7 +1196,7 @@ static void edf_hsb_task_wake_up(struct task_struct *task)
 
         requeue(task, domain);
 
-        raw_spin_unlock_irqrestore(&domain->ready_lock, flags);
+        raw_spin_unlock_irqrestore(&srt_domain.ready_lock, flags);
 }
 
 /*
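
Taken together, the hunks from edf_hsb_task_block() onward make every
plugin callback serialize on srt_domain.ready_lock even when `domain`
points at a per-CPU HRT server domain, so that one lock effectively
becomes a single global scheduler lock. If that is the intent, a pair of
hypothetical wrappers (editorial sketch only, not in the commit) would
make it explicit and keep the callbacks uniform:

        /* Hypothetical helpers; not part of this commit. */
        static inline void edf_hsb_global_lock(unsigned long *flags)
        {
                raw_spin_lock_irqsave(&srt_domain.ready_lock, *flags);
        }

        static inline void edf_hsb_global_unlock(unsigned long *flags)
        {
                raw_spin_unlock_irqrestore(&srt_domain.ready_lock, *flags);
        }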