author     Michael Ellerman <michael@ellerman.id.au>    2005-07-07 20:56:29 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>        2005-07-07 21:23:41 -0400
commit     c66d5dd6b5b62e1435b95c0fb42f6bcddeb395ea (patch)
tree       bb7a7241f07b1f7c75101469f87e2485f486e592 /arch
parent     d200903e11f6867b91dffa81b2038e55be599f49 (diff)
[PATCH] ppc64: Move pSeries idle functions into pSeries_setup.c
dedicated_idle() and shared_idle() are only used by pSeries, so move them
into pSeries_setup.c.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch')
-rw-r--r--   arch/ppc64/kernel/idle.c           | 131
-rw-r--r--   arch/ppc64/kernel/pSeries_setup.c  | 127
2 files changed, 127 insertions(+), 131 deletions(-)
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 226152467791..69b7c22bad54 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -74,137 +74,6 @@ int default_idle(void)
 	return 0;
 }
 
-#ifdef CONFIG_PPC_PSERIES
-
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
-
-int dedicated_idle(void)
-{
-	long oldval;
-	struct paca_struct *lpaca = get_paca(), *ppaca;
-	unsigned long start_snooze;
-	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
-	unsigned int cpu = smp_processor_id();
-
-	ppaca = &paca[cpu ^ 1];
-
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		lpaca->lppaca.idle = 1;
-
-		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
-		if (!oldval) {
-			set_thread_flag(TIF_POLLING_NRFLAG);
-			start_snooze = __get_tb() +
-				*smt_snooze_delay * tb_ticks_per_usec;
-			while (!need_resched() && !cpu_is_offline(cpu)) {
-				/*
-				 * Go into low thread priority and possibly
-				 * low power mode.
-				 */
-				HMT_low();
-				HMT_very_low();
-
-				if (*smt_snooze_delay == 0 ||
-				    __get_tb() < start_snooze)
-					continue;
-
-				HMT_medium();
-
-				if (!(ppaca->lppaca.idle)) {
-					local_irq_disable();
-
-					/*
-					 * We are about to sleep the thread
-					 * and so wont be polling any
-					 * more.
-					 */
-					clear_thread_flag(TIF_POLLING_NRFLAG);
-
-					/*
-					 * SMT dynamic mode. Cede will result
-					 * in this thread going dormant, if the
-					 * partner thread is still doing work.
-					 * Thread wakes up if partner goes idle,
-					 * an interrupt is presented, or a prod
-					 * occurs. Returning from the cede
-					 * enables external interrupts.
-					 */
-					if (!need_resched())
-						cede_processor();
-					else
-						local_irq_enable();
-				} else {
-					/*
-					 * Give the HV an opportunity at the
-					 * processor, since we are not doing
-					 * any work.
-					 */
-					poll_pending();
-				}
-			}
-
-			clear_thread_flag(TIF_POLLING_NRFLAG);
-		} else {
-			set_need_resched();
-		}
-
-		HMT_medium();
-		lpaca->lppaca.idle = 0;
-		schedule();
-		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-	return 0;
-}
-
-static int shared_idle(void)
-{
-	struct paca_struct *lpaca = get_paca();
-	unsigned int cpu = smp_processor_id();
-
-	while (1) {
-		/*
-		 * Indicate to the HV that we are idle. Now would be
-		 * a good time to find other work to dispatch.
-		 */
-		lpaca->lppaca.idle = 1;
-
-		while (!need_resched() && !cpu_is_offline(cpu)) {
-			local_irq_disable();
-
-			/*
-			 * Yield the processor to the hypervisor. We return if
-			 * an external interrupt occurs (which are driven prior
-			 * to returning here) or if a prod occurs from another
-			 * processor. When returning here, external interrupts
-			 * are enabled.
-			 *
-			 * Check need_resched() again with interrupts disabled
-			 * to avoid a race.
-			 */
-			if (!need_resched())
-				cede_processor();
-			else
-				local_irq_enable();
-		}
-
-		HMT_medium();
-		lpaca->lppaca.idle = 0;
-		schedule();
-		if (cpu_is_offline(smp_processor_id()) &&
-		    system_state == SYSTEM_RUNNING)
-			cpu_die();
-	}
-
-	return 0;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
 int native_idle(void)
 {
 	while(1) {
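
The dedicated_idle() loop removed above implements a two-phase "snooze then
cede" pattern: poll briefly at low thread priority in case work arrives
quickly, then cede the SMT thread to the hypervisor while the partner thread
is still busy. Below is a minimal standalone sketch of that pattern, under
the assumption that read_timebase(), work_pending(), partner_thread_idle()
and cede_to_hypervisor() are hypothetical stand-ins for the kernel's
__get_tb(), need_resched(), the ppaca->lppaca.idle test and cede_processor();
it is an illustration, not the kernel code, and it omits the HMT priority
twiddling.

/* Sketch of the snooze-then-cede idle pattern. All helpers below are
 * hypothetical stubs standing in for the kernel primitives. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t now;                                      /* fake timebase */
static uint64_t read_timebase(void) { return now++; }     /* ~__get_tb() */
static bool work_pending(void) { return now > 500; }      /* ~need_resched() */
static bool partner_thread_idle(void) { return false; }   /* ~ppaca->lppaca.idle */
static int cedes;
static void cede_to_hypervisor(void) { cedes++; now += 50; } /* ~cede_processor() */

static void snooze_then_cede(uint64_t snooze_ticks)
{
	uint64_t deadline = read_timebase() + snooze_ticks;

	while (!work_pending()) {
		/* Phase 1: cheap polling until the snooze window expires.
		 * As in the kernel code, a delay of 0 means poll forever
		 * and never cede. */
		if (snooze_ticks == 0 || read_timebase() < deadline)
			continue;

		/* Phase 2: the partner thread is still busy, so cede this
		 * SMT thread to the hypervisor and let the partner have
		 * the core's execution resources. */
		if (!partner_thread_idle())
			cede_to_hypervisor();
	}
}

int main(void)
{
	snooze_then_cede(100);
	printf("ceded %d times before work arrived\n", cedes);
	return 0;
}
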
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 44d9af72d225..849ed9ba7856 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -418,6 +418,133 @@ static int __init pSeries_probe(int platform)
 	return 1;
 }
 
+DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+
+int dedicated_idle(void)
+{
+	long oldval;
+	struct paca_struct *lpaca = get_paca(), *ppaca;
+	unsigned long start_snooze;
+	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
+	unsigned int cpu = smp_processor_id();
+
+	ppaca = &paca[cpu ^ 1];
+
+	while (1) {
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
+		lpaca->lppaca.idle = 1;
+
+		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+		if (!oldval) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+			start_snooze = __get_tb() +
+				*smt_snooze_delay * tb_ticks_per_usec;
+			while (!need_resched() && !cpu_is_offline(cpu)) {
+				/*
+				 * Go into low thread priority and possibly
+				 * low power mode.
+				 */
+				HMT_low();
+				HMT_very_low();
+
+				if (*smt_snooze_delay == 0 ||
+				    __get_tb() < start_snooze)
+					continue;
+
+				HMT_medium();
+
+				if (!(ppaca->lppaca.idle)) {
+					local_irq_disable();
+
+					/*
+					 * We are about to sleep the thread
+					 * and so wont be polling any
+					 * more.
+					 */
+					clear_thread_flag(TIF_POLLING_NRFLAG);
+
+					/*
+					 * SMT dynamic mode. Cede will result
+					 * in this thread going dormant, if the
+					 * partner thread is still doing work.
+					 * Thread wakes up if partner goes idle,
+					 * an interrupt is presented, or a prod
+					 * occurs. Returning from the cede
+					 * enables external interrupts.
+					 */
+					if (!need_resched())
+						cede_processor();
+					else
+						local_irq_enable();
+				} else {
+					/*
+					 * Give the HV an opportunity at the
+					 * processor, since we are not doing
+					 * any work.
+					 */
+					poll_pending();
+				}
+			}
+
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		} else {
+			set_need_resched();
+		}
+
+		HMT_medium();
+		lpaca->lppaca.idle = 0;
+		schedule();
+		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+			cpu_die();
+	}
+	return 0;
+}
+
+static int shared_idle(void)
+{
+	struct paca_struct *lpaca = get_paca();
+	unsigned int cpu = smp_processor_id();
+
+	while (1) {
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
+		lpaca->lppaca.idle = 1;
+
+		while (!need_resched() && !cpu_is_offline(cpu)) {
+			local_irq_disable();
+
+			/*
+			 * Yield the processor to the hypervisor. We return if
+			 * an external interrupt occurs (which are driven prior
+			 * to returning here) or if a prod occurs from another
+			 * processor. When returning here, external interrupts
+			 * are enabled.
+			 *
+			 * Check need_resched() again with interrupts disabled
+			 * to avoid a race.
+			 */
+			if (!need_resched())
+				cede_processor();
+			else
+				local_irq_enable();
+		}
+
+		HMT_medium();
+		lpaca->lppaca.idle = 0;
+		schedule();
+		if (cpu_is_offline(smp_processor_id()) &&
+		    system_state == SYSTEM_RUNNING)
+			cpu_die();
+	}
+
+	return 0;
+}
+
 struct machdep_calls __initdata pSeries_md = {
 	.probe = pSeries_probe,
 	.setup_arch = pSeries_setup_arch,
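
One subtlety carried over verbatim in the move: shared_idle() (and the cede
path of dedicated_idle()) re-checks need_resched() after local_irq_disable(),
so a wakeup cannot slip in between the unlocked check and the cede. A
compilable sketch of that sequence follows; irq_disable(), irq_enable(),
work_pending() and cede_to_hypervisor() are hypothetical stand-ins for
local_irq_disable(), local_irq_enable(), need_resched() and cede_processor().

/* Sketch of the race-free cede sequence. All helpers are hypothetical
 * stubs standing in for the kernel primitives named above. */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_on = true;
static void irq_disable(void) { irqs_on = false; }
static void irq_enable(void)  { irqs_on = true; }
static bool work_pending(void) { return false; }
static void cede_to_hypervisor(void)
{
	printf("ceding to the hypervisor\n");
	irqs_on = true;   /* the real cede returns with interrupts enabled */
}

/* Re-check for work *after* disabling interrupts: an interrupt that
 * would set the resched flag can no longer be delivered before the
 * cede, so we either observe the flag here and back out, or we cede
 * and the pending interrupt wakes the ceded thread immediately. */
static void cede_unless_work(void)
{
	irq_disable();
	if (!work_pending())
		cede_to_hypervisor();
	else
		irq_enable();
}

int main(void)
{
	cede_unless_work();
	printf("interrupts %s\n", irqs_on ? "on" : "off");
	return 0;
}
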