Diffstat (limited to 'arch/ppc64/kernel/pSeries_setup.c')
 arch/ppc64/kernel/pSeries_setup.c | 156 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 156 insertions(+), 0 deletions(-)
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 44d9af72d225..5bec956e44a0 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -19,6 +19,7 @@
 #undef DEBUG
 
 #include <linux/config.h>
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -82,6 +83,9 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
 extern void pSeries_system_reset_exception(struct pt_regs *regs);
 extern int pSeries_machine_check_exception(struct pt_regs *regs);
 
+static int pseries_shared_idle(void);
+static int pseries_dedicated_idle(void);
+
 static volatile void __iomem * chrp_int_ack_special;
 struct mpic *pSeries_mpic;
 
@@ -229,6 +233,20 @@ static void __init pSeries_setup_arch(void)
 
 	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
 		vpa_init(boot_cpuid);
+
+	/* Choose an idle loop */
+	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
+		if (get_paca()->lppaca.shared_proc) {
+			printk(KERN_INFO "Using shared processor idle loop\n");
+			ppc_md.idle_loop = pseries_shared_idle;
+		} else {
+			printk(KERN_INFO "Using dedicated idle loop\n");
+			ppc_md.idle_loop = pseries_dedicated_idle;
+		}
+	} else {
+		printk(KERN_INFO "Using default idle loop\n");
+		ppc_md.idle_loop = default_idle;
+	}
 }
 
 static int __init pSeries_init_panel(void)
@@ -418,6 +436,144 @@ static int __init pSeries_probe(int platform)
 	return 1;
 }
 
+DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+
+static inline void dedicated_idle_sleep(unsigned int cpu)
+{
+	struct paca_struct *ppaca = &paca[cpu ^ 1];
+
+	/* Only sleep if the other thread is not idle */
+	if (!(ppaca->lppaca.idle)) {
+		local_irq_disable();
+
+		/*
+		 * We are about to sleep the thread and so won't be polling
+		 * any more.
+		 */
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+
+		/*
+		 * SMT dynamic mode. Cede will result in this thread going
+		 * dormant, if the partner thread is still doing work. Thread
+		 * wakes up if partner goes idle, an interrupt is presented, or
+		 * a prod occurs. Returning from the cede enables external
+		 * interrupts.
+		 */
+		if (!need_resched())
+			cede_processor();
+		else
+			local_irq_enable();
+	} else {
+		/*
+		 * Give the HV an opportunity at the processor, since we are
+		 * not doing any work.
+		 */
+		poll_pending();
+	}
+}
+
+static int pseries_dedicated_idle(void)
+{
+	long oldval;
+	struct paca_struct *lpaca = get_paca();
+	unsigned int cpu = smp_processor_id();
+	unsigned long start_snooze;
+	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
+
+	while (1) {
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
+		lpaca->lppaca.idle = 1;
+
+		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+		if (!oldval) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+
+			start_snooze = __get_tb() +
+				*smt_snooze_delay * tb_ticks_per_usec;
+
+			while (!need_resched() && !cpu_is_offline(cpu)) {
+				ppc64_runlatch_off();
+
+				/*
+				 * Go into low thread priority and possibly
+				 * low power mode.
+				 */
+				HMT_low();
+				HMT_very_low();
+
+				if (*smt_snooze_delay != 0 &&
+				    __get_tb() > start_snooze) {
+					HMT_medium();
+					dedicated_idle_sleep(cpu);
+				}
+
+			}
+
+			HMT_medium();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		} else {
+			set_need_resched();
+		}
+
+		lpaca->lppaca.idle = 0;
+		ppc64_runlatch_on();
+
+		schedule();
+
+		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+			cpu_die();
+	}
+}
+
+static int pseries_shared_idle(void)
+{
+	struct paca_struct *lpaca = get_paca();
+	unsigned int cpu = smp_processor_id();
+
+	while (1) {
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
+		lpaca->lppaca.idle = 1;
+
+		while (!need_resched() && !cpu_is_offline(cpu)) {
+			local_irq_disable();
+			ppc64_runlatch_off();
+
+			/*
+			 * Yield the processor to the hypervisor. We return if
+			 * an external interrupt occurs (these are driven prior
+			 * to returning here) or if a prod occurs from another
+			 * processor. When returning here, external interrupts
+			 * are enabled.
+			 *
+			 * Check need_resched() again with interrupts disabled
+			 * to avoid a race.
+			 */
+			if (!need_resched())
+				cede_processor();
+			else
+				local_irq_enable();
+
+			HMT_medium();
+		}
+
+		lpaca->lppaca.idle = 0;
+		ppc64_runlatch_on();
+
+		schedule();
+
+		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+			cpu_die();
+	}
+
+	return 0;
+}
+
 struct machdep_calls __initdata pSeries_md = {
 	.probe			= pSeries_probe,
 	.setup_arch		= pSeries_setup_arch,
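
For orientation, the hook installed in pSeries_setup_arch() is only useful if the generic ppc64 idle path calls it. A minimal sketch of that dispatch, assuming a companion change routes cpu_idle() through ppc_md.idle_loop (the exact call site is not shown in this diff, and the fallback mirrors the default_idle assignment above):

/*
 * Sketch only: assumes the generic ppc64 idle entry consults
 * ppc_md.idle_loop and falls back to default_idle(), matching the
 * fallback installed in pSeries_setup_arch() above.
 */
int cpu_idle(void)
{
	/* The idle_loop implementations in this patch never return
	 * under normal operation; they call schedule() internally. */
	return ppc_md.idle_loop ? ppc_md.idle_loop() : default_idle();
}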
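In pseries_dedicated_idle(), start_snooze converts the per-cpu smt_snooze_delay from microseconds into timebase ticks, so the thread spins at low priority for that long before trying dedicated_idle_sleep(). A self-contained illustration of the arithmetic, with made-up values (the 512 ticks/us figure is hypothetical, not taken from this patch):

#include <stdio.h>

/* Illustrates the deadline math from pseries_dedicated_idle():
 * now + delay_usec * ticks_per_usec. All values are illustrative. */
static unsigned long snooze_deadline(unsigned long now_tb,
				     unsigned long delay_usec,
				     unsigned long ticks_per_usec)
{
	return now_tb + delay_usec * ticks_per_usec;
}

int main(void)
{
	/* e.g. a hypothetical 512 timebase ticks per microsecond and a
	 * 100us snooze delay: poll for 51200 ticks past "now". */
	printf("deadline: %lu\n", snooze_deadline(1000000UL, 100UL, 512UL));
	return 0;
}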
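Both sleep paths funnel through cede_processor(), which this patch does not define. In pSeries trees of this vintage it is, to the best of my knowledge, a thin wrapper over the H_CEDE hypervisor call; the sketch below is an assumption based on the surrounding pSeries hcall convention, not code from this diff:

/*
 * Sketch of the assumed cede_processor() helper: yield this hardware
 * thread to the hypervisor until an interrupt or prod wakes it.
 * H_CEDE and plpar_hcall_norets() are assumptions, not from this diff.
 */
static inline long cede_processor(void)
{
	return plpar_hcall_norets(H_CEDE);
}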