author     Deepthi Dharwar <deepthi@linux.vnet.ibm.com>    2011-11-29 21:46:55 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>    2011-12-07 21:57:20 -0500
commit     e179816ce60033ce560b28e01bc555ed5116cbe9 (patch)
tree       b41c3685a4ee78782363e374e481b3a90cfc8004 /arch/powerpc/platforms
parent     707827f3387d9b260d50fa697885a4042cea3bf4 (diff)
powerpc/cpuidle: Enable cpuidle and directly call cpuidle_idle_call() for pSeries
This patch enables cpuidle for pSeries: pSeries_idle() is called directly from the idle loop, and as a result the cpuidle driver registered with the cpuidle subsystem comes into action. If the driver or the cpuidle framework fails to load, default idle is executed from within the same function. This patch also removes the routines pseries_shared_idle_sleep() and pseries_dedicated_idle_sleep(), as they are now implemented as part of the pseries_idle cpuidle driver.

Signed-off-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Signed-off-by: Trinabh Gupta <g.trinabh@gmail.com>
Signed-off-by: Arun R Bharadwaj <arun.r.bharadwaj@gmail.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
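For context, the sketch below shows how a cpuidle back-end of this kernel generation might register the states that cpuidle_idle_call() selects from once pSeries_idle() is wired up as ppc_md.power_save. It is not the actual pseries driver (that driver is introduced by a companion patch in this series); the identifiers pseries_snooze, pseries_idle_driver and pseries_cpuidle_init, and the single "snooze" state, are illustrative assumptions only.

/*
 * Illustrative sketch only: not the actual pseries back-end, which is
 * added by a companion patch in this series.  Identifiers here are
 * hypothetical; the structure layout follows the cpuidle API of this
 * era (idle states embedded in struct cpuidle_driver).
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <asm/processor.h>

static int pseries_snooze(struct cpuidle_device *dev,
                          struct cpuidle_driver *drv, int index)
{
        /*
         * Drop to low SMT thread priority while "in" the state; a real
         * driver would poll here until a reschedule is needed.  Return
         * the index of the state actually entered to the governor.
         */
        HMT_low();
        HMT_very_low();
        HMT_medium();
        return index;
}

static struct cpuidle_driver pseries_idle_driver = {
        .name           = "pseries_idle",
        .owner          = THIS_MODULE,
        .state_count    = 1,
        .states         = {
                {
                        .name             = "snooze",
                        .desc             = "poll at low thread priority",
                        .exit_latency     = 0,
                        .target_residency = 0,
                        .enter            = pseries_snooze,
                },
        },
};

static int __init pseries_cpuidle_init(void)
{
        /*
         * A real driver also registers a per-CPU cpuidle_device for each
         * online CPU.  Once registration succeeds, cpuidle_idle_call()
         * in pSeries_idle() can pick one of these states; if it fails,
         * pSeries_idle() falls back to HMT_low()/HMT_very_low().
         */
        return cpuidle_register_driver(&pseries_idle_driver);
}
device_initcall(pseries_cpuidle_init);

With a back-end like this registered, the fallback path in pSeries_idle() should only run when the cpuidle framework is disabled or registration fails.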
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/Kconfig            6
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c  101
2 files changed, 23 insertions(+), 84 deletions(-)
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 3fe6d927ad70..31e1adeaa92a 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -211,6 +211,12 @@ config PPC_PASEMI_CPUFREQ
 
 endmenu
 
+menu "CPUIdle driver"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 config PPC601_SYNC_FIX
         bool "Workarounds for PPC601 bugs"
         depends on 6xx && (PPC_PREP || PPC_PMAC)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index f2446da7f2d5..164839cb9fcd 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -39,6 +39,7 @@
 #include <linux/irq.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
+#include <linux/cpuidle.h>
 
 #include <asm/mmu.h>
 #include <asm/processor.h>
@@ -74,9 +75,6 @@ EXPORT_SYMBOL(CMO_PageSize);
 
 int fwnmi_active;        /* TRUE if an FWNMI handler is present */
 
-static void pseries_shared_idle_sleep(void);
-static void pseries_dedicated_idle_sleep(void);
-
 static struct device_node *pSeries_mpic_node;
 
 static void pSeries_show_cpuinfo(struct seq_file *m)
@@ -351,6 +349,21 @@ static int alloc_dispatch_log_kmem_cache(void)
 }
 early_initcall(alloc_dispatch_log_kmem_cache);
 
+static void pSeries_idle(void)
+{
+        /* This would call on the cpuidle framework, and the back-end pseries
+         * driver to go to idle states
+         */
+        if (cpuidle_idle_call()) {
+                /* On error, execute default handler
+                 * to go into low thread priority and possibly
+                 * low power mode.
+                 */
+                HMT_low();
+                HMT_very_low();
+        }
+}
+
 static void __init pSeries_setup_arch(void)
 {
         /* Discover PIC type and setup ppc_md accordingly */
@@ -373,18 +386,9 @@ static void __init pSeries_setup_arch(void)
 
         pSeries_nvram_init();
 
-        /* Choose an idle loop */
         if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                 vpa_init(boot_cpuid);
-                if (get_lppaca()->shared_proc) {
-                        printk(KERN_DEBUG "Using shared processor idle loop\n");
-                        ppc_md.power_save = pseries_shared_idle_sleep;
-                } else {
-                        printk(KERN_DEBUG "Using dedicated idle loop\n");
-                        ppc_md.power_save = pseries_dedicated_idle_sleep;
-                }
-        } else {
-                printk(KERN_DEBUG "Using default idle loop\n");
+                ppc_md.power_save = pSeries_idle;
         }
 
         if (firmware_has_feature(FW_FEATURE_LPAR))
@@ -585,77 +589,6 @@ static int __init pSeries_probe(void)
         return 1;
 }
 
-static void pseries_dedicated_idle_sleep(void)
-{
-        unsigned int cpu = smp_processor_id();
-        unsigned long start_snooze;
-        unsigned long in_purr, out_purr;
-        long snooze = __get_cpu_var(smt_snooze_delay);
-
-        /*
-         * Indicate to the HV that we are idle. Now would be
-         * a good time to find other work to dispatch.
-         */
-        get_lppaca()->idle = 1;
-        get_lppaca()->donate_dedicated_cpu = 1;
-        in_purr = mfspr(SPRN_PURR);
-
-        /*
-         * We come in with interrupts disabled, and need_resched()
-         * has been checked recently. If we should poll for a little
-         * while, do so.
-         */
-        if (snooze) {
-                start_snooze = get_tb() + snooze * tb_ticks_per_usec;
-                local_irq_enable();
-                set_thread_flag(TIF_POLLING_NRFLAG);
-
-                while ((snooze < 0) || (get_tb() < start_snooze)) {
-                        if (need_resched() || cpu_is_offline(cpu))
-                                goto out;
-                        ppc64_runlatch_off();
-                        HMT_low();
-                        HMT_very_low();
-                }
-
-                HMT_medium();
-                clear_thread_flag(TIF_POLLING_NRFLAG);
-                smp_mb();
-                local_irq_disable();
-                if (need_resched() || cpu_is_offline(cpu))
-                        goto out;
-        }
-
-        cede_processor();
-
-out:
-        HMT_medium();
-        out_purr = mfspr(SPRN_PURR);
-        get_lppaca()->wait_state_cycles += out_purr - in_purr;
-        get_lppaca()->donate_dedicated_cpu = 0;
-        get_lppaca()->idle = 0;
-}
-
-static void pseries_shared_idle_sleep(void)
-{
-        /*
-         * Indicate to the HV that we are idle. Now would be
-         * a good time to find other work to dispatch.
-         */
-        get_lppaca()->idle = 1;
-
-        /*
-         * Yield the processor to the hypervisor. We return if
-         * an external interrupt occurs (which are driven prior
-         * to returning here) or if a prod occurs from another
-         * processor. When returning here, external interrupts
-         * are enabled.
-         */
-        cede_processor();
-
-        get_lppaca()->idle = 0;
-}
-
 static int pSeries_pci_probe_mode(struct pci_bus *bus)
 {
         if (firmware_has_feature(FW_FEATURE_LPAR))