Diffstat (limited to 'arch/ppc64/kernel/iSeries_setup.c')
-rw-r--r--  arch/ppc64/kernel/iSeries_setup.c  119
1 file changed, 86 insertions(+), 33 deletions(-)
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index 86966ce76b58..077c82fc9f3a 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -24,7 +24,6 @@
 #include <linux/smp.h>
 #include <linux/param.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/seq_file.h>
 #include <linux/kdev_t.h>
@@ -676,7 +675,6 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
  */
 static void __init iSeries_setup_arch(void)
 {
-	void *eventStack;
 	unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
 
 	/* Add an eye catcher and the systemcfg layout version number */
@@ -685,24 +683,7 @@ static void __init iSeries_setup_arch(void)
 	systemcfg->version.minor = SYSTEMCFG_MINOR;
 
 	/* Setup the Lp Event Queue */
-
-	/* Allocate a page for the Event Stack
-	 * The hypervisor wants the absolute real address, so
-	 * we subtract out the KERNELBASE and add in the
-	 * absolute real address of the kernel load area
-	 */
-	eventStack = alloc_bootmem_pages(LpEventStackSize);
-	memset(eventStack, 0, LpEventStackSize);
-
-	/* Invoke the hypervisor to initialize the event stack */
-	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
-
-	/* Initialize fields in our Lp Event Queue */
-	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
-	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
-	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
-					(LpEventStackSize - LpEventMaxSize);
-	xItLpQueue.xIndex = 0;
+	setup_hvlpevent_queue();
 
 	/* Compute processor frequency */
 	procFreqHz = ((1UL << 34) * 1000000) /
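Note: setup_hvlpevent_queue() itself is added outside this file and does not appear in this diff. As a sketch only, assuming the helper simply consolidates the block deleted above verbatim, it would read roughly as follows (field and constant names are taken from the deleted code; the real helper may differ):

void __init setup_hvlpevent_queue(void)
{
	void *eventStack;

	/*
	 * Allocate a page for the Event Stack.  The hypervisor wants
	 * the absolute real address, so we subtract out the KERNELBASE
	 * and add in the absolute real address of the kernel load area.
	 */
	eventStack = alloc_bootmem_pages(LpEventStackSize);
	memset(eventStack, 0, LpEventStackSize);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

	/* Initialize fields in our Lp Event Queue */
	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
					(LpEventStackSize - LpEventMaxSize);
	xItLpQueue.xIndex = 0;
}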
@@ -853,27 +834,91 @@ static int __init iSeries_src_init(void)
 
 late_initcall(iSeries_src_init);
 
-static int set_spread_lpevents(char *str)
+static inline void process_iSeries_events(void)
 {
-	unsigned long i;
-	unsigned long val = simple_strtoul(str, NULL, 0);
+	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
+}
+
+static void yield_shared_processor(void)
+{
+	unsigned long tb;
+
+	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
+				    HvCall_MaskLpEvent |
+				    HvCall_MaskLpProd |
+				    HvCall_MaskTimeout);
+
+	tb = get_tb();
+	/* Compute future tb value when yield should expire */
+	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
 
 	/*
-	 * The parameter is the number of processors to share in processing
-	 * lp events.
+	 * The decrementer stops during the yield.  Force a fake decrementer
+	 * here and let the timer_interrupt code sort out the actual time.
 	 */
-	if (( val > 0) && (val <= NR_CPUS)) {
-		for (i = 1; i < val; ++i)
-			paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
+	get_paca()->lppaca.int_dword.fields.decr_int = 1;
+	process_iSeries_events();
+}
 
-		printk("lpevent processing spread over %ld processors\n", val);
-	} else {
-		printk("invalid spread_lpevents %ld\n", val);
+static int iseries_shared_idle(void)
+{
+	while (1) {
+		while (!need_resched() && !hvlpevent_is_pending()) {
+			local_irq_disable();
+			ppc64_runlatch_off();
+
+			/* Recheck with irqs off */
+			if (!need_resched() && !hvlpevent_is_pending())
+				yield_shared_processor();
+
+			HMT_medium();
+			local_irq_enable();
+		}
+
+		ppc64_runlatch_on();
+
+		if (hvlpevent_is_pending())
+			process_iSeries_events();
+
+		schedule();
+	}
+
+	return 0;
+}
+
+static int iseries_dedicated_idle(void)
+{
+	long oldval;
+
+	while (1) {
+		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+		if (!oldval) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+
+			while (!need_resched()) {
+				ppc64_runlatch_off();
+				HMT_low();
+
+				if (hvlpevent_is_pending()) {
+					HMT_medium();
+					ppc64_runlatch_on();
+					process_iSeries_events();
+				}
+			}
+
+			HMT_medium();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		} else {
+			set_need_resched();
+		}
+
+		ppc64_runlatch_on();
+		schedule();
 	}
 
-	return 1;
+	return 0;
 }
-__setup("spread_lpevents=", set_spread_lpevents);
 
 #ifndef CONFIG_PCI
 void __init iSeries_init_IRQ(void) { }
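Note: the dedicated idle loop above sets TIF_POLLING_NRFLAG while it spins on need_resched(). The point of the flag is on the scheduler side: a CPU waking a task may skip the reschedule IPI when the target CPU has advertised that it is already polling the flag. A simplified sketch of that counterpart logic, paraphrased from the resched_task() in kernel/sched.c of this era (the helper name here is illustrative and details may differ):

static void resched_task_sketch(struct task_struct *p)
{
	int cpu = task_cpu(p);

	set_tsk_need_resched(p);

	/*
	 * An idle CPU polling need_resched() will notice the flag on
	 * its own; only a non-polling CPU needs a wakeup IPI.
	 */
	if (cpu != smp_processor_id() &&
	    !test_tsk_thread_flag(p, TIF_POLLING_NRFLAG))
		smp_send_reschedule(cpu);
}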
@@ -900,5 +945,13 @@ void __init iSeries_early_setup(void)
 	ppc_md.get_rtc_time = iSeries_get_rtc_time;
 	ppc_md.calibrate_decr = iSeries_calibrate_decr;
 	ppc_md.progress = iSeries_progress;
+
+	if (get_paca()->lppaca.shared_proc) {
+		ppc_md.idle_loop = iseries_shared_idle;
+		printk(KERN_INFO "Using shared processor idle loop\n");
+	} else {
+		ppc_md.idle_loop = iseries_dedicated_idle;
+		printk(KERN_INFO "Using dedicated idle loop\n");
+	}
 }
 
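Note: ppc_md.idle_loop is presumably consumed by the generic ppc64 idle code (arch/ppc64/kernel/idle.c), which is not part of this diff. A hypothetical dispatch sketch, only to show how the two loops registered above get entered (this body is illustrative, not the actual idle.c code):

int cpu_idle(void)
{
	/*
	 * Each CPU enters here once at boot and never leaves: both
	 * iseries_shared_idle() and iseries_dedicated_idle() loop
	 * forever, calling schedule() internally when work appears.
	 */
	ppc_md.idle_loop();
	return 0;
}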