author    Michael Ellerman <michael@ellerman.id.au>    2005-06-30 01:07:09 -0400
committer Paul Mackerras <paulus@samba.org>            2005-06-30 01:07:09 -0400
commit    bea248fb30c3122ece8c34798527fac431c1d7b0 (patch)
tree      9158d7a089312f92abcb6c8d5a8d942d543be24b /arch
parent    b1bdfbd0a29d6da4dbe42736faac02c43a9afe76 (diff)
[PATCH] ppc64: Remove lpqueue pointer from the paca on iSeries
The iSeries code keeps a pointer to the ItLpQueue in its paca struct. But all
these pointers end up pointing to the one place, ie. xItLpQueue.

So remove the pointer from the paca struct and just refer to xItLpQueue
directly where needed.

The only complication is that the spread_lpevents logic was implemented by
having a NULL lpqueue pointer in the paca on CPUs that weren't supposed to
process events. Instead we just compare the spread_lpevents value to the
processor id to get the same behaviour.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
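The gating logic is simple enough to model outside the kernel. The following standalone C sketch is hypothetical illustration only (not kernel code; the cpu id and spread value are plain variables here) and shows which CPUs would handle lp events for a given spread_lpevents setting:

    #include <stdio.h>

    /* Mirrors the new check in ItLpQueue_isLpIntPending(): a CPU takes part
     * in lp event processing only if its id is below spread_lpevents
     * (default 1, i.e. CPU 0 only). */
    static int cpu_handles_lpevents(unsigned long cpu, unsigned long spread_lpevents)
    {
            return cpu < spread_lpevents;
    }

    int main(void)
    {
            unsigned long spread_lpevents = 2;  /* as if booted with spread_lpevents=2 */
            unsigned long ncpus = 4;            /* hypothetical CPU count */

            for (unsigned long cpu = 0; cpu < ncpus; cpu++)
                    printf("cpu %lu: %s\n", cpu,
                           cpu_handles_lpevents(cpu, spread_lpevents) ?
                           "processes lp events" : "skips lp events");
            return 0;
    }

With spread_lpevents=2 only CPUs 0 and 1 report "processes lp events", matching the behaviour the old code got by leaving the lpqueue pointer NULL on the other CPUs.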
Diffstat (limited to 'arch')
-rw-r--r--  arch/ppc64/kernel/ItLpQueue.c      | 16
-rw-r--r--  arch/ppc64/kernel/iSeries_setup.c  |  6
-rw-r--r--  arch/ppc64/kernel/idle.c           |  4
-rw-r--r--  arch/ppc64/kernel/irq.c            |  6
-rw-r--r--  arch/ppc64/kernel/mf.c             |  5
-rw-r--r--  arch/ppc64/kernel/pacaData.c       |  1
-rw-r--r--  arch/ppc64/kernel/time.c           |  7
7 files changed, 19 insertions, 26 deletions
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index cdea00d7707f..e90dca8bd136 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -69,15 +69,17 @@ struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
 	return nextLpEvent;
 }
 
+unsigned long spread_lpevents = 1;
+
 int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
 {
-	int retval = 0;
-	struct HvLpEvent * nextLpEvent;
-	if ( lpQueue ) {
-		nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-		retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
-	}
-	return retval;
+	struct HvLpEvent *next_event;
+
+	if (smp_processor_id() >= spread_lpevents)
+		return 0;
+
+	next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+	return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
 }
 
 void ItLpQueue_clearValid( struct HvLpEvent * event )
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
index 86966ce76b58..2049b6dbafc7 100644
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ b/arch/ppc64/kernel/iSeries_setup.c
@@ -855,17 +855,15 @@ late_initcall(iSeries_src_init);
 
 static int set_spread_lpevents(char *str)
 {
-	unsigned long i;
 	unsigned long val = simple_strtoul(str, NULL, 0);
+	extern unsigned long spread_lpevents;
 
 	/*
 	 * The parameter is the number of processors to share in processing
 	 * lp events.
 	 */
 	if (( val > 0) && (val <= NR_CPUS)) {
-		for (i = 1; i < val; ++i)
-			paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
-
+		spread_lpevents = val;
 		printk("lpevent processing spread over %ld processors\n", val);
 	} else {
 		printk("invalid spread_lpevents %ld\n", val);
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index bdf13b4dc1c8..63977a7a3094 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -88,7 +88,7 @@ static int iSeries_idle(void)
 
 	while (1) {
 		if (lpaca->lppaca.shared_proc) {
-			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
+			if (ItLpQueue_isLpIntPending(&xItLpQueue))
 				process_iSeries_events();
 			if (!need_resched())
 				yield_shared_processor();
@@ -100,7 +100,7 @@ static int iSeries_idle(void)
 
 		while (!need_resched()) {
 			HMT_medium();
-			if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
+			if (ItLpQueue_isLpIntPending(&xItLpQueue))
 				process_iSeries_events();
 			HMT_low();
 		}
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index 3defc8c33adf..b1e6acb02a9a 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -269,7 +269,6 @@ out:
 void do_IRQ(struct pt_regs *regs)
 {
 	struct paca_struct *lpaca;
-	struct ItLpQueue *lpq;
 
 	irq_enter();
 
@@ -295,9 +294,8 @@ void do_IRQ(struct pt_regs *regs)
 		iSeries_smp_message_recv(regs);
 	}
 #endif /* CONFIG_SMP */
-	lpq = lpaca->lpqueue_ptr;
-	if (lpq && ItLpQueue_isLpIntPending(lpq))
-		lpevent_count += ItLpQueue_process(lpq, regs);
+	if (ItLpQueue_isLpIntPending(&xItLpQueue))
+		lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
 
 	irq_exit();
 
diff --git a/arch/ppc64/kernel/mf.c b/arch/ppc64/kernel/mf.c
index d98bebf7042f..d6a297a4feb3 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/ppc64/kernel/mf.c
@@ -802,9 +802,8 @@ int mf_get_boot_rtc(struct rtc_time *tm)
 	/* We need to poll here as we are not yet taking interrupts */
 	while (rtc_data.busy) {
 		extern unsigned long lpevent_count;
-		struct ItLpQueue *lpq = get_paca()->lpqueue_ptr;
-		if (lpq && ItLpQueue_isLpIntPending(lpq))
-			lpevent_count += ItLpQueue_process(lpq, NULL);
+		if (ItLpQueue_isLpIntPending(&xItLpQueue))
+			lpevent_count += ItLpQueue_process(&xItLpQueue, NULL);
 	}
 	return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
 }
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c
index a3e0975c26c1..ebfb517019ef 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/ppc64/kernel/pacaData.c
@@ -45,7 +45,6 @@ extern unsigned long __toc_start;
 #ifdef CONFIG_PPC_ISERIES
 #define EXTRA_INITS(number, lpq) \
 	.lppaca_ptr = &paca[number].lppaca, \
-	.lpqueue_ptr = (lpq), /* &xItLpQueue, */ \
 	.reg_save_ptr = &paca[number].reg_save, \
 	.reg_save = { \
 		.xDesc = 0xd397d9e2, /* "LpRS" */ \
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index 2a532db9138a..cdc43afb563e 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -367,11 +367,8 @@ int timer_interrupt(struct pt_regs * regs)
 	set_dec(next_dec);
 
 #ifdef CONFIG_PPC_ISERIES
-	{
-		struct ItLpQueue *lpq = lpaca->lpqueue_ptr;
-		if (lpq && ItLpQueue_isLpIntPending(lpq))
-			lpevent_count += ItLpQueue_process(lpq, regs);
-	}
+	if (ItLpQueue_isLpIntPending(&xItLpQueue))
+		lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
 #endif
 
 /* collect purr register values often, for accurate calculations */