author     Michael Ellerman <michael@ellerman.id.au>   2005-06-30 01:07:57 -0400
committer  Paul Mackerras <paulus@samba.org>           2005-06-30 01:07:57 -0400
commit     1b19bc721416ae5bc813521d9e010a89f4816120 (patch)
tree       7034c878cdc370cb16b83957b652f6c2b95630d5 /arch
parent     ee48444b85f498d99592835f61125385d8e9c975 (diff)
[PATCH] ppc64: Don't pass the pointers to xItLpQueue around
Because there's only one ItLpQueue and we know where it is, ie. xItLpQueue,
there's no point passing pointers to it around all over the place.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
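
The refactoring pattern here is simple: when a structure has exactly one instance at a fixed, well-known location, helpers that take a pointer to it can drop the parameter and reference the global directly. A minimal sketch of the before/after shape follows; the names (event_queue, queue_pending_old, queue_pending, the_queue) are invented for illustration and are not part of the kernel code.

#include <stdio.h>

/* Simplified stand-in for a singleton queue like xItLpQueue. */
struct event_queue {
        unsigned int pending;
};

/* Before: every caller threads a pointer through, even though
 * only one instance of the structure ever exists. */
static int queue_pending_old(struct event_queue *q)
{
        return q->pending != 0;
}

/* The single, globally visible instance. */
static struct event_queue the_queue;

/* After: the helper reads the global directly and the
 * parameter disappears from every call site. */
static int queue_pending(void)
{
        return the_queue.pending != 0;
}

int main(void)
{
        the_queue.pending = 1;
        printf("old API: %d\n", queue_pending_old(&the_queue)); /* caller passes &the_queue */
        printf("new API: %d\n", queue_pending());               /* caller passes nothing   */
        return 0;
}

The trade-off is the usual one for singletons: call sites get shorter, but the helpers can no longer be reused against a second queue or tested against a stub instance.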
Diffstat (limited to 'arch')
 -rw-r--r--  arch/ppc64/kernel/ItLpQueue.c | 46
 -rw-r--r--  arch/ppc64/kernel/idle.c      |  4
 -rw-r--r--  arch/ppc64/kernel/irq.c       |  4
 -rw-r--r--  arch/ppc64/kernel/mf.c        |  4
 -rw-r--r--  arch/ppc64/kernel/time.c      |  4
 5 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index 61be23ed5004..35f6deac0b99 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -17,10 +17,10 @@
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvCallEvent.h>
 
-static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
+static __inline__ int set_inUse(void)
 {
         int t;
-        u32 * inUseP = &(lpQueue->xInUseWord);
+        u32 * inUseP = &xItLpQueue.xInUseWord;
 
         __asm__ __volatile__("\n\
 1:      lwarx   %0,0,%2         \n\
@@ -31,37 +31,37 @@ static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
         stwcx.  %0,0,%2         \n\
         bne-    1b              \n\
 2:      eieio"
-        : "=&r" (t), "=m" (lpQueue->xInUseWord)
-        : "r" (inUseP), "m" (lpQueue->xInUseWord)
+        : "=&r" (t), "=m" (xItLpQueue.xInUseWord)
+        : "r" (inUseP), "m" (xItLpQueue.xInUseWord)
         : "cc");
 
         return t;
 }
 
-static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
+static __inline__ void clear_inUse(void)
 {
-        lpQueue->xInUseWord = 0;
+        xItLpQueue.xInUseWord = 0;
 }
 
 /* Array of LpEvent handler functions */
 extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
 unsigned long ItLpQueueInProcess = 0;
 
-struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
+struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
 {
         struct HvLpEvent * nextLpEvent =
-                (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+                (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
         if ( nextLpEvent->xFlags.xValid ) {
                 /* rmb() needed only for weakly consistent machines (regatta) */
                 rmb();
                 /* Set pointer to next potential event */
-                lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
+                xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
                                       LpEventAlign ) /
                                       LpEventAlign ) *
                                       LpEventAlign;
                 /* Wrap to beginning if no room at end */
-                if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
-                        lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
+                if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
+                        xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
         }
         else
                 nextLpEvent = NULL;
@@ -71,15 +71,15 @@ struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
 
 static unsigned long spread_lpevents = NR_CPUS;
 
-int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
+int ItLpQueue_isLpIntPending(void)
 {
         struct HvLpEvent *next_event;
 
         if (smp_processor_id() >= spread_lpevents)
                 return 0;
 
-        next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-        return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
+        next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
+        return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
 }
 
 void ItLpQueue_clearValid( struct HvLpEvent * event )
@@ -104,13 +104,13 @@ void ItLpQueue_clearValid( struct HvLpEvent * event )
         event->xFlags.xValid = 0;
 }
 
-unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
+unsigned ItLpQueue_process(struct pt_regs *regs)
 {
         unsigned numIntsProcessed = 0;
         struct HvLpEvent * nextLpEvent;
 
         /* If we have recursed, just return */
-        if ( !set_inUse( lpQueue ) )
+        if ( !set_inUse() )
                 return 0;
 
         if (ItLpQueueInProcess == 0)
@@ -119,13 +119,13 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
                 BUG();
 
         for (;;) {
-                nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
+                nextLpEvent = ItLpQueue_getNextLpEvent();
                 if ( nextLpEvent ) {
                         /* Count events to return to caller
-                         * and count processed events in lpQueue
+                         * and count processed events in xItLpQueue
                          */
                         ++numIntsProcessed;
-                        lpQueue->xLpIntCount++;
+                        xItLpQueue.xLpIntCount++;
                         /* Call appropriate handler here, passing
                          * a pointer to the LpEvent.  The handler
                          * must make a copy of the LpEvent if it
@@ -140,7 +140,7 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
                          * here!
                          */
                         if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-                                lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
+                                xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
                         if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                              lpEventHandler[nextLpEvent->xType] )
                                 lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -148,19 +148,19 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
                                 printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
 
                         ItLpQueue_clearValid( nextLpEvent );
-                } else if ( lpQueue->xPlicOverflowIntPending )
+                } else if ( xItLpQueue.xPlicOverflowIntPending )
                         /*
                          * No more valid events. If overflow events are
                          * pending process them
                          */
-                        HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
+                        HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
                 else
                         break;
         }
 
         ItLpQueueInProcess = 0;
         mb();
-        clear_inUse( lpQueue );
+        clear_inUse();
 
         get_paca()->lpevent_count += numIntsProcessed;
 
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 63977a7a3094..a7ebd0238d39 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -88,7 +88,7 @@ static int iSeries_idle(void)
 
         while (1) {
                 if (lpaca->lppaca.shared_proc) {
-                        if (ItLpQueue_isLpIntPending(&xItLpQueue))
+                        if (ItLpQueue_isLpIntPending())
                                 process_iSeries_events();
                         if (!need_resched())
                                 yield_shared_processor();
@@ -100,7 +100,7 @@ static int iSeries_idle(void)
 
                         while (!need_resched()) {
                                 HMT_medium();
-                                if (ItLpQueue_isLpIntPending(&xItLpQueue))
+                                if (ItLpQueue_isLpIntPending())
                                         process_iSeries_events();
                                 HMT_low();
                         }
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index b1e6acb02a9a..46a7151ad6d5 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -294,8 +294,8 @@ void do_IRQ(struct pt_regs *regs)
                 iSeries_smp_message_recv(regs);
         }
 #endif /* CONFIG_SMP */
-        if (ItLpQueue_isLpIntPending(&xItLpQueue))
-                lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
+        if (ItLpQueue_isLpIntPending())
+                lpevent_count += ItLpQueue_process(regs);
 
         irq_exit();
 
diff --git a/arch/ppc64/kernel/mf.c b/arch/ppc64/kernel/mf.c
index d6a297a4feb3..ef9206998028 100644
--- a/arch/ppc64/kernel/mf.c
+++ b/arch/ppc64/kernel/mf.c
@@ -802,8 +802,8 @@ int mf_get_boot_rtc(struct rtc_time *tm)
         /* We need to poll here as we are not yet taking interrupts */
         while (rtc_data.busy) {
                 extern unsigned long lpevent_count;
-                if (ItLpQueue_isLpIntPending(&xItLpQueue))
-                        lpevent_count += ItLpQueue_process(&xItLpQueue, NULL);
+                if (ItLpQueue_isLpIntPending())
+                        lpevent_count += ItLpQueue_process(NULL);
         }
         return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
 }
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c
index cdc43afb563e..c133f9c28c57 100644
--- a/arch/ppc64/kernel/time.c
+++ b/arch/ppc64/kernel/time.c
@@ -367,8 +367,8 @@ int timer_interrupt(struct pt_regs * regs)
         set_dec(next_dec);
 
 #ifdef CONFIG_PPC_ISERIES
-        if (ItLpQueue_isLpIntPending(&xItLpQueue))
-                lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
+        if (ItLpQueue_isLpIntPending())
+                lpevent_count += ItLpQueue_process(regs);
 #endif
 
         /* collect purr register values often, for accurate calculations */