Diffstat (limited to 'arch/ppc64/kernel/ItLpQueue.c')
-rw-r--r--	arch/ppc64/kernel/ItLpQueue.c	46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index 61be23ed5004..35f6deac0b99 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -17,10 +17,10 @@
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvCallEvent.h>
 
-static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
+static __inline__ int set_inUse(void)
 {
         int t;
-        u32 * inUseP = &(lpQueue->xInUseWord);
+        u32 * inUseP = &xItLpQueue.xInUseWord;
 
         __asm__ __volatile__("\n\
 1:      lwarx   %0,0,%2         \n\
@@ -31,37 +31,37 @@ static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
         stwcx.  %0,0,%2         \n\
         bne-    1b              \n\
 2:      eieio"
-        : "=&r" (t), "=m" (lpQueue->xInUseWord)
-        : "r" (inUseP), "m" (lpQueue->xInUseWord)
+        : "=&r" (t), "=m" (xItLpQueue.xInUseWord)
+        : "r" (inUseP), "m" (xItLpQueue.xInUseWord)
         : "cc");
 
         return t;
 }
 
-static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
+static __inline__ void clear_inUse(void)
 {
-        lpQueue->xInUseWord = 0;
+        xItLpQueue.xInUseWord = 0;
 }
 
 /* Array of LpEvent handler functions */
 extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
 unsigned long ItLpQueueInProcess = 0;
 
-struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
+struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
 {
         struct HvLpEvent * nextLpEvent =
-                (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+                (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
         if ( nextLpEvent->xFlags.xValid ) {
                 /* rmb() needed only for weakly consistent machines (regatta) */
                 rmb();
                 /* Set pointer to next potential event */
-                lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
+                xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
                                       LpEventAlign ) /
                                      LpEventAlign ) *
                                      LpEventAlign;
                 /* Wrap to beginning if no room at end */
-                if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
-                        lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
+                if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
+                        xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
         }
         else
                 nextLpEvent = NULL;
@@ -71,15 +71,15 @@ struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
 
 static unsigned long spread_lpevents = NR_CPUS;
 
-int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
+int ItLpQueue_isLpIntPending(void)
 {
         struct HvLpEvent *next_event;
 
         if (smp_processor_id() >= spread_lpevents)
                 return 0;
 
-        next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-        return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
+        next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
+        return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
 }
 
 void ItLpQueue_clearValid( struct HvLpEvent * event )
@@ -104,13 +104,13 @@ void ItLpQueue_clearValid( struct HvLpEvent * event )
         event->xFlags.xValid = 0;
 }
 
-unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
+unsigned ItLpQueue_process(struct pt_regs *regs)
 {
         unsigned numIntsProcessed = 0;
         struct HvLpEvent * nextLpEvent;
 
         /* If we have recursed, just return */
-        if ( !set_inUse( lpQueue ) )
+        if ( !set_inUse() )
                 return 0;
 
         if (ItLpQueueInProcess == 0)
@@ -119,13 +119,13 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
                 BUG();
 
         for (;;) {
-                nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
+                nextLpEvent = ItLpQueue_getNextLpEvent();
                 if ( nextLpEvent ) {
                         /* Count events to return to caller
-                         * and count processed events in lpQueue
+                         * and count processed events in xItLpQueue
                          */
                         ++numIntsProcessed;
-                        lpQueue->xLpIntCount++;
+                        xItLpQueue.xLpIntCount++;
                         /* Call appropriate handler here, passing
                          * a pointer to the LpEvent.  The handler
                          * must make a copy of the LpEvent if it
@@ -140,7 +140,7 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
                         * here!
                         */
                         if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-                                lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
+                                xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
                         if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                                         lpEventHandler[nextLpEvent->xType] )
                                 lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -148,19 +148,19 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
                                 printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
 
                         ItLpQueue_clearValid( nextLpEvent );
-                } else if ( lpQueue->xPlicOverflowIntPending )
+                } else if ( xItLpQueue.xPlicOverflowIntPending )
                         /*
                          * No more valid events. If overflow events are
                          * pending process them
                          */
-                        HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
+                        HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
                 else
                         break;
         }
 
         ItLpQueueInProcess = 0;
         mb();
-        clear_inUse( lpQueue );
+        clear_inUse();
 
         get_paca()->lpevent_count += numIntsProcessed;
 
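Every hunk above applies the same refactoring: the helpers stop taking a struct ItLpQueue * argument and instead operate on the single global queue xItLpQueue, so each lpQueue-> access becomes xItLpQueue. and each parameter list becomes (void). The standalone sketch below shows that shape in isolation; the cut-down struct and the demo_* names are illustrative assumptions for this sketch, not the kernel's actual definitions.

    #include <stdio.h>

    /* Illustrative stand-in for an event queue; only the fields
     * needed to show the pattern are included. */
    struct demo_lp_queue {
            unsigned int  in_use;
            unsigned long int_count;
    };

    /* Single global instance, playing the role of xItLpQueue. */
    static struct demo_lp_queue demo_queue;

    /* Before the refactoring these helpers would take a
     * struct demo_lp_queue * parameter; afterwards they take void
     * and reference the global directly. */
    static int demo_set_in_use(void)
    {
            if (demo_queue.in_use)
                    return 0;       /* already busy: caller must not recurse */
            demo_queue.in_use = 1;
            return 1;
    }

    static void demo_clear_in_use(void)
    {
            demo_queue.in_use = 0;
    }

    int main(void)
    {
            if (demo_set_in_use()) {
                    demo_queue.int_count++; /* stand-in for processing one event */
                    demo_clear_in_use();
            }
            printf("events processed: %lu\n", demo_queue.int_count);
            return 0;
    }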