Diffstat (limited to 'arch/ppc64/kernel/ItLpQueue.c')
-rw-r--r--	arch/ppc64/kernel/ItLpQueue.c	| 44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index 7ddbfb9750dc..e55fe1a2725f 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -26,7 +26,7 @@
  *
  * It is written to by the hypervisor so cannot end up in the BSS.
  */
-struct ItLpQueue xItLpQueue __attribute__((__section__(".data")));
+struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 
 static char *event_types[9] = {
 	"Hypervisor\t\t",
@@ -43,7 +43,7 @@ static char *event_types[9] = {
 static __inline__ int set_inUse(void)
 {
 	int t;
-	u32 * inUseP = &xItLpQueue.xInUseWord;
+	u32 * inUseP = &hvlpevent_queue.xInUseWord;
 
 	__asm__ __volatile__("\n\
 1:	lwarx	%0,0,%2		\n\
@@ -54,8 +54,8 @@ static __inline__ int set_inUse(void)
 	stwcx.	%0,0,%2		\n\
 	bne-	1b		\n\
 2:	eieio"
-	: "=&r" (t), "=m" (xItLpQueue.xInUseWord)
-	: "r" (inUseP), "m" (xItLpQueue.xInUseWord)
+	: "=&r" (t), "=m" (hvlpevent_queue.xInUseWord)
+	: "r" (inUseP), "m" (hvlpevent_queue.xInUseWord)
 	: "cc");
 
 	return t;
@@ -63,7 +63,7 @@ static __inline__ int set_inUse(void)
 
 static __inline__ void clear_inUse(void)
 {
-	xItLpQueue.xInUseWord = 0;
+	hvlpevent_queue.xInUseWord = 0;
 }
 
 /* Array of LpEvent handler functions */
@@ -73,18 +73,18 @@ unsigned long ItLpQueueInProcess = 0;
 static struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
 {
 	struct HvLpEvent * nextLpEvent =
-		(struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
+		(struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
 	if ( nextLpEvent->xFlags.xValid ) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
+		hvlpevent_queue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
 				      LpEventAlign ) /
 				      LpEventAlign ) *
 				      LpEventAlign;
 		/* Wrap to beginning if no room at end */
-		if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
-			xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
+		if (hvlpevent_queue.xSlicCurEventPtr > hvlpevent_queue.xSlicLastValidEventPtr)
+			hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.xSlicEventStackPtr;
 	}
 	else
 		nextLpEvent = NULL;
@@ -101,8 +101,8 @@ int ItLpQueue_isLpIntPending(void)
 	if (smp_processor_id() >= spread_lpevents)
 		return 0;
 
-	next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
-	return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
+	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
 }
 
 static void ItLpQueue_clearValid( struct HvLpEvent * event )
@@ -145,10 +145,10 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 		nextLpEvent = ItLpQueue_getNextLpEvent();
 		if ( nextLpEvent ) {
 			/* Count events to return to caller
-			 * and count processed events in xItLpQueue
+			 * and count processed events in hvlpevent_queue
 			 */
 			++numIntsProcessed;
-			xItLpQueue.xLpIntCount++;
+			hvlpevent_queue.xLpIntCount++;
 			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent. The handler
 			 * must make a copy of the LpEvent if it
@@ -163,7 +163,7 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 			 * here!
 			 */
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-				xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
+				hvlpevent_queue.xLpIntCountByType[nextLpEvent->xType]++;
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
 			     lpEventHandler[nextLpEvent->xType] )
 				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -171,12 +171,12 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
 
 			ItLpQueue_clearValid( nextLpEvent );
-		} else if ( xItLpQueue.xPlicOverflowIntPending )
+		} else if ( hvlpevent_queue.xPlicOverflowIntPending )
 			/*
 			 * No more valid events. If overflow events are
 			 * pending process them
 			 */
-			HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
+			HvCallEvent_getOverflowLpEvents( hvlpevent_queue.xIndex);
 		else
 			break;
 	}
@@ -224,11 +224,11 @@ void setup_hvlpevent_queue(void)
 	/* Invoke the hypervisor to initialize the event stack */
 	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
 
-	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
-	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
-	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
+	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
 					(LpEventStackSize - LpEventMaxSize);
-	xItLpQueue.xIndex = 0;
+	hvlpevent_queue.xIndex = 0;
 }
 
 static int proc_lpevents_show(struct seq_file *m, void *v)
@@ -237,11 +237,11 @@ static int proc_lpevents_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "LpEventQueue 0\n");
 	seq_printf(m, "  events processed:\t%lu\n",
-		   (unsigned long)xItLpQueue.xLpIntCount);
+		   (unsigned long)hvlpevent_queue.xLpIntCount);
 
 	for (i = 0; i < 9; ++i)
 		seq_printf(m, "    %s %10lu\n", event_types[i],
-			   (unsigned long)xItLpQueue.xLpIntCountByType[i]);
+			   (unsigned long)hvlpevent_queue.xLpIntCountByType[i]);
 
 	seq_printf(m, "\n  events processed by processor:\n");
 