author		Michael Ellerman <michael@ellerman.id.au>	2005-06-30 01:15:32 -0400
committer	Paul Mackerras <paulus@samba.org>	2005-06-30 01:15:32 -0400
commit		a61874648d14450f4d397489527998e3dd1119de
tree		007af1fc91b283f6f1d12706300f61d75d69ba43
parent		ab354b637924beb33dcc23eedc9482f2c692188f
[PATCH] ppc64: Rename xItLpQueue to hvlpevent_queue

The xItLpQueue is a queue of HvLpEvents that we're given by the
Hypervisor. Rename xItLpQueue to hvlpevent_queue and make the type
struct hvlpevent_queue.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--  arch/ppc64/kernel/ItLpQueue.c          | 44
-rw-r--r--  arch/ppc64/kernel/LparData.c           |  4
-rw-r--r--  include/asm-ppc64/iSeries/ItLpQueue.h  |  4
3 files changed, 26 insertions, 26 deletions
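Before the diff itself, a minimal, self-contained sketch of what the rename amounts to at a call site. The global name hvlpevent_queue and the member names (xSlicCurEventPtr, xInUseWord, xLpIntCount) are taken from the hunks below; the stub event type and the queue_event_pending() helper are hypothetical simplifications for illustration, not the real iSeries definitions.

#include <stdio.h>

/* Simplified stand-in for struct HvLpEvent; the real definition lives
 * in the iSeries headers and carries more state than a valid flag. */
struct HvLpEvent_stub {
	int xValid;
};

/* Before this patch the type was "struct ItLpQueue" and the global
 * instance was "xItLpQueue"; after it, both are hvlpevent_queue. */
struct hvlpevent_queue {
	char *xSlicCurEventPtr;		/* next event stack entry to peek at */
	unsigned int xInUseWord;	/* set while a CPU drains the queue */
	unsigned long xLpIntCount;	/* total events processed */
};

struct hvlpevent_queue hvlpevent_queue;

/* Hypothetical caller, showing how references read after the rename. */
static int queue_event_pending(void)
{
	struct HvLpEvent_stub *next =
		(struct HvLpEvent_stub *)hvlpevent_queue.xSlicCurEventPtr;

	return next && next->xValid;
}

int main(void)
{
	printf("pending: %d\n", queue_event_pending());
	return 0;
}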
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index 7ddbfb9750dc..e55fe1a2725f 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -26,7 +26,7 @@
  *
  * It is written to by the hypervisor so cannot end up in the BSS.
  */
-struct ItLpQueue xItLpQueue __attribute__((__section__(".data")));
+struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 
 static char *event_types[9] = {
 	"Hypervisor\t\t",
@@ -43,7 +43,7 @@ static char *event_types[9] = {
 static __inline__ int set_inUse(void)
 {
 	int t;
-	u32 * inUseP = &xItLpQueue.xInUseWord;
+	u32 * inUseP = &hvlpevent_queue.xInUseWord;
 
 	__asm__ __volatile__("\n\
 1:	lwarx	%0,0,%2		\n\
@@ -54,8 +54,8 @@ static __inline__ int set_inUse(void)
 	stwcx.	%0,0,%2		\n\
 	bne-	1b		\n\
 2:	eieio"
-	: "=&r" (t), "=m" (xItLpQueue.xInUseWord)
-	: "r" (inUseP), "m" (xItLpQueue.xInUseWord)
+	: "=&r" (t), "=m" (hvlpevent_queue.xInUseWord)
+	: "r" (inUseP), "m" (hvlpevent_queue.xInUseWord)
 	: "cc");
 
 	return t;
@@ -63,7 +63,7 @@ static __inline__ int set_inUse(void)
 
 static __inline__ void clear_inUse(void)
 {
-	xItLpQueue.xInUseWord = 0;
+	hvlpevent_queue.xInUseWord = 0;
 }
 
 /* Array of LpEvent handler functions */
@@ -73,18 +73,18 @@ unsigned long ItLpQueueInProcess = 0;
 static struct HvLpEvent * ItLpQueue_getNextLpEvent(void)
 {
 	struct HvLpEvent * nextLpEvent =
-		(struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
+		(struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
 	if ( nextLpEvent->xFlags.xValid ) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
+		hvlpevent_queue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
 				      LpEventAlign ) /
 				      LpEventAlign ) *
 				      LpEventAlign;
 		/* Wrap to beginning if no room at end */
-		if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
-			xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
+		if (hvlpevent_queue.xSlicCurEventPtr > hvlpevent_queue.xSlicLastValidEventPtr)
+			hvlpevent_queue.xSlicCurEventPtr = hvlpevent_queue.xSlicEventStackPtr;
 	}
 	else
 		nextLpEvent = NULL;
@@ -101,8 +101,8 @@ int ItLpQueue_isLpIntPending(void)
 	if (smp_processor_id() >= spread_lpevents)
 		return 0;
 
-	next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
-	return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
+	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+	return next_event->xFlags.xValid | hvlpevent_queue.xPlicOverflowIntPending;
 }
 
 static void ItLpQueue_clearValid( struct HvLpEvent * event )
@@ -145,10 +145,10 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 		nextLpEvent = ItLpQueue_getNextLpEvent();
 		if ( nextLpEvent ) {
 			/* Count events to return to caller
-			 * and count processed events in xItLpQueue
+			 * and count processed events in hvlpevent_queue
 			 */
 			++numIntsProcessed;
-			xItLpQueue.xLpIntCount++;
+			hvlpevent_queue.xLpIntCount++;
 			/* Call appropriate handler here, passing
 			 * a pointer to the LpEvent. The handler
 			 * must make a copy of the LpEvent if it
@@ -163,7 +163,7 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 			 * here!
 			 */
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-				xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
+				hvlpevent_queue.xLpIntCountByType[nextLpEvent->xType]++;
 			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
 			     lpEventHandler[nextLpEvent->xType] )
 				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -171,12 +171,12 @@ unsigned ItLpQueue_process(struct pt_regs *regs)
 				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
 
 			ItLpQueue_clearValid( nextLpEvent );
-		} else if ( xItLpQueue.xPlicOverflowIntPending )
+		} else if ( hvlpevent_queue.xPlicOverflowIntPending )
 			/*
 			 * No more valid events. If overflow events are
 			 * pending process them
 			 */
-			HvCallEvent_getOverflowLpEvents( xItLpQueue.xIndex);
+			HvCallEvent_getOverflowLpEvents( hvlpevent_queue.xIndex);
 		else
 			break;
 	}
@@ -224,11 +224,11 @@ void setup_hvlpevent_queue(void)
 	/* Invoke the hypervisor to initialize the event stack */
 	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
 
-	xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
-	xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
-	xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
+	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
 					(LpEventStackSize - LpEventMaxSize);
-	xItLpQueue.xIndex = 0;
+	hvlpevent_queue.xIndex = 0;
 }
 
 static int proc_lpevents_show(struct seq_file *m, void *v)
@@ -237,11 +237,11 @@ static int proc_lpevents_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "LpEventQueue 0\n");
 	seq_printf(m, " events processed:\t%lu\n",
-		   (unsigned long)xItLpQueue.xLpIntCount);
+		   (unsigned long)hvlpevent_queue.xLpIntCount);
 
 	for (i = 0; i < 9; ++i)
 		seq_printf(m, " %s %10lu\n", event_types[i],
-			   (unsigned long)xItLpQueue.xLpIntCountByType[i]);
+			   (unsigned long)hvlpevent_queue.xLpIntCountByType[i]);
 
 	seq_printf(m, "\n events processed by processor:\n");
 
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
index f42ee35f927a..6ffcf67dd507 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/ppc64/kernel/LparData.c
@@ -193,7 +193,7 @@ struct ItVpdAreas itVpdAreas = {
 		0,0,0,		/* 13 - 15 */
 		sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
 		0,0,0,0,0,0,	/* 17 - 22 */
-		sizeof(struct ItLpQueue),/* 23 length of Lp Queue */
+		sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
 		0,0		/* 24 - 25 */
 		},
 	.xSlicVpdAdrs = {	/* VPD addresses */
@@ -211,7 +211,7 @@ struct ItVpdAreas itVpdAreas = {
 		0,0,0,			/* 13 - 15 */
 		&xIoHriProcessorVpd,	/* 16 Proc Vpd */
 		0,0,0,0,0,0,		/* 17 - 22 */
-		&xItLpQueue,		/* 23 Lp Queue */
+		&hvlpevent_queue,	/* 23 Lp Queue */
 		0,0
 	}
 };
diff --git a/include/asm-ppc64/iSeries/ItLpQueue.h b/include/asm-ppc64/iSeries/ItLpQueue.h
index be1cb7f3f884..bebfb364f556 100644
--- a/include/asm-ppc64/iSeries/ItLpQueue.h
+++ b/include/asm-ppc64/iSeries/ItLpQueue.h
@@ -41,7 +41,7 @@ struct HvLpEvent;
 #define LpEventMaxSize	256
 #define LpEventAlign	64
 
-struct ItLpQueue {
+struct hvlpevent_queue {
 /*
  * The xSlicCurEventPtr is the pointer to the next event stack entry
  * that will become valid. The OS must peek at this entry to determine
@@ -74,7 +74,7 @@ struct ItLpQueue {
 	u64	xLpIntCountByType[9];	// 0x38-0x7F Event counts by type
 };
 
-extern struct ItLpQueue xItLpQueue;
+extern struct hvlpevent_queue hvlpevent_queue;
 
 extern int ItLpQueue_isLpIntPending(void);
 extern unsigned ItLpQueue_process(struct pt_regs *);