Diffstat (limited to 'arch/ppc64/kernel/ItLpQueue.c')
-rw-r--r--  arch/ppc64/kernel/ItLpQueue.c  294
1 file changed, 195 insertions(+), 99 deletions(-)
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index cdea00d7707f..4231861288a3 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -1,7 +1,7 @@
 /*
  * ItLpQueue.c
  * Copyright (C) 2001 Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -11,156 +11,252 @@
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
 #include <asm/system.h>
 #include <asm/paca.h>
 #include <asm/iSeries/ItLpQueue.h>
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/iSeries/HvCallEvent.h>
 
-static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
-{
-	int t;
-	u32 * inUseP = &(lpQueue->xInUseWord);
-
-	__asm__ __volatile__("\n\
-1:	lwarx	%0,0,%2		\n\
-	cmpwi	0,%0,0		\n\
-	li	%0,0		\n\
-	bne-	2f		\n\
-	addi	%0,%0,1		\n\
-	stwcx.	%0,0,%2		\n\
-	bne-	1b		\n\
-2:	eieio"
-	: "=&r" (t), "=m" (lpQueue->xInUseWord)
-	: "r" (inUseP), "m" (lpQueue->xInUseWord)
-	: "cc");
-
-	return t;
-}
+/*
+ * The LpQueue is used to pass event data from the hypervisor to
+ * the partition. This is where I/O interrupt events are communicated.
+ *
+ * It is written to by the hypervisor so cannot end up in the BSS.
+ */
+struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 
-static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
-{
-	lpQueue->xInUseWord = 0;
-}
+DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
+
+static char *event_types[HvLpEvent_Type_NumTypes] = {
+	"Hypervisor",
+	"Machine Facilities",
+	"Session Manager",
+	"SPD I/O",
+	"Virtual Bus",
+	"PCI I/O",
+	"RIO I/O",
+	"Virtual Lan",
+	"Virtual I/O"
+};
 
 /* Array of LpEvent handler functions */
 extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
-unsigned long ItLpQueueInProcess = 0;
 
-struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
+static struct HvLpEvent * get_next_hvlpevent(void)
 {
-	struct HvLpEvent * nextLpEvent =
-		(struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-	if ( nextLpEvent->xFlags.xValid ) {
+	struct HvLpEvent * event;
+	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+
+	if (event->xFlags.xValid) {
 		/* rmb() needed only for weakly consistent machines (regatta) */
 		rmb();
 		/* Set pointer to next potential event */
-		lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
-				LpEventAlign ) /
-				LpEventAlign ) *
-				LpEventAlign;
+		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
+				LpEventAlign) / LpEventAlign) * LpEventAlign;
+
 		/* Wrap to beginning if no room at end */
-		if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
-			lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
+		if (hvlpevent_queue.xSlicCurEventPtr >
+				hvlpevent_queue.xSlicLastValidEventPtr) {
+			hvlpevent_queue.xSlicCurEventPtr =
+				hvlpevent_queue.xSlicEventStackPtr;
+		}
+	} else {
+		event = NULL;
 	}
-	else
-		nextLpEvent = NULL;
 
-	return nextLpEvent;
+	return event;
 }
 
-int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
+static unsigned long spread_lpevents = NR_CPUS;
+
+int hvlpevent_is_pending(void)
 {
-	int retval = 0;
-	struct HvLpEvent * nextLpEvent;
-	if ( lpQueue ) {
-		nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
-		retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
-	}
-	return retval;
+	struct HvLpEvent *next_event;
+
+	if (smp_processor_id() >= spread_lpevents)
+		return 0;
+
+	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
+
+	return next_event->xFlags.xValid |
+		hvlpevent_queue.xPlicOverflowIntPending;
 }
 
-void ItLpQueue_clearValid( struct HvLpEvent * event )
+static void hvlpevent_clear_valid(struct HvLpEvent * event)
 {
-	/* Clear the valid bit of the event
-	 * Also clear bits within this event that might
-	 * look like valid bits (on 64-byte boundaries)
+	/* Tell the Hypervisor that we're done with this event.
+	 * Also clear bits within this event that might look like valid bits.
+	 * ie. on 64-byte boundaries.
 	 */
-	unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
-			LpEventAlign ) - 1;
-	switch ( extra ) {
-	  case 3:
-	   ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
-	  case 2:
-	   ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
-	  case 1:
-	   ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
-	  case 0:
-	   ;
+	struct HvLpEvent *tmp;
+	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
+			  LpEventAlign) - 1;
+
+	switch (extra) {
+	case 3:
+		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
+	case 2:
+		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
+	case 1:
+		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
+		tmp->xFlags.xValid = 0;
 	}
+
 	mb();
+
 	event->xFlags.xValid = 0;
 }
 
-unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
+void process_hvlpevents(struct pt_regs *regs)
 {
-	unsigned numIntsProcessed = 0;
-	struct HvLpEvent * nextLpEvent;
+	struct HvLpEvent * event;
 
 	/* If we have recursed, just return */
-	if ( !set_inUse( lpQueue ) )
-		return 0;
-
-	if (ItLpQueueInProcess == 0)
-		ItLpQueueInProcess = 1;
-	else
-		BUG();
+	if (!spin_trylock(&hvlpevent_queue.lock))
+		return;
 
 	for (;;) {
-		nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
-		if ( nextLpEvent ) {
-			/* Count events to return to caller
-			 * and count processed events in lpQueue
-			 */
-			++numIntsProcessed;
-			lpQueue->xLpIntCount++;
-			/* Call appropriate handler here, passing
+		event = get_next_hvlpevent();
+		if (event) {
+			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent.  The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half. (perhaps for
			 * an ACK)
			 *
			 * Handlers are responsible for ACK processing
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
-			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-				lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
-			if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
-			     lpEventHandler[nextLpEvent->xType] )
-				lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
+			if (event->xType < HvLpEvent_Type_NumTypes)
+				__get_cpu_var(hvlpevent_counts)[event->xType]++;
+			if (event->xType < HvLpEvent_Type_NumTypes &&
+					lpEventHandler[event->xType])
+				lpEventHandler[event->xType](event, regs);
 			else
-				printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
+				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
 
-			ItLpQueue_clearValid( nextLpEvent );
-		} else if ( lpQueue->xPlicOverflowIntPending )
+			hvlpevent_clear_valid(event);
+		} else if (hvlpevent_queue.xPlicOverflowIntPending)
			/*
			 * No more valid events. If overflow events are
			 * pending process them
			 */
-			HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
+			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
		else
			break;
	}
 
-	ItLpQueueInProcess = 0;
-	mb();
-	clear_inUse( lpQueue );
+	spin_unlock(&hvlpevent_queue.lock);
+}
+
+static int set_spread_lpevents(char *str)
+{
+	unsigned long val = simple_strtoul(str, NULL, 0);
+
+	/*
+	 * The parameter is the number of processors to share in processing
+	 * lp events.
+	 */
+	if (( val > 0) && (val <= NR_CPUS)) {
+		spread_lpevents = val;
+		printk("lpevent processing spread over %ld processors\n", val);
+	} else {
+		printk("invalid spread_lpevents %ld\n", val);
+	}
 
-	get_paca()->lpevent_count += numIntsProcessed;
+	return 1;
+}
+__setup("spread_lpevents=", set_spread_lpevents);
+
+void setup_hvlpevent_queue(void)
+{
+	void *eventStack;
+
+	/*
+	 * Allocate a page for the Event Stack. The Hypervisor needs the
+	 * absolute real address, so we subtract out the KERNELBASE and add
+	 * in the absolute real address of the kernel load area.
+	 */
+	eventStack = alloc_bootmem_pages(LpEventStackSize);
+	memset(eventStack, 0, LpEventStackSize);
+
+	/* Invoke the hypervisor to initialize the event stack */
+	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
+
+	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
+	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
+					(LpEventStackSize - LpEventMaxSize);
+	hvlpevent_queue.xIndex = 0;
+}
+
+static int proc_lpevents_show(struct seq_file *m, void *v)
+{
+	int cpu, i;
+	unsigned long sum;
+	static unsigned long cpu_totals[NR_CPUS];
+
+	/* FIXME: do we care that there's no locking here? */
+	sum = 0;
+	for_each_online_cpu(cpu) {
+		cpu_totals[cpu] = 0;
+		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
+			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
+		}
+		sum += cpu_totals[cpu];
+	}
+
+	seq_printf(m, "LpEventQueue 0\n");
+	seq_printf(m, "  events processed:\t%lu\n", sum);
+
+	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
+		sum = 0;
+		for_each_online_cpu(cpu) {
+			sum += per_cpu(hvlpevent_counts, cpu)[i];
+		}
+
+		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
+	}
+
+	seq_printf(m, "\n  events processed by processor:\n");
+
+	for_each_online_cpu(cpu) {
+		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
+	}
+
+	return 0;
+}
+
+static int proc_lpevents_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_lpevents_show, NULL);
+}
 
-	return numIntsProcessed;
+static struct file_operations proc_lpevents_operations = {
+	.open		= proc_lpevents_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init proc_lpevents_init(void)
+{
+	struct proc_dir_entry *e;
+
+	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
+	if (e)
+		e->proc_fops = &proc_lpevents_operations;
+
+	return 0;
 }
+__initcall(proc_lpevents_init);
+
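
For context, a minimal caller sketch follows. It is illustrative only and not part of this diff: it assumes the prototypes for hvlpevent_is_pending() and process_hvlpevents() are exported through <asm/iSeries/ItLpQueue.h> (the header this file already includes), and that the caller is an interrupt-time hook such as the iSeries timer path; the function name example_drain_lpevents is made up for this sketch.

/*
 * Illustrative caller, not part of this change: drain the hypervisor
 * event queue from an interrupt-time hook.
 */
#include <linux/ptrace.h>		/* struct pt_regs */
#include <asm/iSeries/ItLpQueue.h>	/* assumed to declare the two entry points */

static void example_drain_lpevents(struct pt_regs *regs)
{
	/*
	 * Cheap check first: hvlpevent_is_pending() returns 0 on CPUs at or
	 * above spread_lpevents, so only the chosen CPUs look at the queue.
	 */
	if (hvlpevent_is_pending())
		/*
		 * process_hvlpevents() takes hvlpevent_queue.lock with
		 * spin_trylock() and simply returns if we have recursed.
		 */
		process_hvlpevents(regs);
}

Booting with spread_lpevents=N (handled by the __setup() hook above) limits event processing to the first N processors, and the per-CPU counts accumulated in hvlpevent_counts are reported through /proc/iSeries/lpevents.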