aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/char/ipmi
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/char/ipmi
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--drivers/char/ipmi/Kconfig67
-rw-r--r--drivers/char/ipmi/Makefile15
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c513
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c582
-rw-r--r--drivers/char/ipmi/ipmi_kcs_sm.c500
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c3174
-rw-r--r--drivers/char/ipmi/ipmi_poweroff.c549
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2359
-rw-r--r--drivers/char/ipmi/ipmi_si_sm.h120
-rw-r--r--drivers/char/ipmi/ipmi_smic_sm.c599
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c1068
11 files changed, 9546 insertions, 0 deletions
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
new file mode 100644
index 000000000000..a6dcb2918157
--- /dev/null
+++ b/drivers/char/ipmi/Kconfig
@@ -0,0 +1,67 @@
#
# IPMI device configuration
#

menu "IPMI"

# Core message router; every other IPMI option depends on this.
config IPMI_HANDLER
	tristate 'IPMI top-level message handler'
	help
	  This enables the central IPMI message handler, required for IPMI
	  to work.

	  IPMI is a standard for managing sensors (temperature,
	  voltage, etc.) in a system.

	  See <file:Documentation/IPMI.txt> for more details on the driver.

	  If unsure, say N.

config IPMI_PANIC_EVENT
	bool 'Generate a panic event to all BMCs on a panic'
	depends on IPMI_HANDLER
	help
	  When a panic occurs, this will cause the IPMI message handler to
	  generate an IPMI event describing the panic to each interface
	  registered with the message handler.

config IPMI_PANIC_STRING
	bool 'Generate OEM events containing the panic string'
	depends on IPMI_PANIC_EVENT
	help
	  When a panic occurs, this will cause the IPMI message handler to
	  generate IPMI OEM type f0 events holding the IPMB address of the
	  panic generator (byte 4 of the event), a sequence number for the
	  string (byte 5 of the event) and part of the string (the rest of the
	  event).  Bytes 1, 2, and 3 are the normal usage for an OEM event.
	  You can fetch these events and use the sequence numbers to piece the
	  string together.

# Userland access via /dev/ipmiN (see ipmi_devintf.c).
config IPMI_DEVICE_INTERFACE
	tristate 'Device interface for IPMI'
	depends on IPMI_HANDLER
	help
	  This provides an IOCTL interface to the IPMI message handler so
	  userland processes may use IPMI.  It supports poll() and select().

# Low-level hardware driver (ipmi_si_intf.c plus the per-interface
# state machines).
config IPMI_SI
	tristate 'IPMI System Interface handler'
	depends on IPMI_HANDLER
	help
	  Provides a driver for System Interfaces (KCS, SMIC, BT).
	  Currently, only KCS and SMIC are supported.  If
	  you are using IPMI, you should probably say "y" here.

config IPMI_WATCHDOG
	tristate 'IPMI Watchdog Timer'
	depends on IPMI_HANDLER
	help
	  This enables the IPMI watchdog timer.

config IPMI_POWEROFF
	tristate 'IPMI Poweroff'
	depends on IPMI_HANDLER
	help
	  This enables a function to power off the system with IPMI if
	  the IPMI management controller is capable of this.

endmenu
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
new file mode 100644
index 000000000000..553f0a408eda
--- /dev/null
+++ b/drivers/char/ipmi/Makefile
@@ -0,0 +1,15 @@
#
# Makefile for the ipmi drivers.
#

# ipmi_si is partial-linked from the interface-independent core plus one
# state machine per supported system interface type (KCS, SMIC, BT).
ipmi_si-objs := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o

obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
obj-$(CONFIG_IPMI_SI) += ipmi_si.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o

# Combine the si objects into the single module object named above.
ipmi_si.o: $(ipmi_si-objs)
	$(LD) -r -o $@ $(ipmi_si-objs)
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
new file mode 100644
index 000000000000..225b330115bb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -0,0 +1,513 @@
1/*
2 * ipmi_bt_sm.c
3 *
4 * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
5 * of the driver architecture at http://sourceforge.net/project/openipmi
6 *
7 * Author: Rocky Craig <first.last@hp.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
20 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
21 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
22 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
23 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA. */
28
29#include <linux/kernel.h> /* For printk. */
30#include <linux/string.h>
31#include <linux/ipmi_msgdefs.h> /* for completion codes */
32#include "ipmi_si_sm.h"
33
34#define IPMI_BT_VERSION "v33"
35
36static int bt_debug = 0x00; /* Production value 0, see following flags */
37
38#define BT_DEBUG_ENABLE 1
39#define BT_DEBUG_MSG 2
40#define BT_DEBUG_STATES 4
41
42/* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
43 and 64 byte buffers. However, one HP implementation wants 255 bytes of
44 buffer (with a documented message of 160 bytes) so go for the max.
45 Since the Open IPMI architecture is single-message oriented at this
46 stage, the queue depth of BT is of no concern. */
47
48#define BT_NORMAL_TIMEOUT 2000000 /* seconds in microseconds */
49#define BT_RETRY_LIMIT 2
50#define BT_RESET_DELAY 6000000 /* 6 seconds after warm reset */
51
52enum bt_states {
53 BT_STATE_IDLE,
54 BT_STATE_XACTION_START,
55 BT_STATE_WRITE_BYTES,
56 BT_STATE_WRITE_END,
57 BT_STATE_WRITE_CONSUME,
58 BT_STATE_B2H_WAIT,
59 BT_STATE_READ_END,
60 BT_STATE_RESET1, /* These must come last */
61 BT_STATE_RESET2,
62 BT_STATE_RESET3,
63 BT_STATE_RESTART,
64 BT_STATE_HOSED
65};
66
/* Per-interface BT state.  The fields through error_retries follow the
   layout shared with the other si state machines ("common" fields). */
struct si_sm_data {
	enum bt_states	state;
	enum bt_states	last_state;	/* assist printing and resets */
	unsigned char	seq;		/* BT sequence number */
	struct si_sm_io	*io;		/* port/memory accessors from ipmi_si */
	unsigned char	write_data[IPMI_MAX_MSG_LENGTH]; /* framed request */
	int		write_count;
	unsigned char	read_data[IPMI_MAX_MSG_LENGTH];	/* raw response */
	int		read_count;
	int		truncated;	/* response didn't fit caller's buffer */
	long		timeout;	/* time left in current state (usec) */
	unsigned int	error_retries;	/* end of "common" fields */
	int		nonzero_status;	/* hung BMCs stay all 0 */
};
81
82#define BT_CLR_WR_PTR 0x01 /* See IPMI 1.5 table 11.6.4 */
83#define BT_CLR_RD_PTR 0x02
84#define BT_H2B_ATN 0x04
85#define BT_B2H_ATN 0x08
86#define BT_SMS_ATN 0x10
87#define BT_OEM0 0x20
88#define BT_H_BUSY 0x40
89#define BT_B_BUSY 0x80
90
91/* Some bits are toggled on each write: write once to set it, once
92 more to clear it; writing a zero does nothing. To absolutely
93 clear it, check its state and write if set. This avoids the "get
94 current then use as mask" scheme to modify one bit. Note that the
95 variable "bt" is hardcoded into these macros. */
96
97#define BT_STATUS bt->io->inputb(bt->io, 0)
98#define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x)
99
100#define BMC2HOST bt->io->inputb(bt->io, 1)
101#define HOST2BMC(x) bt->io->outputb(bt->io, 1, x)
102
103#define BT_INTMASK_R bt->io->inputb(bt->io, 2)
104#define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x)
105
106/* Convenience routines for debugging. These are not multi-open safe!
107 Note the macros have hardcoded variables in them. */
108
109static char *state2txt(unsigned char state)
110{
111 switch (state) {
112 case BT_STATE_IDLE: return("IDLE");
113 case BT_STATE_XACTION_START: return("XACTION");
114 case BT_STATE_WRITE_BYTES: return("WR_BYTES");
115 case BT_STATE_WRITE_END: return("WR_END");
116 case BT_STATE_WRITE_CONSUME: return("WR_CONSUME");
117 case BT_STATE_B2H_WAIT: return("B2H_WAIT");
118 case BT_STATE_READ_END: return("RD_END");
119 case BT_STATE_RESET1: return("RESET1");
120 case BT_STATE_RESET2: return("RESET2");
121 case BT_STATE_RESET3: return("RESET3");
122 case BT_STATE_RESTART: return("RESTART");
123 case BT_STATE_HOSED: return("HOSED");
124 }
125 return("BAD STATE");
126}
127#define STATE2TXT state2txt(bt->state)
128
129static char *status2txt(unsigned char status, char *buf)
130{
131 strcpy(buf, "[ ");
132 if (status & BT_B_BUSY) strcat(buf, "B_BUSY ");
133 if (status & BT_H_BUSY) strcat(buf, "H_BUSY ");
134 if (status & BT_OEM0) strcat(buf, "OEM0 ");
135 if (status & BT_SMS_ATN) strcat(buf, "SMS ");
136 if (status & BT_B2H_ATN) strcat(buf, "B2H ");
137 if (status & BT_H2B_ATN) strcat(buf, "H2B ");
138 strcat(buf, "]");
139 return buf;
140}
141#define STATUS2TXT(buf) status2txt(status, buf)
142
143/* This will be called from within this module on a hosed condition */
144#define FIRST_SEQ 0
/* Initialize (or re-initialize) one interface's BT state.  Called at
   setup time and again from within this module on a hosed condition.
   Returns the number of bytes of I/O space this interface claims. */
static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
{
	bt->state = BT_STATE_IDLE;
	bt->last_state = BT_STATE_IDLE;
	bt->seq = FIRST_SEQ;
	bt->io = io;			/* accessor functions for the ports */
	bt->write_count = 0;
	bt->read_count = 0;
	bt->error_retries = 0;
	bt->nonzero_status = 0;
	bt->truncated = 0;
	bt->timeout = BT_NORMAL_TIMEOUT;
	return 3; /* We claim 3 bytes of space; ought to check SPMI table */
}
159
/* Queue a request for transmission.  data[0] is the NetFn/LUN byte and
   the rest is the message body; the BT framing (length and sequence
   bytes) is added here.  Returns 0 on success, -1 for a bad size, -2 if
   a transaction is already in flight. */
static int bt_start_transaction(struct si_sm_data *bt,
				unsigned char *data,
				unsigned int size)
{
	unsigned int i;

	/* Need at least NetFn/LUN + Cmd. */
	if ((size < 2) || (size > IPMI_MAX_MSG_LENGTH)) return -1;

	if ((bt->state != BT_STATE_IDLE) && (bt->state != BT_STATE_HOSED))
		return -2;

	if (bt_debug & BT_DEBUG_MSG) {
		printk(KERN_WARNING "+++++++++++++++++++++++++++++++++++++\n");
		printk(KERN_WARNING "BT: write seq=0x%02X:", bt->seq);
		for (i = 0; i < size; i ++) printk (" %02x", data[i]);
		printk("\n");
	}
	/* BT wire format: length, NetFn/LUN, seq, then the payload. */
	bt->write_data[0] = size + 1;	/* all data plus seq byte */
	bt->write_data[1] = *data;	/* NetFn/LUN */
	bt->write_data[2] = bt->seq;
	memcpy(bt->write_data + 3, data + 1, size - 1);
	bt->write_count = size + 2;

	/* Fresh transaction: clear error accounting and arm the timeout. */
	bt->error_retries = 0;
	bt->nonzero_status = 0;
	bt->read_count = 0;
	bt->truncated = 0;
	bt->state = BT_STATE_XACTION_START;
	bt->last_state = BT_STATE_IDLE;
	bt->timeout = BT_NORMAL_TIMEOUT;
	return 0;
}
192
/* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
   it calls this.  Strip out the length and seq bytes and copy the
   response (NetFn, Cmd, cCode, data...) into "data", which has room for
   "length" bytes.  Returns the number of bytes stored; on a bad or
   truncated response a 3-byte error reply is synthesized instead. */

static int bt_get_result(struct si_sm_data *bt,
			 unsigned char *data,
			 unsigned int length)
{
	int i, msg_len;

	msg_len = bt->read_count - 2;		/* account for length & seq */
	/* Always NetFn, Cmd, cCode */
	if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
		printk(KERN_WARNING "BT results: bad msg_len = %d\n", msg_len);
		data[0] = bt->write_data[1] | 0x4;	/* Kludge a response */
		data[1] = bt->write_data[3];
		data[2] = IPMI_ERR_UNSPECIFIED;
		msg_len = 3;
	} else {
		data[0] = bt->read_data[1];	/* NetFn */
		data[1] = bt->read_data[3];	/* Cmd */
		if (length < msg_len) bt->truncated = 1;
		if (bt->truncated) {	/* can be set in read_all_bytes() */
			data[2] = IPMI_ERR_MSG_TRUNCATED;
			msg_len = 3;
		} else memcpy(data + 2, bt->read_data + 4, msg_len - 2);

		if (bt_debug & BT_DEBUG_MSG) {
			printk (KERN_WARNING "BT: res (raw)");
			for (i = 0; i < msg_len; i++) printk(" %02x", data[i]);
			printk ("\n");
		}
	}
	bt->read_count = 0;	/* paranoia */
	return msg_len;
}
228
229/* This bit's functionality is optional */
230#define BT_BMC_HWRST 0x80
231
/* Force the interface back to a known-idle condition: drop both busy
   bits, reset the write pointer, clear any pending SMS attention and
   write the (optional) BMC hardware-reset bit to the intmask register. */
static void reset_flags(struct si_sm_data *bt)
{
	/* H_BUSY/B_BUSY toggle on write, so only write when currently set
	   (see the comment above the BT_STATUS/BT_CONTROL macros). */
	if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY);
	if (BT_STATUS & BT_B_BUSY) BT_CONTROL(BT_B_BUSY);
	BT_CONTROL(BT_CLR_WR_PTR);
	BT_CONTROL(BT_SMS_ATN);
	BT_INTMASK_W(BT_BMC_HWRST);
#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION
	/* Drain any message the BMC left pending in the B2H buffer. */
	if (BT_STATUS & BT_B2H_ATN) {
		int i;
		BT_CONTROL(BT_H_BUSY);		/* set */
		BT_CONTROL(BT_B2H_ATN);		/* ack the pending data */
		BT_CONTROL(BT_CLR_RD_PTR);
		for (i = 0; i < IPMI_MAX_MSG_LENGTH + 2; i++) BMC2HOST;
		BT_CONTROL(BT_H_BUSY);		/* clear */
	}
#endif
}
250
/* Push the fully framed request in write_data out to the BMC's
   host-to-BMC buffer, dumping it first if message debugging is on. */
static inline void write_all_bytes(struct si_sm_data *bt)
{
	int i;

	if (bt_debug & BT_DEBUG_MSG) {
		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
			bt->write_count, bt->seq);
		for (i = 0; i < bt->write_count; i++)
			printk (" %02x", bt->write_data[i]);
		printk ("\n");
	}
	for (i = 0; i < bt->write_count; i++) HOST2BMC(bt->write_data[i]);
}
264
/* Pull a complete response from the BMC-to-host buffer into read_data.
   Returns 1 when the caller may advance (including the truncated case,
   which bt_get_result() later turns into an error reply); returns 0
   when the packet does not match the outstanding request, so the
   caller should stay in B2H_WAIT and try again. */
static inline int read_all_bytes(struct si_sm_data *bt)
{
	unsigned char i;

	/* First byte on the wire is the length of what follows. */
	bt->read_data[0] = BMC2HOST;
	bt->read_count = bt->read_data[0];
	if (bt_debug & BT_DEBUG_MSG)
		printk(KERN_WARNING "BT: read %d bytes:", bt->read_count);

	/* minimum: length, NetFn, Seq, Cmd, cCode == 5 total, or 4 more
	   following the length byte. */
	if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) {
		if (bt_debug & BT_DEBUG_MSG)
			printk("bad length %d\n", bt->read_count);
		bt->truncated = 1;
		return 1;	/* let next XACTION START clean it up */
	}
	for (i = 1; i <= bt->read_count; i++) bt->read_data[i] = BMC2HOST;
	bt->read_count++;	/* account for the length byte */

	if (bt_debug & BT_DEBUG_MSG) {
		for (i = 0; i < bt->read_count; i++)
			printk (" %02x", bt->read_data[i]);
		printk ("\n");
	}
	if (bt->seq != bt->write_data[2])	/* idiot check */
		printk(KERN_WARNING "BT: internal error: sequence mismatch\n");

	/* per the spec, the (NetFn, Seq, Cmd) tuples should match */
	if ((bt->read_data[3] == bt->write_data[3]) &&		/* Cmd */
	    (bt->read_data[2] == bt->write_data[2]) &&		/* Sequence */
	    ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8)))
		return 1;

	if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "BT: bad packet: "
		"want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n",
		bt->write_data[1], bt->write_data[2], bt->write_data[3],
		bt->read_data[1], bt->read_data[2], bt->read_data[3]);
	return 0;
}
305
306/* Modifies bt->state appropriately, need to get into the bt_event() switch */
307
/* Handle an error condition ("reason" is for the log).  Modifies
   bt->state so the next bt_event() pass takes the recovery path:
   retry the command, escalate to a warm reset, or declare the
   interface HOSED once the retry limit is exhausted. */
static void error_recovery(struct si_sm_data *bt, char *reason)
{
	unsigned char status;
	char buf[40];	/* For getting status */

	bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */

	status = BT_STATUS;
	printk(KERN_WARNING "BT: %s in %s %s ", reason, STATE2TXT,
	       STATUS2TXT(buf));

	(bt->error_retries)++;
	if (bt->error_retries > BT_RETRY_LIMIT) {
		printk("retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
		bt->state = BT_STATE_HOSED;
		if (!bt->nonzero_status)
			/* status never left zero: hardware looks dead */
			printk(KERN_ERR "IPMI: BT stuck, try power cycle\n");
		else if (bt->seq == FIRST_SEQ + BT_RETRY_LIMIT) {
			/* most likely during insmod */
			printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
			bt->state = BT_STATE_RESET1;
		}
		return;
	}

	/* Sometimes the BMC queues get in an "off-by-one" state...*/
	if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) {
		printk("retry B2H_WAIT\n");
		return;
	}

	printk("restart command\n");
	bt->state = BT_STATE_RESTART;
}
342
/* Check the status and (possibly) advance the BT state machine.  The
   default return is SI_SM_CALL_WITH_DELAY.  "time" is the time elapsed
   since the last call, in the same units as BT_NORMAL_TIMEOUT
   (microseconds per its definition above). */

static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
	unsigned char status;
	char buf[40];	/* For getting status */
	int i;

	status = BT_STATUS;
	bt->nonzero_status |= status;	/* any life signs at all? */

	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state))
		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
			STATE2TXT,
			STATUS2TXT(buf),
			bt->timeout,
			time);
	bt->last_state = bt->state;

	if (bt->state == BT_STATE_HOSED) return SI_SM_HOSED;

	if (bt->state != BT_STATE_IDLE) {	/* do timeout test */

		/* Certain states, on error conditions, can lock up a CPU
		   because they are effectively in an infinite loop with
		   CALL_WITHOUT_DELAY (right back here with time == 0).
		   Prevent infinite lockup by ALWAYS decrementing timeout. */

		/* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT
		   (noticed in ipmi_smic_sm.c January 2004) */

		if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT)) time = 100;
		bt->timeout -= time;
		/* RESET1..RESTART manage their own timing, so only the
		   normal transaction states get the timeout recovery. */
		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
			error_recovery(bt, "timed out");
			return SI_SM_CALL_WITHOUT_DELAY;
		}
	}

	switch (bt->state) {

	case BT_STATE_IDLE:	/* check for asynchronous messages */
		if (status & BT_SMS_ATN) {
			BT_CONTROL(BT_SMS_ATN);	/* clear it */
			return SI_SM_ATTN;
		}
		return SI_SM_IDLE;

	case BT_STATE_XACTION_START:
		if (status & BT_H_BUSY) {
			BT_CONTROL(BT_H_BUSY);	/* force clear (bit toggles) */
			break;
		}
		if (status & BT_B2H_ATN) break;	/* data pending; wait it out */
		bt->state = BT_STATE_WRITE_BYTES;
		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */

	case BT_STATE_WRITE_BYTES:
		if (status & (BT_B_BUSY | BT_H2B_ATN)) break;
		BT_CONTROL(BT_CLR_WR_PTR);
		write_all_bytes(bt);
		BT_CONTROL(BT_H2B_ATN);	/* clears too fast to catch? */
		bt->state = BT_STATE_WRITE_CONSUME;
		return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */

	case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
		if (status & (BT_H2B_ATN | BT_B_BUSY)) break;
		bt->state = BT_STATE_B2H_WAIT;
		/* fall through with status */

	/* Stay in BT_STATE_B2H_WAIT until a packet matches.  However, spinning
	   hard here, constantly reading status, seems to hold off the
	   generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */

	case BT_STATE_B2H_WAIT:
		if (!(status & BT_B2H_ATN)) break;

		/* Assume ordered, uncached writes: no need to wait */
		if (!(status & BT_H_BUSY)) BT_CONTROL(BT_H_BUSY); /* set */
		BT_CONTROL(BT_B2H_ATN);		/* clear it, ACK to the BMC */
		BT_CONTROL(BT_CLR_RD_PTR);	/* reset the queue */
		i = read_all_bytes(bt);
		BT_CONTROL(BT_H_BUSY);		/* clear */
		if (!i) break;			/* Try this state again */
		bt->state = BT_STATE_READ_END;
		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */

	case BT_STATE_READ_END:

		/* I could wait on BT_H_BUSY to go clear for a truly clean
		   exit.  However, this is already done in XACTION_START
		   and the (possible) extra loop/status/possible wait affects
		   performance.  So, as long as it works, just ignore H_BUSY */

#ifdef MAKE_THIS_TRUE_IF_NECESSARY

		if (status & BT_H_BUSY) break;
#endif
		bt->seq++;
		bt->state = BT_STATE_IDLE;
		return SI_SM_TRANSACTION_COMPLETE;

	case BT_STATE_RESET1:	/* warm-reset path, entered from error_recovery */
		reset_flags(bt);
		bt->timeout = BT_RESET_DELAY;
		bt->state = BT_STATE_RESET2;
		break;

	case BT_STATE_RESET2:		/* Send a soft reset */
		BT_CONTROL(BT_CLR_WR_PTR);
		HOST2BMC(3);		/* number of bytes following */
		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
		HOST2BMC(42);		/* Sequence number */
		HOST2BMC(3);		/* Cmd == Soft reset */
		BT_CONTROL(BT_H2B_ATN);
		bt->state = BT_STATE_RESET3;
		break;

	case BT_STATE_RESET3:	/* sit out the BT_RESET_DELAY countdown */
		if (bt->timeout > 0) return SI_SM_CALL_WITH_DELAY;
		bt->state = BT_STATE_RESTART;	/* printk in debug modes */
		break;

	case BT_STATE_RESTART:	/* don't reset retries! */
		bt->write_data[2] = ++bt->seq;	/* re-stamp the request */
		bt->read_count = 0;
		bt->nonzero_status = 0;
		bt->timeout = BT_NORMAL_TIMEOUT;
		bt->state = BT_STATE_XACTION_START;
		break;

	default:	/* HOSED is supposed to be caught much earlier */
		error_recovery(bt, "internal logic error");
		break;
	}
	return SI_SM_CALL_WITH_DELAY;
}
481
/* Probe for a BT interface at the configured address.  Returns 0 if one
   appears present (and leaves it reset via reset_flags()), nonzero if
   not — the caller uses negative logic. */
static int bt_detect(struct si_sm_data *bt)
{
	/* It's impossible for the BT status and interrupt registers to be
	   all 1's, (assuming a properly functioning, self-initialized BMC)
	   but that's what you get from reading a bogus address, so we
	   test that first.  The calling routine uses negative logic. */

	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) return 1;
	reset_flags(bt);
	return 0;
}
493
/* Teardown hook required by struct si_sm_handlers; the BT state
   machine holds no resources, so there is nothing to release. */
static void bt_cleanup(struct si_sm_data *bt)
{
}
497
498static int bt_size(void)
499{
500 return sizeof(struct si_sm_data);
501}
502
/* Handler table exported to ipmi_si_intf.c; the interface contract is
   declared in ipmi_si_sm.h. */
struct si_sm_handlers bt_smi_handlers =
{
	.version		= IPMI_BT_VERSION,
	.init_data		= bt_init_data,
	.start_transaction	= bt_start_transaction,
	.get_result		= bt_get_result,
	.event			= bt_event,
	.detect			= bt_detect,
	.cleanup		= bt_cleanup,
	.size			= bt_size,
};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
new file mode 100644
index 000000000000..49d67f5384a2
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -0,0 +1,582 @@
1/*
2 * ipmi_devintf.c
3 *
4 * Linux device interface for the IPMI message handler.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/config.h>
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/errno.h>
38#include <asm/system.h>
39#include <linux/sched.h>
40#include <linux/poll.h>
41#include <linux/spinlock.h>
42#include <linux/slab.h>
43#include <linux/devfs_fs_kernel.h>
44#include <linux/ipmi.h>
45#include <asm/semaphore.h>
46#include <linux/init.h>
47
48#define IPMI_DEVINTF_VERSION "v33"
49
/* Per-open-file state for /dev/ipmiN. */
struct ipmi_file_private
{
	ipmi_user_t		user;		/* from ipmi_create_user() */
	spinlock_t		recv_msg_lock;	/* protects recv_msgs */
	struct list_head	recv_msgs;	/* queued ipmi_recv_msg's */
	struct file		*file;		/* back-pointer to our file */
	struct fasync_struct	*fasync_queue;	/* SIGIO subscribers */
	wait_queue_head_t	wait;		/* poll/select waiters */
	struct semaphore	recv_sem;	/* serializes queue readers */
	int			default_retries; /* -1 = low-level default */
	unsigned int		default_retry_time_ms; /* 0 = low-level default */
};
62
/* Callback from the message handler: queue an incoming message on this
   file's receive list and, if the list was empty, wake any poll/read
   waiters and deliver SIGIO.  May run in atomic context (irqsave). */
static void file_receive_handler(struct ipmi_recv_msg *msg,
				 void            *handler_data)
{
	struct ipmi_file_private *priv = handler_data;
	int                      was_empty;
	unsigned long            flags;

	spin_lock_irqsave(&(priv->recv_msg_lock), flags);

	was_empty = list_empty(&(priv->recv_msgs));
	list_add_tail(&(msg->link), &(priv->recv_msgs));

	/* Only the empty->non-empty transition needs a wakeup. */
	if (was_empty) {
		wake_up_interruptible(&priv->wait);
		kill_fasync(&priv->fasync_queue, SIGIO, POLL_IN);
	}

	spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
}
82
83static unsigned int ipmi_poll(struct file *file, poll_table *wait)
84{
85 struct ipmi_file_private *priv = file->private_data;
86 unsigned int mask = 0;
87 unsigned long flags;
88
89 poll_wait(file, &priv->wait, wait);
90
91 spin_lock_irqsave(&priv->recv_msg_lock, flags);
92
93 if (! list_empty(&(priv->recv_msgs)))
94 mask |= (POLLIN | POLLRDNORM);
95
96 spin_unlock_irqrestore(&priv->recv_msg_lock, flags);
97
98 return mask;
99}
100
101static int ipmi_fasync(int fd, struct file *file, int on)
102{
103 struct ipmi_file_private *priv = file->private_data;
104 int result;
105
106 result = fasync_helper(fd, file, on, &priv->fasync_queue);
107
108 return (result);
109}
110
/* Callbacks registered with the message handler for each open file. */
static struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl	= file_receive_handler,
};
115
/* Open /dev/ipmiN: allocate per-file state and register as a user of
   interface N (the device minor number).  Returns 0 or -errno. */
static int ipmi_open(struct inode *inode, struct file *file)
{
	int                      if_num = iminor(inode);
	int                      rv;
	struct ipmi_file_private *priv;


	priv = kmalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->file = file;

	rv = ipmi_create_user(if_num,
			      &ipmi_hndlrs,
			      priv,
			      &(priv->user));
	if (rv) {
		kfree(priv);
		return rv;
	}

	file->private_data = priv;

	/* NOTE(review): the receive structures are initialized after
	   ipmi_create_user(); presumably no message can arrive before
	   open() returns — confirm against the message handler. */
	spin_lock_init(&(priv->recv_msg_lock));
	INIT_LIST_HEAD(&(priv->recv_msgs));
	init_waitqueue_head(&priv->wait);
	priv->fasync_queue = NULL;
	sema_init(&(priv->recv_sem), 1);

	/* Use the low-level defaults. */
	priv->default_retries = -1;
	priv->default_retry_time_ms = 0;

	return 0;
}
152
153static int ipmi_release(struct inode *inode, struct file *file)
154{
155 struct ipmi_file_private *priv = file->private_data;
156 int rv;
157
158 rv = ipmi_destroy_user(priv->user);
159 if (rv)
160 return rv;
161
162 ipmi_fasync (-1, file, 0);
163
164 /* FIXME - free the messages in the list. */
165 kfree(priv);
166
167 return 0;
168}
169
/* Common worker for the two SEND_COMMAND ioctls: copy the address and
   message body in from user space and hand them to
   ipmi_request_settime() with the given retry parameters.
   Returns 0 or -errno. */
static int handle_send_req(ipmi_user_t     user,
			   struct ipmi_req *req,
			   int             retries,
			   unsigned int    retry_time_ms)
{
	int              rv;
	struct ipmi_addr addr;
	struct kernel_ipmi_msg msg;

	if (req->addr_len > sizeof(struct ipmi_addr))
		return -EINVAL;

	if (copy_from_user(&addr, req->addr, req->addr_len))
		return -EFAULT;

	msg.netfn = req->msg.netfn;
	msg.cmd = req->msg.cmd;
	msg.data_len = req->msg.data_len;
	/* Always allocate the max; data_len is validated below. */
	msg.data = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!msg.data)
		return -ENOMEM;

	/* From here out we cannot return, we must jump to "out" for
	   error exits to free msgdata. */

	rv = ipmi_validate_addr(&addr, req->addr_len);
	if (rv)
		goto out;

	if (req->msg.data != NULL) {
		if (req->msg.data_len > IPMI_MAX_MSG_LENGTH) {
			rv = -EMSGSIZE;
			goto out;
		}

		if (copy_from_user(msg.data,
				   req->msg.data,
				   req->msg.data_len))
		{
			rv = -EFAULT;
			goto out;
		}
	} else {
		msg.data_len = 0;
	}

	rv = ipmi_request_settime(user,
				  &addr,
				  req->msgid,
				  &msg,
				  NULL,
				  0,
				  retries,
				  retry_time_ms);
 out:
	kfree(msg.data);
	return rv;
}
228
/* ioctl dispatcher for /dev/ipmiN.  "data" is a user-space pointer to
   the per-command argument structure.  Unrecognized commands fall out
   of the switch and return the initial -EINVAL. */
static int ipmi_ioctl(struct inode  *inode,
		      struct file   *file,
		      unsigned int  cmd,
		      unsigned long data)
{
	int                      rv = -EINVAL;
	struct ipmi_file_private *priv = file->private_data;
	void __user *arg = (void __user *)data;

	switch (cmd)
	{
	case IPMICTL_SEND_COMMAND:
	{
		struct ipmi_req req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		/* Uses this file's default retry parameters. */
		rv = handle_send_req(priv->user,
				     &req,
				     priv->default_retries,
				     priv->default_retry_time_ms);
		break;
	}

	case IPMICTL_SEND_COMMAND_SETTIME:
	{
		struct ipmi_req_settime req;

		if (copy_from_user(&req, arg, sizeof(req))) {
			rv = -EFAULT;
			break;
		}

		/* Caller supplies its own retry parameters. */
		rv = handle_send_req(priv->user,
				     &req.req,
				     req.retries,
				     req.retry_time_ms);
		break;
	}

	case IPMICTL_RECEIVE_MSG:
	case IPMICTL_RECEIVE_MSG_TRUNC:
	{
		struct ipmi_recv      rsp;
		int              addr_len;
		struct list_head *entry;
		struct ipmi_recv_msg  *msg;
		unsigned long    flags;


		rv = 0;
		if (copy_from_user(&rsp, arg, sizeof(rsp))) {
			rv = -EFAULT;
			break;
		}

		/* We claim a semaphore because we don't want two
                   users getting something from the queue at a time.
                   Since we have to release the spinlock before we can
                   copy the data to the user, it's possible another
                   user will grab something from the queue, too.  Then
                   the messages might get out of order if something
                   fails and the message gets put back onto the
                   queue.  This semaphore prevents that problem. */
		down(&(priv->recv_sem));

		/* Grab the message off the list. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		if (list_empty(&(priv->recv_msgs))) {
			spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
			rv = -EAGAIN;
			goto recv_err;
		}
		entry = priv->recv_msgs.next;
		msg = list_entry(entry, struct ipmi_recv_msg, link);
		list_del(entry);
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);

		addr_len = ipmi_addr_length(msg->addr.addr_type);
		if (rsp.addr_len < addr_len)
		{
			rv = -EINVAL;
			goto recv_putback_on_err;
		}

		if (copy_to_user(rsp.addr, &(msg->addr), addr_len)) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}
		rsp.addr_len = addr_len;

		rsp.recv_type = msg->recv_type;
		rsp.msgid = msg->msgid;
		rsp.msg.netfn = msg->msg.netfn;
		rsp.msg.cmd = msg->msg.cmd;

		if (msg->msg.data_len > 0) {
			if (rsp.msg.data_len < msg->msg.data_len) {
				rv = -EMSGSIZE;
				if (cmd == IPMICTL_RECEIVE_MSG_TRUNC) {
					/* TRUNC variant: deliver what fits
					   while still reporting -EMSGSIZE. */
					msg->msg.data_len = rsp.msg.data_len;
				} else {
					goto recv_putback_on_err;
				}
			}

			if (copy_to_user(rsp.msg.data,
					 msg->msg.data,
					 msg->msg.data_len))
			{
				rv = -EFAULT;
				goto recv_putback_on_err;
			}
			rsp.msg.data_len = msg->msg.data_len;
		} else {
			rsp.msg.data_len = 0;
		}

		if (copy_to_user(arg, &rsp, sizeof(rsp))) {
			rv = -EFAULT;
			goto recv_putback_on_err;
		}

		up(&(priv->recv_sem));
		ipmi_free_recv_msg(msg);
		break;

	recv_putback_on_err:
		/* If we got an error, put the message back onto
		   the head of the queue. */
		spin_lock_irqsave(&(priv->recv_msg_lock), flags);
		list_add(entry, &(priv->recv_msgs));
		spin_unlock_irqrestore(&(priv->recv_msg_lock), flags);
		up(&(priv->recv_sem));
		break;

	recv_err:
		up(&(priv->recv_sem));
		break;
	}

	case IPMICTL_REGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_register_for_cmd(priv->user, val.netfn, val.cmd);
		break;
	}

	case IPMICTL_UNREGISTER_FOR_CMD:
	{
		struct ipmi_cmdspec   val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_unregister_for_cmd(priv->user, val.netfn, val.cmd);
		break;
	}

	case IPMICTL_SET_GETS_EVENTS_CMD:
	{
		int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		rv = ipmi_set_gets_events(priv->user, val);
		break;
	}

	case IPMICTL_SET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		ipmi_set_my_address(priv->user, val);
		rv = 0;
		break;
	}

	case IPMICTL_GET_MY_ADDRESS_CMD:
	{
		unsigned int val;

		val = ipmi_get_my_address(priv->user);

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_MY_LUN_CMD:
	{
		unsigned int val;

		if (copy_from_user(&val, arg, sizeof(val))) {
			rv = -EFAULT;
			break;
		}

		ipmi_set_my_LUN(priv->user, val);
		rv = 0;
		break;
	}

	case IPMICTL_GET_MY_LUN_CMD:
	{
		unsigned int val;

		val = ipmi_get_my_LUN(priv->user);

		if (copy_to_user(arg, &val, sizeof(val))) {
			rv = -EFAULT;
			break;
		}
		rv = 0;
		break;
	}

	case IPMICTL_SET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		if (copy_from_user(&parms, arg, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		/* Stored per open file; consumed by IPMICTL_SEND_COMMAND. */
		priv->default_retries = parms.retries;
		priv->default_retry_time_ms = parms.retry_time_ms;
		rv = 0;
		break;
	}

	case IPMICTL_GET_TIMING_PARMS_CMD:
	{
		struct ipmi_timing_parms parms;

		parms.retries = priv->default_retries;
		parms.retry_time_ms = priv->default_retry_time_ms;

		if (copy_to_user(arg, &parms, sizeof(parms))) {
			rv = -EFAULT;
			break;
		}

		rv = 0;
		break;
	}
	}

	return rv;
}
501
502
/* File operations for the /dev/ipmiN character device. */
static struct file_operations ipmi_fops = {
	.owner		= THIS_MODULE,
	.ioctl		= ipmi_ioctl,
	.open		= ipmi_open,
	.release	= ipmi_release,
	.fasync		= ipmi_fasync,
	.poll		= ipmi_poll,
};
511
512#define DEVICE_NAME "ipmidev"
513
514static int ipmi_major = 0;
515module_param(ipmi_major, int, 0);
516MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By"
517 " default, or if you set it to zero, it will choose the next"
518 " available device. Setting it to -1 will disable the"
519 " interface. Other values will set the major device number"
520 " to that value.");
521
/* Watcher callback: a new IPMI interface appeared; create its devfs
   node ipmidev/<n> with minor == interface number. */
static void ipmi_new_smi(int if_num)
{
	devfs_mk_cdev(MKDEV(ipmi_major, if_num),
		      S_IFCHR | S_IRUSR | S_IWUSR,
		      "ipmidev/%d", if_num);
}
528
/* Watcher callback: an interface went away; remove its devfs node. */
static void ipmi_smi_gone(int if_num)
{
	devfs_remove("ipmidev/%d", if_num);
}
533
/* Registered with the message handler to track interface arrival
   and departure. */
static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone,
};
540
/* Module init: register the character device (dynamic major when
   ipmi_major == 0), create the devfs directory, and start watching for
   interfaces.  Returns 0 or -errno. */
static __init int init_ipmi_devintf(void)
{
	int rv;

	/* ipmi_major == -1 means "interface disabled" per the modparam. */
	if (ipmi_major < 0)
		return -EINVAL;

	printk(KERN_INFO "ipmi device interface version "
	       IPMI_DEVINTF_VERSION "\n");

	rv = register_chrdev(ipmi_major, DEVICE_NAME, &ipmi_fops);
	if (rv < 0) {
		printk(KERN_ERR "ipmi: can't get major %d\n", ipmi_major);
		return rv;
	}

	/* Dynamic allocation: register_chrdev() returned the major. */
	if (ipmi_major == 0) {
		ipmi_major = rv;
	}

	devfs_mk_dir(DEVICE_NAME);

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		unregister_chrdev(ipmi_major, DEVICE_NAME);
		printk(KERN_WARNING "ipmi: can't register smi watcher\n");
		return rv;
	}

	return 0;
}
572module_init(init_ipmi_devintf);
573
/* Module exit: undo init_ipmi_devintf() in reverse order. */
static __exit void cleanup_ipmi(void)
{
	ipmi_smi_watcher_unregister(&smi_watcher);
	devfs_remove(DEVICE_NAME);
	unregister_chrdev(ipmi_major, DEVICE_NAME);
}
580module_exit(cleanup_ipmi);
581
582MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
new file mode 100644
index 000000000000..48cce24329be
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -0,0 +1,500 @@
1/*
2 * ipmi_kcs_sm.c
3 *
4 * State machine for handling IPMI KCS interfaces.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34/*
35 * This state machine is taken from the state machine in the IPMI spec,
36 * pretty much verbatim. If you have questions about the states, see
37 * that document.
38 */
39
40#include <linux/kernel.h> /* For printk. */
41#include <linux/string.h>
42#include <linux/ipmi_msgdefs.h> /* for completion codes */
43#include "ipmi_si_sm.h"
44
45#define IPMI_KCS_VERSION "v33"
46
47/* Set this if you want a printout of why the state machine was hosed
48 when it gets hosed. */
49#define DEBUG_HOSED_REASON
50
51/* Print the state machine state on entry every time. */
52#undef DEBUG_STATE
53
54/* The states the KCS driver may be in. */
55enum kcs_states {
56 KCS_IDLE, /* The KCS interface is currently
57 doing nothing. */
58 KCS_START_OP, /* We are starting an operation. The
59 data is in the output buffer, but
60 nothing has been done to the
61 interface yet. This was added to
62 the state machine in the spec to
63 wait for the initial IBF. */
64 KCS_WAIT_WRITE_START, /* We have written a write cmd to the
65 interface. */
66 KCS_WAIT_WRITE, /* We are writing bytes to the
67 interface. */
68 KCS_WAIT_WRITE_END, /* We have written the write end cmd
69 to the interface, and still need to
70 write the last byte. */
71 KCS_WAIT_READ, /* We are waiting to read data from
72 the interface. */
73 KCS_ERROR0, /* State to transition to the error
74 handler, this was added to the
75 state machine in the spec to be
76 sure IBF was there. */
77 KCS_ERROR1, /* First stage error handler, wait for
78 the interface to respond. */
79 KCS_ERROR2, /* The abort cmd has been written,
80 wait for the interface to
81 respond. */
82 KCS_ERROR3, /* We wrote some data to the
83 interface, wait for it to switch to
84 read mode. */
85 KCS_HOSED /* The hardware failed to follow the
86 state machine. */
87};
88
89#define MAX_KCS_READ_SIZE 80
90#define MAX_KCS_WRITE_SIZE 80
91
92/* Timeouts in microseconds. */
93#define IBF_RETRY_TIMEOUT 1000000
94#define OBF_RETRY_TIMEOUT 1000000
95#define MAX_ERROR_RETRIES 10
96
/* Per-interface KCS state; one is allocated per interface by the
   generic si code (size reported by kcs_size() below). */
struct si_sm_data
{
	enum kcs_states state;	/* Current state machine state. */
	struct si_sm_io *io;	/* I/O routines for this interface. */
	unsigned char   write_data[MAX_KCS_WRITE_SIZE]; /* Request bytes. */
	int             write_pos;	/* Next byte of write_data to send. */
	int             write_count;	/* Bytes of write_data left to send. */
	int             orig_write_count; /* Full request length, kept so the
					     error path can retransmit. */
	unsigned char   read_data[MAX_KCS_READ_SIZE]; /* Response bytes. */
	int             read_pos;	/* Response bytes received so far. */
	int             truncated;	/* Response overflowed read_data. */

	unsigned int  error_retries;	/* Error recoveries this transaction. */
	long          ibf_timeout;	/* usecs left to wait for IBF clear. */
	long          obf_timeout;	/* usecs left to wait for OBF set. */
};
113
114static unsigned int init_kcs_data(struct si_sm_data *kcs,
115 struct si_sm_io *io)
116{
117 kcs->state = KCS_IDLE;
118 kcs->io = io;
119 kcs->write_pos = 0;
120 kcs->write_count = 0;
121 kcs->orig_write_count = 0;
122 kcs->read_pos = 0;
123 kcs->error_retries = 0;
124 kcs->truncated = 0;
125 kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
126 kcs->obf_timeout = OBF_RETRY_TIMEOUT;
127
128 /* Reserve 2 I/O bytes. */
129 return 2;
130}
131
/* Register accessors.  A KCS interface exposes two registers:
   offset 0 is data in/out, offset 1 is status (read) / command
   (write). */

static inline unsigned char read_status(struct si_sm_data *kcs)
{
	return kcs->io->inputb(kcs->io, 1);
}

static inline unsigned char read_data(struct si_sm_data *kcs)
{
	return kcs->io->inputb(kcs->io, 0);
}

static inline void write_cmd(struct si_sm_data *kcs, unsigned char data)
{
	kcs->io->outputb(kcs->io, 1, data);
}

static inline void write_data(struct si_sm_data *kcs, unsigned char data)
{
	kcs->io->outputb(kcs->io, 0, data);
}
151
152/* Control codes. */
153#define KCS_GET_STATUS_ABORT 0x60
154#define KCS_WRITE_START 0x61
155#define KCS_WRITE_END 0x62
156#define KCS_READ_BYTE 0x68
157
158/* Status bits. */
159#define GET_STATUS_STATE(status) (((status) >> 6) & 0x03)
160#define KCS_IDLE_STATE 0
161#define KCS_READ_STATE 1
162#define KCS_WRITE_STATE 2
163#define KCS_ERROR_STATE 3
164#define GET_STATUS_ATN(status) ((status) & 0x04)
165#define GET_STATUS_IBF(status) ((status) & 0x02)
166#define GET_STATUS_OBF(status) ((status) & 0x01)
167
168
169static inline void write_next_byte(struct si_sm_data *kcs)
170{
171 write_data(kcs, kcs->write_data[kcs->write_pos]);
172 (kcs->write_pos)++;
173 (kcs->write_count)--;
174}
175
176static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
177{
178 (kcs->error_retries)++;
179 if (kcs->error_retries > MAX_ERROR_RETRIES) {
180#ifdef DEBUG_HOSED_REASON
181 printk("ipmi_kcs_sm: kcs hosed: %s\n", reason);
182#endif
183 kcs->state = KCS_HOSED;
184 } else {
185 kcs->state = KCS_ERROR0;
186 }
187}
188
189static inline void read_next_byte(struct si_sm_data *kcs)
190{
191 if (kcs->read_pos >= MAX_KCS_READ_SIZE) {
192 /* Throw the data away and mark it truncated. */
193 read_data(kcs);
194 kcs->truncated = 1;
195 } else {
196 kcs->read_data[kcs->read_pos] = read_data(kcs);
197 (kcs->read_pos)++;
198 }
199 write_data(kcs, KCS_READ_BYTE);
200}
201
202static inline int check_ibf(struct si_sm_data *kcs, unsigned char status,
203 long time)
204{
205 if (GET_STATUS_IBF(status)) {
206 kcs->ibf_timeout -= time;
207 if (kcs->ibf_timeout < 0) {
208 start_error_recovery(kcs, "IBF not ready in time");
209 kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
210 return 1;
211 }
212 return 0;
213 }
214 kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
215 return 1;
216}
217
218static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
219 long time)
220{
221 if (! GET_STATUS_OBF(status)) {
222 kcs->obf_timeout -= time;
223 if (kcs->obf_timeout < 0) {
224 start_error_recovery(kcs, "OBF not ready in time");
225 return 1;
226 }
227 return 0;
228 }
229 kcs->obf_timeout = OBF_RETRY_TIMEOUT;
230 return 1;
231}
232
/* If the BMC has left a byte in the output register, drain it so it
   cannot be mistaken for response data later. */
static void clear_obf(struct si_sm_data *kcs, unsigned char status)
{
	if (!GET_STATUS_OBF(status))
		return;
	read_data(kcs);
}
238
239static void restart_kcs_transaction(struct si_sm_data *kcs)
240{
241 kcs->write_count = kcs->orig_write_count;
242 kcs->write_pos = 0;
243 kcs->read_pos = 0;
244 kcs->state = KCS_WAIT_WRITE_START;
245 kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
246 kcs->obf_timeout = OBF_RETRY_TIMEOUT;
247 write_cmd(kcs, KCS_WRITE_START);
248}
249
250static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
251 unsigned int size)
252{
253 if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
254 return -1;
255 }
256
257 if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
258 return -2;
259 }
260
261 kcs->error_retries = 0;
262 memcpy(kcs->write_data, data, size);
263 kcs->write_count = size;
264 kcs->orig_write_count = size;
265 kcs->write_pos = 0;
266 kcs->read_pos = 0;
267 kcs->state = KCS_START_OP;
268 kcs->ibf_timeout = IBF_RETRY_TIMEOUT;
269 kcs->obf_timeout = OBF_RETRY_TIMEOUT;
270 return 0;
271}
272
273static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
274 unsigned int length)
275{
276 if (length < kcs->read_pos) {
277 kcs->read_pos = length;
278 kcs->truncated = 1;
279 }
280
281 memcpy(data, kcs->read_data, kcs->read_pos);
282
283 if ((length >= 3) && (kcs->read_pos < 3)) {
284 /* Guarantee that we return at least 3 bytes, with an
285 error in the third byte if it is too short. */
286 data[2] = IPMI_ERR_UNSPECIFIED;
287 kcs->read_pos = 3;
288 }
289 if (kcs->truncated) {
290 /* Report a truncated error. We might overwrite
291 another error, but that's too bad, the user needs
292 to know it was truncated. */
293 data[2] = IPMI_ERR_MSG_TRUNCATED;
294 kcs->truncated = 0;
295 }
296
297 return kcs->read_pos;
298}
299
/* This implements the state machine defined in the IPMI manual, see
   that for details on how this works.  Divide that flowchart into
   sections delimited by "Wait for IBF" and this will become clear.
   'time' is the number of microseconds elapsed since the previous
   call; it is charged against the IBF/OBF timeouts.  Returns the
   next action the generic si layer should take. */
static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
{
	unsigned char status;
	unsigned char state;

	status = read_status(kcs);

#ifdef DEBUG_STATE
	printk("  State = %d, %x\n", kcs->state, status);
#endif
	/* All states wait for ibf, so just do it here. */
	if (!check_ibf(kcs, status, time))
		return SI_SM_CALL_WITH_DELAY;

	/* Just about everything looks at the KCS state, so grab that, too. */
	state = GET_STATUS_STATE(status);

	switch (kcs->state) {
	case KCS_IDLE:
		/* If there's an interrupt source, turn it off. */
		clear_obf(kcs, status);

		if (GET_STATUS_ATN(status))
			return SI_SM_ATTN;
		else
			return SI_SM_IDLE;

	case KCS_START_OP:
		/* NOTE(review): this compares the hardware status
		   field against enum kcs_states KCS_IDLE (0) rather
		   than KCS_IDLE_STATE (also 0); the values coincide
		   so behavior is correct, but KCS_IDLE_STATE is the
		   intended constant. */
		if (state != KCS_IDLE) {
			start_error_recovery(kcs,
					     "State machine not idle at start");
			break;
		}

		clear_obf(kcs, status);
		write_cmd(kcs, KCS_WRITE_START);
		kcs->state = KCS_WAIT_WRITE_START;
		break;

	case KCS_WAIT_WRITE_START:
		if (state != KCS_WRITE_STATE) {
			start_error_recovery(
				kcs,
				"Not in write state at write start");
			break;
		}
		read_data(kcs);
		if (kcs->write_count == 1) {
			/* Last byte must be preceded by WRITE_END. */
			write_cmd(kcs, KCS_WRITE_END);
			kcs->state = KCS_WAIT_WRITE_END;
		} else {
			write_next_byte(kcs);
			kcs->state = KCS_WAIT_WRITE;
		}
		break;

	case KCS_WAIT_WRITE:
		if (state != KCS_WRITE_STATE) {
			start_error_recovery(kcs,
					     "Not in write state for write");
			break;
		}
		clear_obf(kcs, status);
		if (kcs->write_count == 1) {
			write_cmd(kcs, KCS_WRITE_END);
			kcs->state = KCS_WAIT_WRITE_END;
		} else {
			write_next_byte(kcs);
		}
		break;

	case KCS_WAIT_WRITE_END:
		if (state != KCS_WRITE_STATE) {
			start_error_recovery(kcs,
					     "Not in write state for write end");
			break;
		}
		clear_obf(kcs, status);
		/* The final request byte follows the WRITE_END cmd. */
		write_next_byte(kcs);
		kcs->state = KCS_WAIT_READ;
		break;

	case KCS_WAIT_READ:
		if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
			start_error_recovery(
				kcs,
				"Not in read or idle in read state");
			break;
		}

		if (state == KCS_READ_STATE) {
			if (! check_obf(kcs, status, time))
				return SI_SM_CALL_WITH_DELAY;
			read_next_byte(kcs);
		} else {
			/* We don't implement this exactly like the state
			   machine in the spec.  Some broken hardware
			   does not write the final dummy byte to the
			   read register.  Thus obf will never go high
			   here.  We just go straight to idle, and we
			   handle clearing out obf in idle state if it
			   happens to come in. */
			clear_obf(kcs, status);
			kcs->orig_write_count = 0;
			kcs->state = KCS_IDLE;
			return SI_SM_TRANSACTION_COMPLETE;
		}
		break;

	case KCS_ERROR0:
		/* Issue the GET_STATUS/ABORT command to reset the
		   interface, per the spec's error flowchart. */
		clear_obf(kcs, status);
		write_cmd(kcs, KCS_GET_STATUS_ABORT);
		kcs->state = KCS_ERROR1;
		break;

	case KCS_ERROR1:
		/* The abort sequence requires a dummy zero data byte. */
		clear_obf(kcs, status);
		write_data(kcs, 0);
		kcs->state = KCS_ERROR2;
		break;

	case KCS_ERROR2:
		if (state != KCS_READ_STATE) {
			start_error_recovery(kcs,
					     "Not in read state for error2");
			break;
		}
		if (! check_obf(kcs, status, time))
			return SI_SM_CALL_WITH_DELAY;

		clear_obf(kcs, status);
		write_data(kcs, KCS_READ_BYTE);
		kcs->state = KCS_ERROR3;
		break;

	case KCS_ERROR3:
		if (state != KCS_IDLE_STATE) {
			start_error_recovery(kcs,
					     "Not in idle state for error3");
			break;
		}

		if (! check_obf(kcs, status, time))
			return SI_SM_CALL_WITH_DELAY;

		clear_obf(kcs, status);
		/* If a request was in flight when the error hit,
		   retransmit it; otherwise the abort completed the
		   transaction. */
		if (kcs->orig_write_count) {
			restart_kcs_transaction(kcs);
		} else {
			kcs->state = KCS_IDLE;
			return SI_SM_TRANSACTION_COMPLETE;
		}
		break;

	case KCS_HOSED:
		break;
	}

	if (kcs->state == KCS_HOSED) {
		/* Reset all state so the next transaction can try again. */
		init_kcs_data(kcs, kcs->io);
		return SI_SM_HOSED;
	}

	return SI_SM_CALL_WITHOUT_DELAY;
}
468
469static int kcs_size(void)
470{
471 return sizeof(struct si_sm_data);
472}
473
/* Probe for a KCS interface.  Returns 0 if one appears present,
   non-zero otherwise.  It's impossible for the KCS status register
   to be all 1's (assuming a properly functioning, self-initialized
   BMC), but that's what you get from reading a bogus address, so
   that is the test. */
static int kcs_detect(struct si_sm_data *kcs)
{
	return read_status(kcs) == 0xff;
}
485
/* Nothing to release: all KCS state lives inside the si_sm_data the
   caller owns. */
static void kcs_cleanup(struct si_sm_data *kcs)
{
}
489
/* Handler table exported to the generic system-interface code for
   KCS-type interfaces. */
struct si_sm_handlers kcs_smi_handlers =
{
	.version           = IPMI_KCS_VERSION,
	.init_data         = init_kcs_data,
	.start_transaction = start_kcs_transaction,
	.get_result        = get_kcs_result,
	.event             = kcs_event,
	.detect            = kcs_detect,
	.cleanup           = kcs_cleanup,
	.size              = kcs_size,
};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
new file mode 100644
index 000000000000..a6606a1aced7
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -0,0 +1,3174 @@
1/*
2 * ipmi_msghandler.c
3 *
4 * Incoming and outgoing message routing for an IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/config.h>
35#include <linux/module.h>
36#include <linux/errno.h>
37#include <asm/system.h>
38#include <linux/sched.h>
39#include <linux/poll.h>
40#include <linux/spinlock.h>
41#include <linux/rwsem.h>
42#include <linux/slab.h>
43#include <linux/ipmi.h>
44#include <linux/ipmi_smi.h>
45#include <linux/notifier.h>
46#include <linux/init.h>
47#include <linux/proc_fs.h>
48
49#define PFX "IPMI message handler: "
50#define IPMI_MSGHANDLER_VERSION "v33"
51
52static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
53static int ipmi_init_msghandler(void);
54
55static int initialized = 0;
56
57static struct proc_dir_entry *proc_ipmi_root = NULL;
58
59#define MAX_EVENTS_IN_QUEUE 25
60
/* Don't let a message sit in a queue forever, always time it with at least
   the max message timer. This is in milliseconds. */
63#define MAX_MSG_TIMEOUT 60000
64
/* One registered consumer of an IPMI interface (kernel-internal or
   the device interface acting on behalf of a process). */
struct ipmi_user
{
	struct list_head link;	/* Entry in the interface's users list. */

	/* The upper layer that handles receive messages. */
	struct ipmi_user_hndl *handler;
	void             *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events? */
	int gets_events;
};
79
/* Registration of a user as the receiver for one (netfn, cmd) pair
   of incoming commands. */
struct cmd_rcvr
{
	struct list_head link;	/* Entry in the interface's cmd_rcvrs list. */

	ipmi_user_t   user;	/* Who gets the matching commands. */
	unsigned char netfn;	/* Network function to match. */
	unsigned char cmd;	/* Command to match. */
};
88
/* One slot of the per-interface sequence-number table used to match
   outgoing IPMB messages with their responses. */
struct seq_table
{
	unsigned int inuse : 1;		/* Slot currently owns a message. */
	unsigned int broadcast : 1;	/* Message was a broadcast. */

	unsigned long timeout;		/* ms left before this entry expires. */
	unsigned long orig_timeout;	/* Timeout to restart with. */
	unsigned int  retries_left;

	/* To verify on an incoming send message response that this is
	   the message that the response is for, we keep a sequence id
	   and increment it every time we send a message. */
	long seqid;

	/* This is held so we can properly respond to the message on a
	   timeout, and it is used to hold the temporary data for
	   retransmission, too. */
	struct ipmi_recv_msg *recv_msg;
};
108
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  The table index goes in bits
   26+, the per-slot seqid in the low bits.
   NOTE(review): the store mask is 0x3ffffff (26 bits) while the
   extract and NEXT_SEQID masks are 0x3fffff (22 bits).  This is
   harmless because NEXT_SEQID keeps seqid within 22 bits, but the
   masks are inconsistent — confirm before relying on the width. */
#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do { \
		seq = ((msgid >> 26) & 0x3f); \
		seqid = (msgid & 0x3fffff); \
	} while(0)

/* Advance a slot's seqid, wrapping at 22 bits. */
#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
120
/* Medium/protocol pair describing one IPMI channel, as discovered by
   channel scanning. */
struct ipmi_channel
{
	unsigned char medium;
	unsigned char protocol;
};
126
/* Singly-linked list node remembering a /proc entry name so it can
   be removed when the interface goes away. */
struct ipmi_proc_entry
{
	char                   *name;
	struct ipmi_proc_entry *next;
};
132
133#define IPMI_IPMB_NUM_SEQ 64
134#define IPMI_MAX_CHANNELS 8
/* Per-interface state of the message handler; one of these exists
   for each registered SMI. */
struct ipmi_smi
{
	/* What interface number are we? */
	int intf_num;

	/* The list of upper layers that are using me.  We read-lock
	   this when delivering messages to the upper layer to keep
	   the user from going away while we are processing the
	   message.  This means that you cannot add or delete a user
	   from the receive callback. */
	rwlock_t users_lock;
	struct list_head users;

	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/* The IPMI version of the BMC on the other end. */
	unsigned char       version_major;
	unsigned char       version_minor;

	/* This is the lower-layer's sender routine. */
	struct ipmi_smi_handlers *handlers;
	void                     *send_info;

	/* A list of proc entries for this interface.  This does not
	   need a lock, only one thread creates it and only one thread
	   destroys it. */
	struct ipmi_proc_entry *proc_entries;

	/* A table of sequence numbers for this interface.  We use the
	   sequence numbers for IPMB messages that go out of the
	   interface to match them up with their responses.  A routine
	   is called periodically to time the items in this list. */
	spinlock_t       seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/* Messages that were delayed for some reason (out of memory,
	   for instance), will go in here to be processed later in a
	   periodic timer interrupt. */
	spinlock_t       waiting_msgs_lock;
	struct list_head waiting_msgs;

	/* The list of command receivers that are registered for commands
	   on this interface. */
	rwlock_t         cmd_rcvr_lock;
	struct list_head cmd_rcvrs;

	/* Events that were queued because no one was there to receive
	   them. */
	spinlock_t       events_lock; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int     waiting_events_count; /* How many events in queue? */

	/* This will be non-null if someone registers to receive all
	   IPMI commands (this is for interface emulation).  There
	   may not be any things in the cmd_rcvrs list above when
	   this is registered. */
	ipmi_user_t all_cmd_rcvr;

	/* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	   but may be changed by the user. */
	unsigned char my_address;

	/* My LUN.  This should generally stay the SMS LUN, but just in
	   case... */
	unsigned char my_lun;

	/* The event receiver for my BMC, only really used at panic
	   shutdown as a place to store this. */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* A cheap hack, if this is non-null and a message to an
	   interface comes in with a NULL user, call this routine with
	   it.  Note that the message will still be freed by the
	   caller.  This only works on the system interface. */
	void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_smi_msg *msg);

	/* When we are scanning the channels for an SMI, this will
	   tell which channel we are scanning. */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel channels[IPMI_MAX_CHANNELS];

	/* Proc FS stuff. */
	struct proc_dir_entry *proc_dir;
	char                  proc_dir_name[10];

	spinlock_t counter_lock; /* For making counters atomic. */

	/* Commands we got that were invalid. */
	unsigned int sent_invalid_commands;

	/* Commands we sent to the MC. */
	unsigned int sent_local_commands;
	/* Responses from the MC that were delivered to a user. */
	unsigned int handled_local_responses;
	/* Responses from the MC that were not delivered to a user. */
	unsigned int unhandled_local_responses;

	/* Commands we sent out to the IPMB bus. */
	unsigned int sent_ipmb_commands;
	/* Commands sent on the IPMB that had errors on the SEND CMD */
	unsigned int sent_ipmb_command_errs;
	/* Each retransmit increments this count. */
	unsigned int retransmitted_ipmb_commands;
	/* When a message times out (runs out of retransmits) this is
	   incremented. */
	unsigned int timed_out_ipmb_commands;

	/* This is like above, but for broadcasts.  Broadcasts are
	   *not* included in the above count (they are expected to
	   time out). */
	unsigned int timed_out_ipmb_broadcasts;

	/* Responses I have sent to the IPMB bus. */
	unsigned int sent_ipmb_responses;

	/* The response was delivered to the user. */
	unsigned int handled_ipmb_responses;
	/* The response had invalid data in it. */
	unsigned int invalid_ipmb_responses;
	/* The response didn't have anyone waiting for it. */
	unsigned int unhandled_ipmb_responses;

	/* Commands we sent out over the LAN.  (Field names say LAN;
	   the original comment said "IPMB bus", apparently a
	   copy/paste.) */
	unsigned int sent_lan_commands;
	/* Commands sent on the LAN that had errors on the SEND CMD */
	unsigned int sent_lan_command_errs;
	/* Each retransmit increments this count. */
	unsigned int retransmitted_lan_commands;
	/* When a message times out (runs out of retransmits) this is
	   incremented. */
	unsigned int timed_out_lan_commands;

	/* Responses I have sent over the LAN. */
	unsigned int sent_lan_responses;

	/* The response was delivered to the user. */
	unsigned int handled_lan_responses;
	/* The response had invalid data in it. */
	unsigned int invalid_lan_responses;
	/* The response didn't have anyone waiting for it. */
	unsigned int unhandled_lan_responses;

	/* The command was delivered to the user. */
	unsigned int handled_commands;
	/* The command had invalid data in it. */
	unsigned int invalid_commands;
	/* The command didn't have anyone waiting for it. */
	unsigned int unhandled_commands;

	/* Invalid data in an event. */
	unsigned int invalid_events;
	/* Events that were received with the proper format. */
	unsigned int events;
};
296
297#define MAX_IPMI_INTERFACES 4
298static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
299
300/* Used to keep interfaces from going away while operations are
301 operating on interfaces. Grab read if you are not modifying the
302 interfaces, write if you are. */
303static DECLARE_RWSEM(interfaces_sem);
304
305/* Directly protects the ipmi_interfaces data structure. This is
306 claimed in the timer interrupt. */
307static DEFINE_SPINLOCK(interfaces_lock);
308
309/* List of watchers that want to know when smi's are added and
310 deleted. */
311static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
312static DECLARE_RWSEM(smi_watchers_sem);
313
314int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
315{
316 int i;
317
318 down_read(&interfaces_sem);
319 down_write(&smi_watchers_sem);
320 list_add(&(watcher->link), &smi_watchers);
321 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
322 if (ipmi_interfaces[i] != NULL) {
323 watcher->new_smi(i);
324 }
325 }
326 up_write(&smi_watchers_sem);
327 up_read(&interfaces_sem);
328 return 0;
329}
330
331int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
332{
333 down_write(&smi_watchers_sem);
334 list_del(&(watcher->link));
335 up_write(&smi_watchers_sem);
336 return 0;
337}
338
339static void
340call_smi_watchers(int i)
341{
342 struct ipmi_smi_watcher *w;
343
344 down_read(&smi_watchers_sem);
345 list_for_each_entry(w, &smi_watchers, link) {
346 if (try_module_get(w->owner)) {
347 w->new_smi(i);
348 module_put(w->owner);
349 }
350 }
351 up_read(&smi_watchers_sem);
352}
353
354static int
355ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
356{
357 if (addr1->addr_type != addr2->addr_type)
358 return 0;
359
360 if (addr1->channel != addr2->channel)
361 return 0;
362
363 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
364 struct ipmi_system_interface_addr *smi_addr1
365 = (struct ipmi_system_interface_addr *) addr1;
366 struct ipmi_system_interface_addr *smi_addr2
367 = (struct ipmi_system_interface_addr *) addr2;
368 return (smi_addr1->lun == smi_addr2->lun);
369 }
370
371 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
372 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
373 {
374 struct ipmi_ipmb_addr *ipmb_addr1
375 = (struct ipmi_ipmb_addr *) addr1;
376 struct ipmi_ipmb_addr *ipmb_addr2
377 = (struct ipmi_ipmb_addr *) addr2;
378
379 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
380 && (ipmb_addr1->lun == ipmb_addr2->lun));
381 }
382
383 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
384 struct ipmi_lan_addr *lan_addr1
385 = (struct ipmi_lan_addr *) addr1;
386 struct ipmi_lan_addr *lan_addr2
387 = (struct ipmi_lan_addr *) addr2;
388
389 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
390 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
391 && (lan_addr1->session_handle
392 == lan_addr2->session_handle)
393 && (lan_addr1->lun == lan_addr2->lun));
394 }
395
396 return 1;
397}
398
399int ipmi_validate_addr(struct ipmi_addr *addr, int len)
400{
401 if (len < sizeof(struct ipmi_system_interface_addr)) {
402 return -EINVAL;
403 }
404
405 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
406 if (addr->channel != IPMI_BMC_CHANNEL)
407 return -EINVAL;
408 return 0;
409 }
410
411 if ((addr->channel == IPMI_BMC_CHANNEL)
412 || (addr->channel >= IPMI_NUM_CHANNELS)
413 || (addr->channel < 0))
414 return -EINVAL;
415
416 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
417 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
418 {
419 if (len < sizeof(struct ipmi_ipmb_addr)) {
420 return -EINVAL;
421 }
422 return 0;
423 }
424
425 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
426 if (len < sizeof(struct ipmi_lan_addr)) {
427 return -EINVAL;
428 }
429 return 0;
430 }
431
432 return -EINVAL;
433}
434
435unsigned int ipmi_addr_length(int addr_type)
436{
437 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
438 return sizeof(struct ipmi_system_interface_addr);
439
440 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
441 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
442 {
443 return sizeof(struct ipmi_ipmb_addr);
444 }
445
446 if (addr_type == IPMI_LAN_ADDR_TYPE)
447 return sizeof(struct ipmi_lan_addr);
448
449 return 0;
450}
451
/* Hand a completed receive message to the owning user's handler.
   NOTE(review): assumes msg->user and msg->user->handler are
   non-NULL; callers must guarantee this. */
static void deliver_response(struct ipmi_recv_msg *msg)
{
	msg->user->handler->ipmi_recv_hndl(msg, msg->user->handler_data);
}
456
/* Find the next sequence number not being used and add the given
   message with the given timeout to the sequence table.  This must be
   called with the interface's seq_lock held.  On success *seq and
   *seqid identify the slot so the response can be matched up later;
   returns -EAGAIN when every slot is in use. */
static int intf_next_seq(ipmi_smi_t intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	/* Scan circularly starting at curr_seq; the loop stops one
	   slot before wrapping back around to the starting point. */
	for (i=intf->curr_seq;
	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i=(i+1)%IPMI_IPMB_NUM_SEQ)
	{
		if (! intf->seq_table[i].inuse)
			break;
	}

	if (! intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/* Start with the maximum timeout, when the send response
		   comes in we will start the real timer. */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
	} else {
		rv = -EAGAIN;
	}

	return rv;
}
499
/* Return the receive message for the given sequence number and
   release the sequence number so it can be reused.  Some other data
   is passed in to be sure the message matches up correctly (to help
   guard against messages coming in after their timeout and the
   sequence number being reused).  Returns 0 on a match, -EINVAL for
   an out-of-range seq, -ENODEV when no matching entry exists. */
static int intf_find_seq(ipmi_smi_t intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;
	unsigned long flags;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	spin_lock_irqsave(&(intf->seq_lock), flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		/* Only hand the message back if every identifying
		   field matches; otherwise this response belongs to a
		   previous (timed-out) use of the slot. */
		if ((msg->addr.channel == channel)
		    && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &(msg->addr))))
		{
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			rv = 0;
		}
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	return rv;
}
537
538
539/* Start the timer for a specific sequence table entry. */
540static int intf_start_seq_timer(ipmi_smi_t intf,
541 long msgid)
542{
543 int rv = -ENODEV;
544 unsigned long flags;
545 unsigned char seq;
546 unsigned long seqid;
547
548
549 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
550
551 spin_lock_irqsave(&(intf->seq_lock), flags);
552 /* We do this verification because the user can be deleted
553 while a message is outstanding. */
554 if ((intf->seq_table[seq].inuse)
555 && (intf->seq_table[seq].seqid == seqid))
556 {
557 struct seq_table *ent = &(intf->seq_table[seq]);
558 ent->timeout = ent->orig_timeout;
559 rv = 0;
560 }
561 spin_unlock_irqrestore(&(intf->seq_lock), flags);
562
563 return rv;
564}
565
/* Got an error for the send message for a specific sequence number.
   Releases the table entry and, if a message was attached, delivers
   it to the user rewritten as an error response whose single data
   byte is the completion code 'err'. */
static int intf_err_seq(ipmi_smi_t intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned long flags;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/* We do this verification because the user can be deleted
	   while a message is outstanding. */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid))
	{
		struct seq_table *ent = &(intf->seq_table[seq]);

		ent->inuse = 0;
		msg = ent->recv_msg;
		rv = 0;
	}
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	/* Deliver outside the lock. */
	if (msg) {
		msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
		msg->msg_data[0] = err;
		msg->msg.netfn |= 1; /* Convert to a response. */
		msg->msg.data_len = 1;
		msg->msg.data = msg->msg_data;
		deliver_response(msg);
	}

	return rv;
}
605
606
607int ipmi_create_user(unsigned int if_num,
608 struct ipmi_user_hndl *handler,
609 void *handler_data,
610 ipmi_user_t *user)
611{
612 unsigned long flags;
613 ipmi_user_t new_user;
614 int rv = 0;
615 ipmi_smi_t intf;
616
617 /* There is no module usecount here, because it's not
618 required. Since this can only be used by and called from
619 other modules, they will implicitly use this module, and
620 thus this can't be removed unless the other modules are
621 removed. */
622
623 if (handler == NULL)
624 return -EINVAL;
625
626 /* Make sure the driver is actually initialized, this handles
627 problems with initialization order. */
628 if (!initialized) {
629 rv = ipmi_init_msghandler();
630 if (rv)
631 return rv;
632
633 /* The init code doesn't return an error if it was turned
634 off, but it won't initialize. Check that. */
635 if (!initialized)
636 return -ENODEV;
637 }
638
639 new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
640 if (! new_user)
641 return -ENOMEM;
642
643 down_read(&interfaces_sem);
644 if ((if_num > MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL)
645 {
646 rv = -EINVAL;
647 goto out_unlock;
648 }
649
650 intf = ipmi_interfaces[if_num];
651
652 new_user->handler = handler;
653 new_user->handler_data = handler_data;
654 new_user->intf = intf;
655 new_user->gets_events = 0;
656
657 if (!try_module_get(intf->handlers->owner)) {
658 rv = -ENODEV;
659 goto out_unlock;
660 }
661
662 if (intf->handlers->inc_usecount) {
663 rv = intf->handlers->inc_usecount(intf->send_info);
664 if (rv) {
665 module_put(intf->handlers->owner);
666 goto out_unlock;
667 }
668 }
669
670 write_lock_irqsave(&intf->users_lock, flags);
671 list_add_tail(&new_user->link, &intf->users);
672 write_unlock_irqrestore(&intf->users_lock, flags);
673
674 out_unlock:
675 if (rv) {
676 kfree(new_user);
677 } else {
678 *user = new_user;
679 }
680
681 up_read(&interfaces_sem);
682 return rv;
683}
684
/* Tear down a user's per-interface state.  The caller must hold the
   interface's users_lock for writing (see ipmi_destroy_user()).
   Returns 0 on success, or -ENODEV if the user is not on the
   interface's user list. */
static int ipmi_destroy_user_nolock(ipmi_user_t user)
{
	int              rv = -ENODEV;
	ipmi_user_t      t_user;
	struct cmd_rcvr  *rcvr, *rcvr2;
	int              i;
	unsigned long    flags;

	/* Find the user and delete them from the list. */
	list_for_each_entry(t_user, &(user->intf->users), link) {
		if (t_user == user) {
			list_del(&t_user->link);
			rv = 0;
			break;
		}
	}

	if (rv) {
		goto out_unlock;
	}

	/* Remove the user from the interfaces sequence table.  Slots are
	   only marked unused here; the recv_msg is not freed.
	   NOTE(review): presumably the timeout/cleanup path reaps those
	   messages — confirm there is no leak. */
	spin_lock_irqsave(&(user->intf->seq_lock), flags);
	for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
		if (user->intf->seq_table[i].inuse
		    && (user->intf->seq_table[i].recv_msg->user == user))
		{
			user->intf->seq_table[i].inuse = 0;
		}
	}
	spin_unlock_irqrestore(&(user->intf->seq_lock), flags);

	/* Remove the user from the command receiver's table. */
	write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
	list_for_each_entry_safe(rcvr, rcvr2, &(user->intf->cmd_rcvrs), link) {
		if (rcvr->user == user) {
			list_del(&rcvr->link);
			kfree(rcvr);
		}
	}
	write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);

	kfree(user);

 out_unlock:

	return rv;
}
733
734int ipmi_destroy_user(ipmi_user_t user)
735{
736 int rv;
737 ipmi_smi_t intf = user->intf;
738 unsigned long flags;
739
740 down_read(&interfaces_sem);
741 write_lock_irqsave(&intf->users_lock, flags);
742 rv = ipmi_destroy_user_nolock(user);
743 if (!rv) {
744 module_put(intf->handlers->owner);
745 if (intf->handlers->dec_usecount)
746 intf->handlers->dec_usecount(intf->send_info);
747 }
748
749 write_unlock_irqrestore(&intf->users_lock, flags);
750 up_read(&interfaces_sem);
751 return rv;
752}
753
754void ipmi_get_version(ipmi_user_t user,
755 unsigned char *major,
756 unsigned char *minor)
757{
758 *major = user->intf->version_major;
759 *minor = user->intf->version_minor;
760}
761
762void ipmi_set_my_address(ipmi_user_t user,
763 unsigned char address)
764{
765 user->intf->my_address = address;
766}
767
768unsigned char ipmi_get_my_address(ipmi_user_t user)
769{
770 return user->intf->my_address;
771}
772
773void ipmi_set_my_LUN(ipmi_user_t user,
774 unsigned char LUN)
775{
776 user->intf->my_lun = LUN & 0x3;
777}
778
779unsigned char ipmi_get_my_LUN(ipmi_user_t user)
780{
781 return user->intf->my_lun;
782}
783
784int ipmi_set_gets_events(ipmi_user_t user, int val)
785{
786 unsigned long flags;
787 struct ipmi_recv_msg *msg, *msg2;
788
789 read_lock(&(user->intf->users_lock));
790 spin_lock_irqsave(&(user->intf->events_lock), flags);
791 user->gets_events = val;
792
793 if (val) {
794 /* Deliver any queued events. */
795 list_for_each_entry_safe(msg, msg2, &(user->intf->waiting_events), link) {
796 list_del(&msg->link);
797 msg->user = user;
798 deliver_response(msg);
799 }
800 }
801
802 spin_unlock_irqrestore(&(user->intf->events_lock), flags);
803 read_unlock(&(user->intf->users_lock));
804
805 return 0;
806}
807
808int ipmi_register_for_cmd(ipmi_user_t user,
809 unsigned char netfn,
810 unsigned char cmd)
811{
812 struct cmd_rcvr *cmp;
813 unsigned long flags;
814 struct cmd_rcvr *rcvr;
815 int rv = 0;
816
817
818 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
819 if (! rcvr)
820 return -ENOMEM;
821
822 read_lock(&(user->intf->users_lock));
823 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
824 if (user->intf->all_cmd_rcvr != NULL) {
825 rv = -EBUSY;
826 goto out_unlock;
827 }
828
829 /* Make sure the command/netfn is not already registered. */
830 list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) {
831 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
832 rv = -EBUSY;
833 break;
834 }
835 }
836
837 if (! rv) {
838 rcvr->cmd = cmd;
839 rcvr->netfn = netfn;
840 rcvr->user = user;
841 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
842 }
843 out_unlock:
844 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
845 read_unlock(&(user->intf->users_lock));
846
847 if (rv)
848 kfree(rcvr);
849
850 return rv;
851}
852
853int ipmi_unregister_for_cmd(ipmi_user_t user,
854 unsigned char netfn,
855 unsigned char cmd)
856{
857 unsigned long flags;
858 struct cmd_rcvr *rcvr;
859 int rv = -ENOENT;
860
861 read_lock(&(user->intf->users_lock));
862 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
863 /* Make sure the command/netfn is not already registered. */
864 list_for_each_entry(rcvr, &(user->intf->cmd_rcvrs), link) {
865 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
866 rv = 0;
867 list_del(&rcvr->link);
868 kfree(rcvr);
869 break;
870 }
871 }
872 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
873 read_unlock(&(user->intf->users_lock));
874
875 return rv;
876}
877
878void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
879{
880 user->intf->handlers->set_run_to_completion(user->intf->send_info,
881 val);
882}
883
/* Compute the IPMB checksum: the two's complement of the 8-bit sum of
   the bytes, so that summing the data plus the checksum yields zero
   (mod 256). */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char sum = 0;
	int           i;

	for (i = 0; i < size; i++)
		sum += data[i];

	return -sum;
}
894
/* Build a Send Message request to the BMC that wraps an IPMB frame for
   msg.  ipmb_seq is the sequence number embedded in the frame;
   broadcast (0 or 1) shifts the IPMB portion one byte to make room for
   the leading zero broadcast address.  The formatted bytes and size
   are stored in smi_msg, and msgid is recorded for later matching. */
static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long                  msgid,
				   unsigned char         ipmb_seq,
				   int                   broadcast,
				   unsigned char         source_address,
				   unsigned char         source_lun)
{
	/* i is the byte offset introduced by the broadcast prefix. */
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;	/* broadcast slave address */
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	/* Header checksum covers slave address and netfn/LUN only. */
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);

	/* Add on the checksum size and the offset from the
	   broadcast. */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
936
/* Build a Send Message request to the BMC that wraps a LAN-routed
   frame for msg.  Analogous to format_ipmb_msg() but with the LAN
   session handle and software IDs in the header and no broadcast
   variant.  The formatted bytes and size are stored in smi_msg. */
static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr  *lan_addr,
				  long                  msgid,
				  unsigned char         ipmb_seq,
				  unsigned char         source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	/* Header checksum covers remote SWID and netfn/LUN only. */
	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[10]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[7]),
				smi_msg->data_size-7);

	/* Add on the checksum size and the offset from the
	   broadcast. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
973
974/* Separate from ipmi_request so that the user does not have to be
975 supplied in certain circumstances (mainly at panic time). If
976 messages are supplied, they will be freed, even if an error
977 occurs. */
978static inline int i_ipmi_request(ipmi_user_t user,
979 ipmi_smi_t intf,
980 struct ipmi_addr *addr,
981 long msgid,
982 struct kernel_ipmi_msg *msg,
983 void *user_msg_data,
984 void *supplied_smi,
985 struct ipmi_recv_msg *supplied_recv,
986 int priority,
987 unsigned char source_address,
988 unsigned char source_lun,
989 int retries,
990 unsigned int retry_time_ms)
991{
992 int rv = 0;
993 struct ipmi_smi_msg *smi_msg;
994 struct ipmi_recv_msg *recv_msg;
995 unsigned long flags;
996
997
998 if (supplied_recv) {
999 recv_msg = supplied_recv;
1000 } else {
1001 recv_msg = ipmi_alloc_recv_msg();
1002 if (recv_msg == NULL) {
1003 return -ENOMEM;
1004 }
1005 }
1006 recv_msg->user_msg_data = user_msg_data;
1007
1008 if (supplied_smi) {
1009 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1010 } else {
1011 smi_msg = ipmi_alloc_smi_msg();
1012 if (smi_msg == NULL) {
1013 ipmi_free_recv_msg(recv_msg);
1014 return -ENOMEM;
1015 }
1016 }
1017
1018 recv_msg->user = user;
1019 recv_msg->msgid = msgid;
1020 /* Store the message to send in the receive message so timeout
1021 responses can get the proper response data. */
1022 recv_msg->msg = *msg;
1023
1024 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1025 struct ipmi_system_interface_addr *smi_addr;
1026
1027 if (msg->netfn & 1) {
1028 /* Responses are not allowed to the SMI. */
1029 rv = -EINVAL;
1030 goto out_err;
1031 }
1032
1033 smi_addr = (struct ipmi_system_interface_addr *) addr;
1034 if (smi_addr->lun > 3) {
1035 spin_lock_irqsave(&intf->counter_lock, flags);
1036 intf->sent_invalid_commands++;
1037 spin_unlock_irqrestore(&intf->counter_lock, flags);
1038 rv = -EINVAL;
1039 goto out_err;
1040 }
1041
1042 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1043
1044 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1045 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1046 || (msg->cmd == IPMI_GET_MSG_CMD)
1047 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1048 {
1049 /* We don't let the user do these, since we manage
1050 the sequence numbers. */
1051 spin_lock_irqsave(&intf->counter_lock, flags);
1052 intf->sent_invalid_commands++;
1053 spin_unlock_irqrestore(&intf->counter_lock, flags);
1054 rv = -EINVAL;
1055 goto out_err;
1056 }
1057
1058 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1059 spin_lock_irqsave(&intf->counter_lock, flags);
1060 intf->sent_invalid_commands++;
1061 spin_unlock_irqrestore(&intf->counter_lock, flags);
1062 rv = -EMSGSIZE;
1063 goto out_err;
1064 }
1065
1066 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1067 smi_msg->data[1] = msg->cmd;
1068 smi_msg->msgid = msgid;
1069 smi_msg->user_data = recv_msg;
1070 if (msg->data_len > 0)
1071 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1072 smi_msg->data_size = msg->data_len + 2;
1073 spin_lock_irqsave(&intf->counter_lock, flags);
1074 intf->sent_local_commands++;
1075 spin_unlock_irqrestore(&intf->counter_lock, flags);
1076 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1077 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1078 {
1079 struct ipmi_ipmb_addr *ipmb_addr;
1080 unsigned char ipmb_seq;
1081 long seqid;
1082 int broadcast = 0;
1083
1084 if (addr->channel > IPMI_NUM_CHANNELS) {
1085 spin_lock_irqsave(&intf->counter_lock, flags);
1086 intf->sent_invalid_commands++;
1087 spin_unlock_irqrestore(&intf->counter_lock, flags);
1088 rv = -EINVAL;
1089 goto out_err;
1090 }
1091
1092 if (intf->channels[addr->channel].medium
1093 != IPMI_CHANNEL_MEDIUM_IPMB)
1094 {
1095 spin_lock_irqsave(&intf->counter_lock, flags);
1096 intf->sent_invalid_commands++;
1097 spin_unlock_irqrestore(&intf->counter_lock, flags);
1098 rv = -EINVAL;
1099 goto out_err;
1100 }
1101
1102 if (retries < 0) {
1103 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1104 retries = 0; /* Don't retry broadcasts. */
1105 else
1106 retries = 4;
1107 }
1108 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1109 /* Broadcasts add a zero at the beginning of the
1110 message, but otherwise is the same as an IPMB
1111 address. */
1112 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1113 broadcast = 1;
1114 }
1115
1116
1117 /* Default to 1 second retries. */
1118 if (retry_time_ms == 0)
1119 retry_time_ms = 1000;
1120
1121 /* 9 for the header and 1 for the checksum, plus
1122 possibly one for the broadcast. */
1123 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1124 spin_lock_irqsave(&intf->counter_lock, flags);
1125 intf->sent_invalid_commands++;
1126 spin_unlock_irqrestore(&intf->counter_lock, flags);
1127 rv = -EMSGSIZE;
1128 goto out_err;
1129 }
1130
1131 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1132 if (ipmb_addr->lun > 3) {
1133 spin_lock_irqsave(&intf->counter_lock, flags);
1134 intf->sent_invalid_commands++;
1135 spin_unlock_irqrestore(&intf->counter_lock, flags);
1136 rv = -EINVAL;
1137 goto out_err;
1138 }
1139
1140 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1141
1142 if (recv_msg->msg.netfn & 0x1) {
1143 /* It's a response, so use the user's sequence
1144 from msgid. */
1145 spin_lock_irqsave(&intf->counter_lock, flags);
1146 intf->sent_ipmb_responses++;
1147 spin_unlock_irqrestore(&intf->counter_lock, flags);
1148 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1149 msgid, broadcast,
1150 source_address, source_lun);
1151
1152 /* Save the receive message so we can use it
1153 to deliver the response. */
1154 smi_msg->user_data = recv_msg;
1155 } else {
1156 /* It's a command, so get a sequence for it. */
1157
1158 spin_lock_irqsave(&(intf->seq_lock), flags);
1159
1160 spin_lock(&intf->counter_lock);
1161 intf->sent_ipmb_commands++;
1162 spin_unlock(&intf->counter_lock);
1163
1164 /* Create a sequence number with a 1 second
1165 timeout and 4 retries. */
1166 rv = intf_next_seq(intf,
1167 recv_msg,
1168 retry_time_ms,
1169 retries,
1170 broadcast,
1171 &ipmb_seq,
1172 &seqid);
1173 if (rv) {
1174 /* We have used up all the sequence numbers,
1175 probably, so abort. */
1176 spin_unlock_irqrestore(&(intf->seq_lock),
1177 flags);
1178 goto out_err;
1179 }
1180
1181 /* Store the sequence number in the message,
1182 so that when the send message response
1183 comes back we can start the timer. */
1184 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1185 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1186 ipmb_seq, broadcast,
1187 source_address, source_lun);
1188
1189 /* Copy the message into the recv message data, so we
1190 can retransmit it later if necessary. */
1191 memcpy(recv_msg->msg_data, smi_msg->data,
1192 smi_msg->data_size);
1193 recv_msg->msg.data = recv_msg->msg_data;
1194 recv_msg->msg.data_len = smi_msg->data_size;
1195
1196 /* We don't unlock until here, because we need
1197 to copy the completed message into the
1198 recv_msg before we release the lock.
1199 Otherwise, race conditions may bite us. I
1200 know that's pretty paranoid, but I prefer
1201 to be correct. */
1202 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1203 }
1204 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1205 struct ipmi_lan_addr *lan_addr;
1206 unsigned char ipmb_seq;
1207 long seqid;
1208
1209 if (addr->channel > IPMI_NUM_CHANNELS) {
1210 spin_lock_irqsave(&intf->counter_lock, flags);
1211 intf->sent_invalid_commands++;
1212 spin_unlock_irqrestore(&intf->counter_lock, flags);
1213 rv = -EINVAL;
1214 goto out_err;
1215 }
1216
1217 if ((intf->channels[addr->channel].medium
1218 != IPMI_CHANNEL_MEDIUM_8023LAN)
1219 && (intf->channels[addr->channel].medium
1220 != IPMI_CHANNEL_MEDIUM_ASYNC))
1221 {
1222 spin_lock_irqsave(&intf->counter_lock, flags);
1223 intf->sent_invalid_commands++;
1224 spin_unlock_irqrestore(&intf->counter_lock, flags);
1225 rv = -EINVAL;
1226 goto out_err;
1227 }
1228
1229 retries = 4;
1230
1231 /* Default to 1 second retries. */
1232 if (retry_time_ms == 0)
1233 retry_time_ms = 1000;
1234
1235 /* 11 for the header and 1 for the checksum. */
1236 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1237 spin_lock_irqsave(&intf->counter_lock, flags);
1238 intf->sent_invalid_commands++;
1239 spin_unlock_irqrestore(&intf->counter_lock, flags);
1240 rv = -EMSGSIZE;
1241 goto out_err;
1242 }
1243
1244 lan_addr = (struct ipmi_lan_addr *) addr;
1245 if (lan_addr->lun > 3) {
1246 spin_lock_irqsave(&intf->counter_lock, flags);
1247 intf->sent_invalid_commands++;
1248 spin_unlock_irqrestore(&intf->counter_lock, flags);
1249 rv = -EINVAL;
1250 goto out_err;
1251 }
1252
1253 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1254
1255 if (recv_msg->msg.netfn & 0x1) {
1256 /* It's a response, so use the user's sequence
1257 from msgid. */
1258 spin_lock_irqsave(&intf->counter_lock, flags);
1259 intf->sent_lan_responses++;
1260 spin_unlock_irqrestore(&intf->counter_lock, flags);
1261 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1262 msgid, source_lun);
1263
1264 /* Save the receive message so we can use it
1265 to deliver the response. */
1266 smi_msg->user_data = recv_msg;
1267 } else {
1268 /* It's a command, so get a sequence for it. */
1269
1270 spin_lock_irqsave(&(intf->seq_lock), flags);
1271
1272 spin_lock(&intf->counter_lock);
1273 intf->sent_lan_commands++;
1274 spin_unlock(&intf->counter_lock);
1275
1276 /* Create a sequence number with a 1 second
1277 timeout and 4 retries. */
1278 rv = intf_next_seq(intf,
1279 recv_msg,
1280 retry_time_ms,
1281 retries,
1282 0,
1283 &ipmb_seq,
1284 &seqid);
1285 if (rv) {
1286 /* We have used up all the sequence numbers,
1287 probably, so abort. */
1288 spin_unlock_irqrestore(&(intf->seq_lock),
1289 flags);
1290 goto out_err;
1291 }
1292
1293 /* Store the sequence number in the message,
1294 so that when the send message response
1295 comes back we can start the timer. */
1296 format_lan_msg(smi_msg, msg, lan_addr,
1297 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1298 ipmb_seq, source_lun);
1299
1300 /* Copy the message into the recv message data, so we
1301 can retransmit it later if necessary. */
1302 memcpy(recv_msg->msg_data, smi_msg->data,
1303 smi_msg->data_size);
1304 recv_msg->msg.data = recv_msg->msg_data;
1305 recv_msg->msg.data_len = smi_msg->data_size;
1306
1307 /* We don't unlock until here, because we need
1308 to copy the completed message into the
1309 recv_msg before we release the lock.
1310 Otherwise, race conditions may bite us. I
1311 know that's pretty paranoid, but I prefer
1312 to be correct. */
1313 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1314 }
1315 } else {
1316 /* Unknown address type. */
1317 spin_lock_irqsave(&intf->counter_lock, flags);
1318 intf->sent_invalid_commands++;
1319 spin_unlock_irqrestore(&intf->counter_lock, flags);
1320 rv = -EINVAL;
1321 goto out_err;
1322 }
1323
1324#ifdef DEBUG_MSGING
1325 {
1326 int m;
1327 for (m=0; m<smi_msg->data_size; m++)
1328 printk(" %2.2x", smi_msg->data[m]);
1329 printk("\n");
1330 }
1331#endif
1332 intf->handlers->sender(intf->send_info, smi_msg, priority);
1333
1334 return 0;
1335
1336 out_err:
1337 ipmi_free_smi_msg(smi_msg);
1338 ipmi_free_recv_msg(recv_msg);
1339 return rv;
1340}
1341
1342int ipmi_request_settime(ipmi_user_t user,
1343 struct ipmi_addr *addr,
1344 long msgid,
1345 struct kernel_ipmi_msg *msg,
1346 void *user_msg_data,
1347 int priority,
1348 int retries,
1349 unsigned int retry_time_ms)
1350{
1351 return i_ipmi_request(user,
1352 user->intf,
1353 addr,
1354 msgid,
1355 msg,
1356 user_msg_data,
1357 NULL, NULL,
1358 priority,
1359 user->intf->my_address,
1360 user->intf->my_lun,
1361 retries,
1362 retry_time_ms);
1363}
1364
1365int ipmi_request_supply_msgs(ipmi_user_t user,
1366 struct ipmi_addr *addr,
1367 long msgid,
1368 struct kernel_ipmi_msg *msg,
1369 void *user_msg_data,
1370 void *supplied_smi,
1371 struct ipmi_recv_msg *supplied_recv,
1372 int priority)
1373{
1374 return i_ipmi_request(user,
1375 user->intf,
1376 addr,
1377 msgid,
1378 msg,
1379 user_msg_data,
1380 supplied_smi,
1381 supplied_recv,
1382 priority,
1383 user->intf->my_address,
1384 user->intf->my_lun,
1385 -1, 0);
1386}
1387
1388static int ipmb_file_read_proc(char *page, char **start, off_t off,
1389 int count, int *eof, void *data)
1390{
1391 char *out = (char *) page;
1392 ipmi_smi_t intf = data;
1393
1394 return sprintf(out, "%x\n", intf->my_address);
1395}
1396
1397static int version_file_read_proc(char *page, char **start, off_t off,
1398 int count, int *eof, void *data)
1399{
1400 char *out = (char *) page;
1401 ipmi_smi_t intf = data;
1402
1403 return sprintf(out, "%d.%d\n",
1404 intf->version_major, intf->version_minor);
1405}
1406
1407static int stat_file_read_proc(char *page, char **start, off_t off,
1408 int count, int *eof, void *data)
1409{
1410 char *out = (char *) page;
1411 ipmi_smi_t intf = data;
1412
1413 out += sprintf(out, "sent_invalid_commands: %d\n",
1414 intf->sent_invalid_commands);
1415 out += sprintf(out, "sent_local_commands: %d\n",
1416 intf->sent_local_commands);
1417 out += sprintf(out, "handled_local_responses: %d\n",
1418 intf->handled_local_responses);
1419 out += sprintf(out, "unhandled_local_responses: %d\n",
1420 intf->unhandled_local_responses);
1421 out += sprintf(out, "sent_ipmb_commands: %d\n",
1422 intf->sent_ipmb_commands);
1423 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1424 intf->sent_ipmb_command_errs);
1425 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1426 intf->retransmitted_ipmb_commands);
1427 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1428 intf->timed_out_ipmb_commands);
1429 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1430 intf->timed_out_ipmb_broadcasts);
1431 out += sprintf(out, "sent_ipmb_responses: %d\n",
1432 intf->sent_ipmb_responses);
1433 out += sprintf(out, "handled_ipmb_responses: %d\n",
1434 intf->handled_ipmb_responses);
1435 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1436 intf->invalid_ipmb_responses);
1437 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1438 intf->unhandled_ipmb_responses);
1439 out += sprintf(out, "sent_lan_commands: %d\n",
1440 intf->sent_lan_commands);
1441 out += sprintf(out, "sent_lan_command_errs: %d\n",
1442 intf->sent_lan_command_errs);
1443 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1444 intf->retransmitted_lan_commands);
1445 out += sprintf(out, "timed_out_lan_commands: %d\n",
1446 intf->timed_out_lan_commands);
1447 out += sprintf(out, "sent_lan_responses: %d\n",
1448 intf->sent_lan_responses);
1449 out += sprintf(out, "handled_lan_responses: %d\n",
1450 intf->handled_lan_responses);
1451 out += sprintf(out, "invalid_lan_responses: %d\n",
1452 intf->invalid_lan_responses);
1453 out += sprintf(out, "unhandled_lan_responses: %d\n",
1454 intf->unhandled_lan_responses);
1455 out += sprintf(out, "handled_commands: %d\n",
1456 intf->handled_commands);
1457 out += sprintf(out, "invalid_commands: %d\n",
1458 intf->invalid_commands);
1459 out += sprintf(out, "unhandled_commands: %d\n",
1460 intf->unhandled_commands);
1461 out += sprintf(out, "invalid_events: %d\n",
1462 intf->invalid_events);
1463 out += sprintf(out, "events: %d\n",
1464 intf->events);
1465
1466 return (out - ((char *) page));
1467}
1468
1469int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1470 read_proc_t *read_proc, write_proc_t *write_proc,
1471 void *data, struct module *owner)
1472{
1473 struct proc_dir_entry *file;
1474 int rv = 0;
1475 struct ipmi_proc_entry *entry;
1476
1477 /* Create a list element. */
1478 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1479 if (!entry)
1480 return -ENOMEM;
1481 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1482 if (!entry->name) {
1483 kfree(entry);
1484 return -ENOMEM;
1485 }
1486 strcpy(entry->name, name);
1487
1488 file = create_proc_entry(name, 0, smi->proc_dir);
1489 if (!file) {
1490 kfree(entry->name);
1491 kfree(entry);
1492 rv = -ENOMEM;
1493 } else {
1494 file->nlink = 1;
1495 file->data = data;
1496 file->read_proc = read_proc;
1497 file->write_proc = write_proc;
1498 file->owner = owner;
1499
1500 /* Stick it on the list. */
1501 entry->next = smi->proc_entries;
1502 smi->proc_entries = entry;
1503 }
1504
1505 return rv;
1506}
1507
1508static int add_proc_entries(ipmi_smi_t smi, int num)
1509{
1510 int rv = 0;
1511
1512 sprintf(smi->proc_dir_name, "%d", num);
1513 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1514 if (!smi->proc_dir)
1515 rv = -ENOMEM;
1516 else {
1517 smi->proc_dir->owner = THIS_MODULE;
1518 }
1519
1520 if (rv == 0)
1521 rv = ipmi_smi_add_proc_entry(smi, "stats",
1522 stat_file_read_proc, NULL,
1523 smi, THIS_MODULE);
1524
1525 if (rv == 0)
1526 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1527 ipmb_file_read_proc, NULL,
1528 smi, THIS_MODULE);
1529
1530 if (rv == 0)
1531 rv = ipmi_smi_add_proc_entry(smi, "version",
1532 version_file_read_proc, NULL,
1533 smi, THIS_MODULE);
1534
1535 return rv;
1536}
1537
1538static void remove_proc_entries(ipmi_smi_t smi)
1539{
1540 struct ipmi_proc_entry *entry;
1541
1542 while (smi->proc_entries) {
1543 entry = smi->proc_entries;
1544 smi->proc_entries = entry->next;
1545
1546 remove_proc_entry(entry->name, smi->proc_dir);
1547 kfree(entry->name);
1548 kfree(entry);
1549 }
1550 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1551}
1552
1553static int
1554send_channel_info_cmd(ipmi_smi_t intf, int chan)
1555{
1556 struct kernel_ipmi_msg msg;
1557 unsigned char data[1];
1558 struct ipmi_system_interface_addr si;
1559
1560 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
1561 si.channel = IPMI_BMC_CHANNEL;
1562 si.lun = 0;
1563
1564 msg.netfn = IPMI_NETFN_APP_REQUEST;
1565 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
1566 msg.data = data;
1567 msg.data_len = 1;
1568 data[0] = chan;
1569 return i_ipmi_request(NULL,
1570 intf,
1571 (struct ipmi_addr *) &si,
1572 0,
1573 &msg,
1574 NULL,
1575 NULL,
1576 NULL,
1577 0,
1578 intf->my_address,
1579 intf->my_lun,
1580 -1, 0);
1581}
1582
/* Response handler for the Get Channel Info scan kicked off by
   ipmi_register_smi().  Records each channel's medium/protocol in
   intf->channels[], advances curr_channel and issues the next query,
   and wakes the registration thread (intf->waitq) when the scan is
   finished, i.e. curr_channel >= IPMI_MAX_CHANNELS. */
static void
channel_handler(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
{
	int rv = 0;
	int chan;

	/* Only react to Get Channel Info responses; ignore anything
	   else delivered through the null-user path. */
	if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2))
	    && (msg->rsp[1] == IPMI_GET_CHANNEL_INFO_CMD))
	{
		/* It's the one we want */
		if (msg->rsp[2] != 0) {
			/* Got an error from the channel, just go on. */

			if (msg->rsp[2] == IPMI_INVALID_COMMAND_ERR) {
				/* If the MC does not support this
				   command, that is legal.  We just
				   assume it has one IPMB at channel
				   zero. */
				intf->channels[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->channels[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;
				rv = -ENOSYS;

				/* Mark the scan finished and wake the
				   waiter. */
				intf->curr_channel = IPMI_MAX_CHANNELS;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->rsp_size < 6) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		chan = intf->curr_channel;
		intf->channels[chan].medium = msg->rsp[4] & 0x7f;
		intf->channels[chan].protocol = msg->rsp[5] & 0x1f;

	next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS)
			wake_up(&intf->waitq);
		else
			rv = send_channel_info_cmd(intf, intf->curr_channel);

		if (rv) {
			/* Got an error somehow, just give up. */
			intf->curr_channel = IPMI_MAX_CHANNELS;
			wake_up(&intf->waitq);

			printk(KERN_WARNING PFX
			       "Error sending channel information: %d\n",
			       rv);
		}
	}
 out:
	return;
}
1641
/* Register a new low-level SMI interface with the message handler.
   handlers/send_info come from the SMI driver; version_major/minor is
   the BMC's IPMI version; slave_addr of 0 means "use the default BMC
   slave address".  On success *intf holds the new interface and 0 is
   returned; otherwise a negative errno. */
int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
		      void		       *send_info,
		      unsigned char            version_major,
		      unsigned char            version_minor,
		      unsigned char            slave_addr,
		      ipmi_smi_t               *intf)
{
	int              i, j;
	int              rv;
	ipmi_smi_t       new_intf;
	unsigned long    flags;


	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;
		/* The init code doesn't return an error if it was turned
		   off, but it won't initialize.  Check that. */
		if (!initialized)
			return -ENODEV;
	}

	new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL);
	if (!new_intf)
		return -ENOMEM;
	memset(new_intf, 0, sizeof(*new_intf));

	new_intf->proc_dir = NULL;

	rv = -ENOMEM;

	/* Claim the first free slot in the global interface table and
	   initialize all the per-interface state. */
	down_write(&interfaces_sem);
	for (i=0; i<MAX_IPMI_INTERFACES; i++) {
		if (ipmi_interfaces[i] == NULL) {
			new_intf->intf_num = i;
			new_intf->version_major = version_major;
			new_intf->version_minor = version_minor;
			if (slave_addr == 0)
				new_intf->my_address = IPMI_BMC_SLAVE_ADDR;
			else
				new_intf->my_address = slave_addr;
			new_intf->my_lun = 2;  /* the SMS LUN. */
			rwlock_init(&(new_intf->users_lock));
			INIT_LIST_HEAD(&(new_intf->users));
			new_intf->handlers = handlers;
			new_intf->send_info = send_info;
			spin_lock_init(&(new_intf->seq_lock));
			for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
				new_intf->seq_table[j].inuse = 0;
				new_intf->seq_table[j].seqid = 0;
			}
			new_intf->curr_seq = 0;
			spin_lock_init(&(new_intf->waiting_msgs_lock));
			INIT_LIST_HEAD(&(new_intf->waiting_msgs));
			spin_lock_init(&(new_intf->events_lock));
			INIT_LIST_HEAD(&(new_intf->waiting_events));
			new_intf->waiting_events_count = 0;
			rwlock_init(&(new_intf->cmd_rcvr_lock));
			init_waitqueue_head(&new_intf->waitq);
			INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
			new_intf->all_cmd_rcvr = NULL;

			spin_lock_init(&(new_intf->counter_lock));

			/* Publish the interface. */
			spin_lock_irqsave(&interfaces_lock, flags);
			ipmi_interfaces[i] = new_intf;
			spin_unlock_irqrestore(&interfaces_lock, flags);

			rv = 0;
			*intf = new_intf;
			break;
		}
	}

	/* Keep the semaphore held (for reading) while we finish setup,
	   but allow other readers in. */
	downgrade_write(&interfaces_sem);

	if (rv == 0)
		rv = add_proc_entries(*intf, i);

	if (rv == 0) {
		/* IPMI 1.5 and later support Get Channel Info, so scan
		   the channels; older BMCs get the single-IPMB default. */
		if ((version_major > 1)
		    || ((version_major == 1) && (version_minor >= 5)))
		{
			/* Start scanning the channels to see what is
			   available. */
			(*intf)->null_user_handler = channel_handler;
			(*intf)->curr_channel = 0;
			rv = send_channel_info_cmd(*intf, 0);
			if (rv)
				goto out;

			/* Wait for the channel info to be read.  Drop
			   the semaphore while sleeping so the scan's
			   handlers (and unregister) can make progress. */
			up_read(&interfaces_sem);
			wait_event((*intf)->waitq,
				   ((*intf)->curr_channel>=IPMI_MAX_CHANNELS));
			down_read(&interfaces_sem);

			if (ipmi_interfaces[i] != new_intf)
				/* Well, it went away.  Just return. */
				goto out;
		} else {
			/* Assume a single IPMB channel at zero. */
			(*intf)->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
			(*intf)->channels[0].protocol
				= IPMI_CHANNEL_PROTOCOL_IPMB;
		}

		/* Call all the watcher interfaces to tell
		   them that a new interface is available. */
		call_smi_watchers(i);
	}

 out:
	up_read(&interfaces_sem);

	if (rv) {
		if (new_intf->proc_dir)
			remove_proc_entries(new_intf);
		kfree(new_intf);
	}

	return rv;
}
1768
1769static void free_recv_msg_list(struct list_head *q)
1770{
1771 struct ipmi_recv_msg *msg, *msg2;
1772
1773 list_for_each_entry_safe(msg, msg2, q, link) {
1774 list_del(&msg->link);
1775 ipmi_free_recv_msg(msg);
1776 }
1777}
1778
1779static void free_cmd_rcvr_list(struct list_head *q)
1780{
1781 struct cmd_rcvr *rcvr, *rcvr2;
1782
1783 list_for_each_entry_safe(rcvr, rcvr2, q, link) {
1784 list_del(&rcvr->link);
1785 kfree(rcvr);
1786 }
1787}
1788
1789static void clean_up_interface_data(ipmi_smi_t intf)
1790{
1791 int i;
1792
1793 free_recv_msg_list(&(intf->waiting_msgs));
1794 free_recv_msg_list(&(intf->waiting_events));
1795 free_cmd_rcvr_list(&(intf->cmd_rcvrs));
1796
1797 for (i=0; i<IPMI_IPMB_NUM_SEQ; i++) {
1798 if ((intf->seq_table[i].inuse)
1799 && (intf->seq_table[i].recv_msg))
1800 {
1801 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1802 }
1803 }
1804}
1805
/* Unregister a low-level (SMI) interface.  Returns -EBUSY if any
 * users are still attached, -ENODEV if the interface is not in the
 * table.  On success the proc entries, queued data, and the interface
 * structure itself are freed, and every registered SMI watcher is
 * notified through its smi_gone() callback. */
int ipmi_unregister_smi(ipmi_smi_t intf)
{
	int rv = -ENODEV;
	int i;
	struct ipmi_smi_watcher *w;
	unsigned long flags;

	/* Hold the semaphore for writing so no new users can attach
	   while we decide whether the interface can go away. */
	down_write(&interfaces_sem);
	if (list_empty(&(intf->users)))
	{
		for (i=0; i<MAX_IPMI_INTERFACES; i++) {
			if (ipmi_interfaces[i] == intf) {
				remove_proc_entries(intf);
				/* The spinlock excludes readers (e.g. the
				   timer) that scan the table without the
				   semaphore. */
				spin_lock_irqsave(&interfaces_lock, flags);
				ipmi_interfaces[i] = NULL;
				clean_up_interface_data(intf);
				spin_unlock_irqrestore(&interfaces_lock,flags);
				kfree(intf);
				rv = 0;
				goto out_call_watcher;
			}
		}
	} else {
		rv = -EBUSY;
	}
	up_write(&interfaces_sem);

	return rv;

 out_call_watcher:
	/* Keep read ownership so the slot index stays meaningful while
	   watchers are told, but let other readers proceed. */
	downgrade_write(&interfaces_sem);

	/* Call all the watcher interfaces to tell them that
	   an interface is gone. */
	down_read(&smi_watchers_sem);
	list_for_each_entry(w, &smi_watchers, link) {
		w->smi_gone(i);
	}
	up_read(&smi_watchers_sem);
	up_read(&interfaces_sem);
	return 0;
}
1848
/* Handle a Get Message response that carries an IPMB *response* from
 * a remote controller: rebuild the source IPMB address from the raw
 * bytes, match it against the pending-sequence table, and deliver the
 * payload to the original requester.  Always returns 0 (the SMI
 * message is consumed here, never requeued).
 *
 * Byte offsets used below (msg->rsp): [2] completion code,
 * [3] channel, [4] netfn/LUN, [6] responder slave address,
 * [7] rqSeq/LUN, [8] command, [9..] data + trailing checksum. */
static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;


	/* This is 11, not 10, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &(ipmb_addr),
			  &recv_msg))
	{
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_ipmb_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	/* NOTE(review): this copies rsp_size - 9 bytes while data_len
	   below is rsp_size - 10 -- the trailing checksum byte lands in
	   the buffer but is excluded from the reported length.  Verify
	   this asymmetry is intentional. */
	memcpy(recv_msg->msg_data,
	       &(msg->rsp[9]),
	       msg->rsp_size - 9);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_ipmb_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);

	return 0;
}
1912
/* Handle a Get Message response that carries an IPMB *command*
 * addressed to us from another entity.  If a user has registered for
 * the netfn/cmd pair (or a catch-all receiver exists) the command is
 * delivered to that user; otherwise an "invalid command" error
 * response is sent back to the originator over IPMB.
 *
 * Returns 0 if the caller may free the SMI message, 1 to have it
 * requeued (allocation failure), or -1 when msg was reused to
 * transmit the error response and must be neither freed nor
 * requeued. */
static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	ipmi_user_t user = NULL;
	struct ipmi_ipmb_addr *ipmb_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];

	read_lock(&(intf->cmd_rcvr_lock));

	if (intf->all_cmd_rcvr) {
		user = intf->all_cmd_rcvr;
	} else {
		/* Find the command/netfn. */
		list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
			if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
				user = rcvr->user;
				break;
			}
		}
	}
	read_unlock(&(intf->cmd_rcvr_lock));

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		/* Build a Send Message request, reusing msg->data, that
		   carries an invalid-command completion code back to the
		   originator: channel, dest address, response netfn/LUN,
		   header checksum, our address, rqSeq/LUN, cmd, CC, and
		   the final checksum. */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->my_address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

#ifdef DEBUG_MSGING
		{
			int m;
			printk("Invalid command:");
			for (m=0; m<msg->data_size; m++)
				printk(" %2.2x", msg->data[m]);
			printk("\n");
		}
#endif
		intf->handlers->sender(intf->send_info, msg, 0);

		rv = -1; /* We used the message, so return the value that
			    causes it to not be freed or queued. */
	} else {
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		recv_msg = ipmi_alloc_recv_msg();
		if (! recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 10, not 9 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
2030
/* Handle a Get Message response that carries a LAN-bridged response
 * from a remote entity: rebuild the source LAN address, match it
 * against the pending-sequence table, and deliver the payload to the
 * original requester.  Always returns 0 (the SMI message is consumed
 * here, never requeued).
 *
 * Byte offsets used below (msg->rsp): [2] completion code,
 * [3] channel/privilege, [4] session handle, [5] local SWID,
 * [6] netfn/LUN, [8] remote SWID, [9] rqSeq/LUN, [10] command,
 * [11..] data + trailing checksum. */
static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct ipmi_lan_addr lan_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;


	/* This is 13, not 12, because the response must contain a
	 * completion code. */
	if (msg->rsp_size < 13) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/* It's a response from a remote entity.  Look up the sequence
	   number and handle the response. */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &(lan_addr),
			  &recv_msg))
	{
		/* We were unable to find the sequence number,
		   so just nuke the message. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_lan_responses++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	/* NOTE(review): copies rsp_size - 11 bytes while data_len below
	   is rsp_size - 12; the trailing checksum byte lands in the
	   buffer but is excluded from the length (same pattern as the
	   IPMB response handler) -- verify intentional. */
	memcpy(recv_msg->msg_data,
	       &(msg->rsp[11]),
	       msg->rsp_size - 11);
	/* The other fields matched, so no need to set them, except
	   for netfn, which needs to be the response that was
	   returned, not the request value. */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	spin_lock_irqsave(&intf->counter_lock, flags);
	intf->handled_lan_responses++;
	spin_unlock_irqrestore(&intf->counter_lock, flags);
	deliver_response(recv_msg);

	return 0;
}
2097
/* Handle a Get Message response that carries a LAN-bridged *command*
 * addressed to us.  If a user has registered for the netfn/cmd pair
 * (or a catch-all receiver exists) the command is delivered to it;
 * unlike the IPMB path, unhandled LAN commands are simply dropped --
 * no error response is generated.
 *
 * Returns 0 if the caller may free the SMI message, or 1 to have it
 * requeued for later handling (allocation failure). */
static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	ipmi_user_t user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];

	read_lock(&(intf->cmd_rcvr_lock));

	if (intf->all_cmd_rcvr) {
		user = intf->all_cmd_rcvr;
	} else {
		/* Find the command/netfn. */
		list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
			if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
				user = rcvr->user;
				break;
			}
		}
	}
	read_unlock(&(intf->cmd_rcvr_lock));

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->unhandled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		rv = 0; /* Don't do anything with these messages, just
			   allow them to be freed. */
	} else {
		/* Deliver the message to the user. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->handled_commands++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);

		recv_msg = ipmi_alloc_recv_msg();
		if (! recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/* Extract the rest of the message information
			   from the IPMB header.*/
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/* We chop off 12, not 11 bytes because the checksum
			   at the end also needs to be removed. */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
2193
2194static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2195 struct ipmi_smi_msg *msg)
2196{
2197 struct ipmi_system_interface_addr *smi_addr;
2198
2199 recv_msg->msgid = 0;
2200 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
2201 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2202 smi_addr->channel = IPMI_BMC_CHANNEL;
2203 smi_addr->lun = msg->rsp[0] & 3;
2204 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
2205 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2206 recv_msg->msg.cmd = msg->rsp[1];
2207 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
2208 recv_msg->msg.data = recv_msg->msg_data;
2209 recv_msg->msg.data_len = msg->rsp_size - 3;
2210}
2211
/* This will be called with the intf->users_lock read-locked, so no need
   to do that here. */
/* Handle a Read Event Message Buffer response: fan the event out to
 * every user that asked for events (one copy per user).  If no user
 * wants it, park it on the interface's waiting_events queue (bounded
 * by MAX_EVENTS_IN_QUEUE, beyond which events are discarded).
 *
 * Returns 0 when the SMI message may be freed, 1 to have it requeued
 * (allocation failure -- all-or-nothing per event). */
static int handle_read_event_rsp(ipmi_smi_t intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	ipmi_user_t user;
	int rv = 0;
	int deliver_count = 0;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		spin_lock_irqsave(&intf->counter_lock, flags);
		intf->invalid_events++;
		spin_unlock_irqrestore(&intf->counter_lock, flags);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	/* events_lock is held for the whole fan-out so the event queue
	   count and the delivery stay consistent. */
	spin_lock_irqsave(&(intf->events_lock), flags);

	spin_lock(&intf->counter_lock);
	intf->events++;
	spin_unlock(&intf->counter_lock);

	/* Allocate and fill in one message for every user that is getting
	   events. */
	list_for_each_entry(user, &(intf->users), link) {
		if (! user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (! recv_msg) {
			/* One allocation failed: throw away the copies
			   built so far so no user sees a partial
			   delivery, and retry the whole event later. */
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		list_add_tail(&(recv_msg->link), &msgs);
	}

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/* No one to receive the message, put it in queue if there's
		   not already too many things in the queue. */
		recv_msg = ipmi_alloc_recv_msg();
		if (! recv_msg) {
			/* We couldn't allocate memory for the
			   message, so requeue it for handling
			   later. */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
	} else {
		/* There's too many things in the queue, discard this
		   message. */
		printk(KERN_WARNING PFX "Event queue full, discarding an"
		       " incoming event\n");
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}
2303
2304static int handle_bmc_rsp(ipmi_smi_t intf,
2305 struct ipmi_smi_msg *msg)
2306{
2307 struct ipmi_recv_msg *recv_msg;
2308 int found = 0;
2309 struct ipmi_user *user;
2310 unsigned long flags;
2311
2312 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2313
2314 /* Make sure the user still exists. */
2315 list_for_each_entry(user, &(intf->users), link) {
2316 if (user == recv_msg->user) {
2317 /* Found it, so we can deliver it */
2318 found = 1;
2319 break;
2320 }
2321 }
2322
2323 if (!found) {
2324 /* Special handling for NULL users. */
2325 if (!recv_msg->user && intf->null_user_handler){
2326 intf->null_user_handler(intf, msg);
2327 spin_lock_irqsave(&intf->counter_lock, flags);
2328 intf->handled_local_responses++;
2329 spin_unlock_irqrestore(&intf->counter_lock, flags);
2330 }else{
2331 /* The user for the message went away, so give up. */
2332 spin_lock_irqsave(&intf->counter_lock, flags);
2333 intf->unhandled_local_responses++;
2334 spin_unlock_irqrestore(&intf->counter_lock, flags);
2335 }
2336 ipmi_free_recv_msg(recv_msg);
2337 } else {
2338 struct ipmi_system_interface_addr *smi_addr;
2339
2340 spin_lock_irqsave(&intf->counter_lock, flags);
2341 intf->handled_local_responses++;
2342 spin_unlock_irqrestore(&intf->counter_lock, flags);
2343 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2344 recv_msg->msgid = msg->msgid;
2345 smi_addr = ((struct ipmi_system_interface_addr *)
2346 &(recv_msg->addr));
2347 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2348 smi_addr->channel = IPMI_BMC_CHANNEL;
2349 smi_addr->lun = msg->rsp[0] & 3;
2350 recv_msg->msg.netfn = msg->rsp[0] >> 2;
2351 recv_msg->msg.cmd = msg->rsp[1];
2352 memcpy(recv_msg->msg_data,
2353 &(msg->rsp[2]),
2354 msg->rsp_size - 2);
2355 recv_msg->msg.data = recv_msg->msg_data;
2356 recv_msg->msg.data_len = msg->rsp_size - 2;
2357 deliver_response(recv_msg);
2358 }
2359
2360 return 0;
2361}
2362
2363/* Handle a new message. Return 1 if the message should be requeued,
2364 0 if the message should be freed, or -1 if the message should not
2365 be freed or requeued. */
2366static int handle_new_recv_msg(ipmi_smi_t intf,
2367 struct ipmi_smi_msg *msg)
2368{
2369 int requeue;
2370 int chan;
2371
2372#ifdef DEBUG_MSGING
2373 int m;
2374 printk("Recv:");
2375 for (m=0; m<msg->rsp_size; m++)
2376 printk(" %2.2x", msg->rsp[m]);
2377 printk("\n");
2378#endif
2379 if (msg->rsp_size < 2) {
2380 /* Message is too small to be correct. */
2381 printk(KERN_WARNING PFX "BMC returned to small a message"
2382 " for netfn %x cmd %x, got %d bytes\n",
2383 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
2384
2385 /* Generate an error response for the message. */
2386 msg->rsp[0] = msg->data[0] | (1 << 2);
2387 msg->rsp[1] = msg->data[1];
2388 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2389 msg->rsp_size = 3;
2390 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
2391 || (msg->rsp[1] != msg->data[1])) /* Command */
2392 {
2393 /* The response is not even marginally correct. */
2394 printk(KERN_WARNING PFX "BMC returned incorrect response,"
2395 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
2396 (msg->data[0] >> 2) | 1, msg->data[1],
2397 msg->rsp[0] >> 2, msg->rsp[1]);
2398
2399 /* Generate an error response for the message. */
2400 msg->rsp[0] = msg->data[0] | (1 << 2);
2401 msg->rsp[1] = msg->data[1];
2402 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
2403 msg->rsp_size = 3;
2404 }
2405
2406 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2407 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
2408 && (msg->user_data != NULL))
2409 {
2410 /* It's a response to a response we sent. For this we
2411 deliver a send message response to the user. */
2412 struct ipmi_recv_msg *recv_msg = msg->user_data;
2413
2414 requeue = 0;
2415 if (msg->rsp_size < 2)
2416 /* Message is too small to be correct. */
2417 goto out;
2418
2419 chan = msg->data[2] & 0x0f;
2420 if (chan >= IPMI_MAX_CHANNELS)
2421 /* Invalid channel number */
2422 goto out;
2423
2424 if (recv_msg) {
2425 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
2426 recv_msg->msg.data = recv_msg->msg_data;
2427 recv_msg->msg.data_len = 1;
2428 recv_msg->msg_data[0] = msg->rsp[2];
2429 deliver_response(recv_msg);
2430 }
2431 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2432 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
2433 {
2434 /* It's from the receive queue. */
2435 chan = msg->rsp[3] & 0xf;
2436 if (chan >= IPMI_MAX_CHANNELS) {
2437 /* Invalid channel number */
2438 requeue = 0;
2439 goto out;
2440 }
2441
2442 switch (intf->channels[chan].medium) {
2443 case IPMI_CHANNEL_MEDIUM_IPMB:
2444 if (msg->rsp[4] & 0x04) {
2445 /* It's a response, so find the
2446 requesting message and send it up. */
2447 requeue = handle_ipmb_get_msg_rsp(intf, msg);
2448 } else {
2449 /* It's a command to the SMS from some other
2450 entity. Handle that. */
2451 requeue = handle_ipmb_get_msg_cmd(intf, msg);
2452 }
2453 break;
2454
2455 case IPMI_CHANNEL_MEDIUM_8023LAN:
2456 case IPMI_CHANNEL_MEDIUM_ASYNC:
2457 if (msg->rsp[6] & 0x04) {
2458 /* It's a response, so find the
2459 requesting message and send it up. */
2460 requeue = handle_lan_get_msg_rsp(intf, msg);
2461 } else {
2462 /* It's a command to the SMS from some other
2463 entity. Handle that. */
2464 requeue = handle_lan_get_msg_cmd(intf, msg);
2465 }
2466 break;
2467
2468 default:
2469 /* We don't handle the channel type, so just
2470 * free the message. */
2471 requeue = 0;
2472 }
2473
2474 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2475 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
2476 {
2477 /* It's an asyncronous event. */
2478 requeue = handle_read_event_rsp(intf, msg);
2479 } else {
2480 /* It's a response from the local BMC. */
2481 requeue = handle_bmc_rsp(intf, msg);
2482 }
2483
2484 out:
2485 return requeue;
2486}
2487
2488/* Handle a new message from the lower layer. */
2489void ipmi_smi_msg_received(ipmi_smi_t intf,
2490 struct ipmi_smi_msg *msg)
2491{
2492 unsigned long flags;
2493 int rv;
2494
2495
2496 /* Lock the user lock so the user can't go away while we are
2497 working on it. */
2498 read_lock(&(intf->users_lock));
2499
2500 if ((msg->data_size >= 2)
2501 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
2502 && (msg->data[1] == IPMI_SEND_MSG_CMD)
2503 && (msg->user_data == NULL)) {
2504 /* This is the local response to a command send, start
2505 the timer for these. The user_data will not be
2506 NULL if this is a response send, and we will let
2507 response sends just go through. */
2508
2509 /* Check for errors, if we get certain errors (ones
2510 that mean basically we can try again later), we
2511 ignore them and start the timer. Otherwise we
2512 report the error immediately. */
2513 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
2514 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
2515 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR))
2516 {
2517 int chan = msg->rsp[3] & 0xf;
2518
2519 /* Got an error sending the message, handle it. */
2520 spin_lock_irqsave(&intf->counter_lock, flags);
2521 if (chan >= IPMI_MAX_CHANNELS)
2522 ; /* This shouldn't happen */
2523 else if ((intf->channels[chan].medium
2524 == IPMI_CHANNEL_MEDIUM_8023LAN)
2525 || (intf->channels[chan].medium
2526 == IPMI_CHANNEL_MEDIUM_ASYNC))
2527 intf->sent_lan_command_errs++;
2528 else
2529 intf->sent_ipmb_command_errs++;
2530 spin_unlock_irqrestore(&intf->counter_lock, flags);
2531 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
2532 } else {
2533 /* The message was sent, start the timer. */
2534 intf_start_seq_timer(intf, msg->msgid);
2535 }
2536
2537 ipmi_free_smi_msg(msg);
2538 goto out_unlock;
2539 }
2540
2541 /* To preserve message order, if the list is not empty, we
2542 tack this message onto the end of the list. */
2543 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
2544 if (!list_empty(&(intf->waiting_msgs))) {
2545 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2546 spin_unlock(&(intf->waiting_msgs_lock));
2547 goto out_unlock;
2548 }
2549 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
2550
2551 rv = handle_new_recv_msg(intf, msg);
2552 if (rv > 0) {
2553 /* Could not handle the message now, just add it to a
2554 list to handle later. */
2555 spin_lock(&(intf->waiting_msgs_lock));
2556 list_add_tail(&(msg->link), &(intf->waiting_msgs));
2557 spin_unlock(&(intf->waiting_msgs_lock));
2558 } else if (rv == 0) {
2559 ipmi_free_smi_msg(msg);
2560 }
2561
2562 out_unlock:
2563 read_unlock(&(intf->users_lock));
2564}
2565
2566void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
2567{
2568 ipmi_user_t user;
2569
2570 read_lock(&(intf->users_lock));
2571 list_for_each_entry(user, &(intf->users), link) {
2572 if (! user->handler->ipmi_watchdog_pretimeout)
2573 continue;
2574
2575 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
2576 }
2577 read_unlock(&(intf->users_lock));
2578}
2579
2580static void
2581handle_msg_timeout(struct ipmi_recv_msg *msg)
2582{
2583 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2584 msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
2585 msg->msg.netfn |= 1; /* Convert to a response. */
2586 msg->msg.data_len = 1;
2587 msg->msg.data = msg->msg_data;
2588 deliver_response(msg);
2589}
2590
/* Retransmit the request tracked by @recv_msg.  A fresh SMI message
 * is built (or @smi_msg reused if supplied), the sequence number is
 * re-encoded into the msgid, and the message is handed to the lower
 * layer.  On allocation failure nothing is sent; the retry machinery
 * will try again on a later tick. */
static void
send_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
		   struct ipmi_smi_msg *smi_msg,
		   unsigned char seq, long seqid)
{
	if (!smi_msg)
		smi_msg = ipmi_alloc_smi_msg();
	if (!smi_msg)
		/* If we can't allocate the message, then just return, we
		   get 4 retries, so this should be ok. */
		return;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
	/* Re-stamp the msgid so the response can be matched back to
	   this sequence-table slot. */
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	/* Send the new message.  We send with a zero priority.  It
	   timed out, I doubt time is that critical now, and high
	   priority messages are really only for messages to the local
	   MC, which don't get resent. */
	intf->handlers->sender(intf->send_info, smi_msg, 0);

#ifdef DEBUG_MSGING
	{
		int m;
		printk("Resend: ");
		for (m=0; m<smi_msg->data_size; m++)
			printk(" %2.2x", smi_msg->data[m]);
		printk("\n");
	}
#endif
}
2623
/* Periodic timeout work, called every tick with the elapsed period in
 * milliseconds.  For each registered interface: drain what it can of
 * the waiting-message queue (stopping at the first message that must
 * be requeued, to preserve order), age every in-use sequence-table
 * entry by @timeout_period, retransmit entries that still have
 * retries left, and deliver timeout responses for those that don't. */
static void
ipmi_timeout_handler(long timeout_period)
{
	ipmi_smi_t intf;
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	struct ipmi_smi_msg *smi_msg, *smi_msg2;
	unsigned long flags;
	int i, j;

	/* NOTE(review): this list is initialized once, outside the
	   per-interface loop, and entries are not removed after
	   handle_msg_timeout() -- verify deliver_response() takes the
	   message off this list, otherwise a later interface iteration
	   would walk already-delivered entries. */
	INIT_LIST_HEAD(&timeouts);

	spin_lock(&interfaces_lock);
	for (i=0; i<MAX_IPMI_INTERFACES; i++) {
		intf = ipmi_interfaces[i];
		if (intf == NULL)
			continue;

		read_lock(&(intf->users_lock));

		/* See if any waiting messages need to be processed. */
		spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
		list_for_each_entry_safe(smi_msg, smi_msg2, &(intf->waiting_msgs), link) {
			if (! handle_new_recv_msg(intf, smi_msg)) {
				list_del(&smi_msg->link);
				ipmi_free_smi_msg(smi_msg);
			} else {
				/* To preserve message order, quit if we
				   can't handle a message. */
				break;
			}
		}
		spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);

		/* Go through the seq table and find any messages that
		   have timed out, putting them in the timeouts
		   list. */
		spin_lock_irqsave(&(intf->seq_lock), flags);
		for (j=0; j<IPMI_IPMB_NUM_SEQ; j++) {
			struct seq_table *ent = &(intf->seq_table[j]);
			if (!ent->inuse)
				continue;

			ent->timeout -= timeout_period;
			if (ent->timeout > 0)
				continue;

			if (ent->retries_left == 0) {
				/* The message has used all its retries. */
				ent->inuse = 0;
				msg = ent->recv_msg;
				list_add_tail(&(msg->link), &timeouts);
				spin_lock(&intf->counter_lock);
				if (ent->broadcast)
					intf->timed_out_ipmb_broadcasts++;
				else if (ent->recv_msg->addr.addr_type
					 == IPMI_LAN_ADDR_TYPE)
					intf->timed_out_lan_commands++;
				else
					intf->timed_out_ipmb_commands++;
				spin_unlock(&intf->counter_lock);
			} else {
				/* More retries, send again. */

				/* Start with the max timer, set to normal
				   timer after the message is sent. */
				ent->timeout = MAX_MSG_TIMEOUT;
				ent->retries_left--;
				send_from_recv_msg(intf, ent->recv_msg, NULL,
						   j, ent->seqid);
				spin_lock(&intf->counter_lock);
				if (ent->recv_msg->addr.addr_type
				    == IPMI_LAN_ADDR_TYPE)
					intf->retransmitted_lan_commands++;
				else
					intf->retransmitted_ipmb_commands++;
				spin_unlock(&intf->counter_lock);
			}
		}
		spin_unlock_irqrestore(&(intf->seq_lock), flags);

		/* Deliver timeout responses outside the seq_lock. */
		list_for_each_entry_safe(msg, msg2, &timeouts, link) {
			handle_msg_timeout(msg);
		}

		read_unlock(&(intf->users_lock));
	}
	spin_unlock(&interfaces_lock);
}
2713
2714static void ipmi_request_event(void)
2715{
2716 ipmi_smi_t intf;
2717 int i;
2718
2719 spin_lock(&interfaces_lock);
2720 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
2721 intf = ipmi_interfaces[i];
2722 if (intf == NULL)
2723 continue;
2724
2725 intf->handlers->request_events(intf->send_info);
2726 }
2727 spin_unlock(&interfaces_lock);
2728}
2729
/* The self-rearming timer that drives all periodic IPMI work. */
static struct timer_list ipmi_timer;

/* Call every ~100 ms. */
#define IPMI_TIMEOUT_TIME 100

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)

/* Request events from the queue every second (this is the number of
 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
 future, IPMI will add a way to know immediately if an event is in
 the queue and this silliness can go away. */
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))

/* Handshake for stopping the timer at module unload: setting
   stop_operation asks the timer not to rearm; the timer acknowledges
   by setting timer_stopped.  NOTE(review): plain volatile ints rather
   than atomics -- relies on aligned int stores being atomic. */
static volatile int stop_operation = 0;
static volatile int timer_stopped = 0;
/* Countdown (in timer ticks) until the next event-queue poll. */
static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2747
2748static void ipmi_timeout(unsigned long data)
2749{
2750 if (stop_operation) {
2751 timer_stopped = 1;
2752 return;
2753 }
2754
2755 ticks_to_req_ev--;
2756 if (ticks_to_req_ev == 0) {
2757 ipmi_request_event();
2758 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
2759 }
2760
2761 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
2762
2763 ipmi_timer.expires += IPMI_TIMEOUT_JIFFIES;
2764 add_timer(&ipmi_timer);
2765}
2766
2767
/* Running counts of allocated-but-not-yet-freed messages.
   NOTE(review): presumably checked at module cleanup for leaks --
   the check is not visible in this chunk. */
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

/* FIXME - convert these to slabs. */
/* Release an SMI message allocated by ipmi_alloc_smi_msg(). */
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}
2777
2778struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
2779{
2780 struct ipmi_smi_msg *rv;
2781 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
2782 if (rv) {
2783 rv->done = free_smi_msg;
2784 rv->user_data = NULL;
2785 atomic_inc(&smi_msg_inuse_count);
2786 }
2787 return rv;
2788}
2789
/* Release a receive message allocated by ipmi_alloc_recv_msg(). */
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}
2795
2796struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
2797{
2798 struct ipmi_recv_msg *rv;
2799
2800 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
2801 if (rv) {
2802 rv->done = free_recv_msg;
2803 atomic_inc(&recv_msg_inuse_count);
2804 }
2805 return rv;
2806}
2807
2808#ifdef CONFIG_IPMI_PANIC_EVENT
2809
/* No-op done handler for on-stack SMI messages used in panic context,
   where nothing may be freed. */
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
}
2813
/* No-op done handler for on-stack receive messages used in panic
   context, where nothing may be freed. */
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
}
2817
2818#ifdef CONFIG_IPMI_PANIC_STRING
/* null_user_handler hook: if the response is a successful Get Event
   Receiver reply, record the receiver's IPMB address and LUN on the
   interface for later SEL delivery. */
static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
{
	if ((msg->rsp[0] == (IPMI_NETFN_SENSOR_EVENT_RESPONSE << 2))
	    && (msg->rsp[1] == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->rsp[2] == IPMI_CC_NO_ERROR))
	{
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->rsp[3];
		intf->event_receiver_lun = msg->rsp[4] & 0x3;
	}
}
2830
/* null_user_handler hook: if the response is a successful Get Device
   ID reply, record whether the local MC is an SEL device and/or an
   event generator (bits 2 and 5 of the additional-support byte). */
static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_smi_msg *msg)
{
	if ((msg->rsp[0] == (IPMI_NETFN_APP_RESPONSE << 2))
	    && (msg->rsp[1] == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->rsp[2] == IPMI_CC_NO_ERROR))
	{
		/* A get device id command, save if we are an event
		   receiver or generator. */
		intf->local_sel_device = (msg->rsp[8] >> 2) & 1;
		intf->local_event_generator = (msg->rsp[8] >> 5) & 1;
	}
}
2843#endif
2844
/* On panic, send an "OS Critical Stop" platform event to every
   registered BMC and, when CONFIG_IPMI_PANIC_STRING is set, log the
   panic string into a SEL as a series of OEM records.  Runs in panic
   context: interfaces are put in run-to-completion mode, all messages
   are on-stack with no-op done handlers, and nothing sleeps. */
static void send_panic_events(char *str)
{
	struct kernel_ipmi_msg msg;
	ipmi_smi_t intf;
	unsigned char data[16];
	int i;
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x21; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/* Put a few breadcrumbs in.  Hopefully later we can add more things
	   to make the panic events more useful.
	   NOTE(review): if str is shorter than 3 characters this reads
	   past its terminating NUL, and when str is NULL data[3]/[6]/[7]
	   go out uninitialized -- confirm callers always pass a
	   reasonable panic string. */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;

	/* For every registered interface, send the event. */
	for (i=0; i<MAX_IPMI_INTERFACES; i++) {
		intf = ipmi_interfaces[i];
		if (intf == NULL)
			continue;

		/* Send the event announcing the panic. */
		intf->handlers->set_run_to_completion(intf->send_info, 1);
		i_ipmi_request(NULL,
			       intf,
			       &addr,
			       0,
			       &msg,
			       NULL,
			       &smi_msg,
			       &recv_msg,
			       0,
			       intf->my_address,
			       intf->my_lun,
			       0, 1); /* Don't retry, and don't wait. */
	}

#ifdef CONFIG_IPMI_PANIC_STRING
	/* On every interface, dump a bunch of OEM event holding the
	   string. */
	if (!str)
		return;

	for (i=0; i<MAX_IPMI_INTERFACES; i++) {
		char                  *p = str;
		struct ipmi_ipmb_addr *ipmb;
		int                   j;

		intf = ipmi_interfaces[i];
		if (intf == NULL)
			continue;

		/* First job here is to figure out where to send the
		   OEM events.  There's no way in IPMI to send OEM
		   events using an event send command, so we have to
		   find the SEL to put them in and stick them in
		   there. */

		/* Get capabilities from the get device id. */
		intf->local_sel_device = 0;
		intf->local_event_generator = 0;
		intf->event_receiver = 0;

		/* Request the device info from the local MC.  The reply
		   is captured by device_id_fetcher via null_user_handler. */
		msg.netfn = IPMI_NETFN_APP_REQUEST;
		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = device_id_fetcher;
		i_ipmi_request(NULL,
			       intf,
			       &addr,
			       0,
			       &msg,
			       NULL,
			       &smi_msg,
			       &recv_msg,
			       0,
			       intf->my_address,
			       intf->my_lun,
			       0, 1); /* Don't retry, and don't wait. */

		if (intf->local_event_generator) {
			/* Request the event receiver from the local MC. */
			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
			msg.data = NULL;
			msg.data_len = 0;
			intf->null_user_handler = event_receiver_fetcher;
			i_ipmi_request(NULL,
				       intf,
				       &addr,
				       0,
				       &msg,
				       NULL,
				       &smi_msg,
				       &recv_msg,
				       0,
				       intf->my_address,
				       intf->my_lun,
				       0, 1); /* no retry, and no wait. */
		}
		intf->null_user_handler = NULL;

		/* Validate the event receiver.  The low bit must not
		   be 1 (it must be a valid IPMB address), it cannot
		   be zero, and it must not be my address. */
		if (((intf->event_receiver & 1) == 0)
		    && (intf->event_receiver != 0)
		    && (intf->event_receiver != intf->my_address))
		{
			/* The event receiver is valid, send an IPMB
			   message. */
			ipmb = (struct ipmi_ipmb_addr *) &addr;
			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb->channel = 0; /* FIXME - is this right? */
			ipmb->lun = intf->event_receiver_lun;
			ipmb->slave_addr = intf->event_receiver;
		} else if (intf->local_sel_device) {
			/* The event receiver was not valid (or was
			   me), but I am an SEL device, just dump it
			   in my SEL. */
			si = (struct ipmi_system_interface_addr *) &addr;
			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			si->channel = IPMI_BMC_CHANNEL;
			si->lun = 0;
		} else
			continue; /* No where to send the event. */


		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
		msg.data = data;
		msg.data_len = 16;

		/* Chop the string into 11-byte chunks, one OEM SEL
		   record each, tagged with a sequence number. */
		j = 0;
		while (*p) {
			int size = strlen(p);

			if (size > 11)
				size = 11;
			data[0] = 0;
			data[1] = 0;
			data[2] = 0xf0; /* OEM event without timestamp. */
			data[3] = intf->my_address;
			data[4] = j++; /* sequence # */
			/* Always give 11 bytes, so strncpy will fill
			   it with zeroes for me. */
			strncpy(data+5, p, 11);
			p += size;

			i_ipmi_request(NULL,
				       intf,
				       &addr,
				       0,
				       &msg,
				       NULL,
				       &smi_msg,
				       &recv_msg,
				       0,
				       intf->my_address,
				       intf->my_lun,
				       0, 1); /* no retry, and no wait. */
		}
	}
#endif /* CONFIG_IPMI_PANIC_STRING */
}
3035#endif /* CONFIG_IPMI_PANIC_EVENT */
3036
/* Guard so the panic path runs at most once even if the notifier
   fires repeatedly. */
static int has_paniced = 0;

/* Panic notifier callback: switch every registered interface to
   run-to-completion (polled) mode so messages can still go out with
   interrupts disabled, then optionally emit the panic events.
   ptr is the panic string passed down the notifier chain. */
static int panic_event(struct notifier_block *this,
		       unsigned long         event,
		       void                  *ptr)
{
	int i;
	ipmi_smi_t intf;

	if (has_paniced)
		return NOTIFY_DONE;
	has_paniced = 1;

	/* For every registered interface, set it to run to completion. */
	for (i=0; i<MAX_IPMI_INTERFACES; i++) {
		intf = ipmi_interfaces[i];
		if (intf == NULL)
			continue;

		intf->handlers->set_run_to_completion(intf->send_info, 1);
	}

#ifdef CONFIG_IPMI_PANIC_EVENT
	send_panic_events(ptr);
#endif

	return NOTIFY_DONE;
}
3065
/* Registered on the panic notifier chain at init time; high priority
   so IPMI panic handling runs before most other notifiers. */
static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};
3071
3072static int ipmi_init_msghandler(void)
3073{
3074 int i;
3075
3076 if (initialized)
3077 return 0;
3078
3079 printk(KERN_INFO "ipmi message handler version "
3080 IPMI_MSGHANDLER_VERSION "\n");
3081
3082 for (i=0; i<MAX_IPMI_INTERFACES; i++) {
3083 ipmi_interfaces[i] = NULL;
3084 }
3085
3086 proc_ipmi_root = proc_mkdir("ipmi", NULL);
3087 if (!proc_ipmi_root) {
3088 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
3089 return -ENOMEM;
3090 }
3091
3092 proc_ipmi_root->owner = THIS_MODULE;
3093
3094 init_timer(&ipmi_timer);
3095 ipmi_timer.data = 0;
3096 ipmi_timer.function = ipmi_timeout;
3097 ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
3098 add_timer(&ipmi_timer);
3099
3100 notifier_chain_register(&panic_notifier_list, &panic_block);
3101
3102 initialized = 1;
3103
3104 return 0;
3105}
3106
3107static __init int ipmi_init_msghandler_mod(void)
3108{
3109 ipmi_init_msghandler();
3110 return 0;
3111}
3112
/* Module teardown.  Callers guarantee no interfaces remain registered.
   Unhooks the panic notifier, handshakes the periodic timer to a stop,
   removes the proc directory, and reports any leaked message buffers
   via the in-use counters. */
static __exit void cleanup_ipmi(void)
{
	int count;

	if (!initialized)
		return;

	notifier_chain_unregister(&panic_notifier_list, &panic_block);

	/* This can't be called if any interfaces exist, so no worry about
	   shutting down the interfaces. */

	/* Tell the timer to stop, then wait for it to stop.  This avoids
	   problems with race conditions removing the timer here.
	   ipmi_timeout() sets timer_stopped and stops re-arming once it
	   sees stop_operation. */
	stop_operation = 1;
	while (!timer_stopped) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	remove_proc_entry(proc_ipmi_root->name, &proc_root);

	initialized = 0;

	/* Check for buffer leaks. */
	count = atomic_read(&smi_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "SMI message count %d at exit\n",
		       count);
	count = atomic_read(&recv_msg_inuse_count);
	if (count != 0)
		printk(KERN_WARNING PFX "recv message count %d at exit\n",
		       count);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");

/* Public API used by the SMI drivers (ipmi_si, etc.), the device
   interface, the watchdog and the poweroff modules. */
EXPORT_SYMBOL(ipmi_create_user);
EXPORT_SYMBOL(ipmi_destroy_user);
EXPORT_SYMBOL(ipmi_get_version);
EXPORT_SYMBOL(ipmi_request_settime);
EXPORT_SYMBOL(ipmi_request_supply_msgs);
EXPORT_SYMBOL(ipmi_register_smi);
EXPORT_SYMBOL(ipmi_unregister_smi);
EXPORT_SYMBOL(ipmi_register_for_cmd);
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
EXPORT_SYMBOL(ipmi_smi_msg_received);
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
EXPORT_SYMBOL(ipmi_alloc_smi_msg);
EXPORT_SYMBOL(ipmi_addr_length);
EXPORT_SYMBOL(ipmi_validate_addr);
EXPORT_SYMBOL(ipmi_set_gets_events);
EXPORT_SYMBOL(ipmi_smi_watcher_register);
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
EXPORT_SYMBOL(ipmi_set_my_address);
EXPORT_SYMBOL(ipmi_get_my_address);
EXPORT_SYMBOL(ipmi_set_my_LUN);
EXPORT_SYMBOL(ipmi_get_my_LUN);
EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
new file mode 100644
index 000000000000..cb5cdc6f14bf
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -0,0 +1,549 @@
1/*
2 * ipmi_poweroff.c
3 *
4 * MontaVista IPMI Poweroff extension to sys_reboot
5 *
6 * Author: MontaVista Software, Inc.
7 * Steven Dake <sdake@mvista.com>
8 * Corey Minyard <cminyard@mvista.com>
9 * source@mvista.com
10 *
11 * Copyright 2002,2004 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 *
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34#include <asm/semaphore.h>
35#include <linux/kdev_t.h>
36#include <linux/module.h>
37#include <linux/string.h>
38#include <linux/ipmi.h>
39#include <linux/ipmi_smi.h>
40
41#define PFX "IPMI poweroff: "
42#define IPMI_POWEROFF_VERSION "v33"
43
/* Where do we insert our poweroff function? */
45extern void (*pm_power_off)(void);
46
47/* Stuff from the get device id command. */
48static unsigned int mfg_id;
49static unsigned int prod_id;
50static unsigned char capabilities;
51
/* We use our own messages for this operation, we don't let the system
   allocate them, since we may be in a panic situation.  The whole
   thing is single-threaded, anyway, so multiple messages are not
   required. */
/* No-op "done" handlers: the static messages below must never be
   freed. */
static void dummy_smi_free(struct ipmi_smi_msg *msg)
{
}
static void dummy_recv_free(struct ipmi_recv_msg *msg)
{
}
/* The single statically-allocated request/response pair reused for
   every poweroff transaction. */
static struct ipmi_smi_msg halt_smi_msg =
{
	.done = dummy_smi_free
};
static struct ipmi_recv_msg halt_recv_msg =
{
	.done = dummy_recv_free
};
70
71
72/*
 * Code to send a message and wait for the response.
74 */
75
/* Receive callback: the waiter stashed a semaphore pointer in
   user_msg_data; post it to wake the sleeper.  A NULL semaphore means
   run-to-completion mode, where nobody is sleeping. */
static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
{
	struct semaphore *sem = recv_msg->user_msg_data;

	if (sem)
		up(sem);
}
83
/* User handlers for the poweroff IPMI user; only message receipt is
   of interest. */
static struct ipmi_user_hndl ipmi_poweroff_handler =
{
	.ipmi_recv_hndl = receive_handler
};
88
89
/* Send a message using the shared static buffers and sleep until the
   response arrives (receive_handler posts the semaphore).  Returns a
   nonzero submission error, or otherwise the IPMI completion code
   from the response (0 = success).  Not reentrant: relies on the
   single-threaded use of halt_smi_msg/halt_recv_msg. */
static int ipmi_request_wait_for_response(ipmi_user_t            user,
					  struct ipmi_addr       *addr,
					  struct kernel_ipmi_msg *send_msg)
{
	int rv;
	struct semaphore sem;

	sema_init (&sem, 0);

	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &sem,
				      &halt_smi_msg, &halt_recv_msg, 0);
	if (rv)
		return rv;

	down (&sem);

	return halt_recv_msg.msg.data[0];
}
108
109/* We are in run-to-completion mode, no semaphore is desired. */
/* We are in run-to-completion mode, no semaphore is desired: the call
   completes synchronously, so the completion code is available as
   soon as ipmi_request_supply_msgs() returns.  Return value semantics
   match ipmi_request_wait_for_response(). */
static int ipmi_request_in_rc_mode(ipmi_user_t            user,
				   struct ipmi_addr       *addr,
				   struct kernel_ipmi_msg *send_msg)
{
	int rv;

	rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
				      &halt_smi_msg, &halt_recv_msg, 0);
	if (rv)
		return rv;

	return halt_recv_msg.msg.data[0];
}
123
124/*
125 * ATCA Support
126 */
127
128#define IPMI_NETFN_ATCA 0x2c
129#define IPMI_ATCA_SET_POWER_CMD 0x11
130#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
131#define IPMI_PICMG_ID 0
132
/* Probe for ATCA support: issue the PICMG Get Address Info command to
   the BMC; a zero completion code means the platform speaks ATCA.
   Returns nonzero (true) if ATCA is detected. */
static int ipmi_atca_detect (ipmi_user_t user)
{
	struct ipmi_system_interface_addr smi_addr;
	struct kernel_ipmi_msg            send_msg;
	int                               rv;
	unsigned char                     data[1];

	/*
	 * Configure IPMI address for local access
	 */
	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr.channel = IPMI_BMC_CHANNEL;
	smi_addr.lun = 0;

	/*
	 * Use get address info to check and see if we are ATCA
	 */
	send_msg.netfn = IPMI_NETFN_ATCA;
	send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
	data[0] = IPMI_PICMG_ID;
	send_msg.data = data;
	send_msg.data_len = sizeof(data);
	rv = ipmi_request_wait_for_response(user,
					    (struct ipmi_addr *) &smi_addr,
					    &send_msg);
	return !rv;
}
160
161static void ipmi_poweroff_atca (ipmi_user_t user)
162{
163 struct ipmi_system_interface_addr smi_addr;
164 struct kernel_ipmi_msg send_msg;
165 int rv;
166 unsigned char data[4];
167
168 /*
169 * Configure IPMI address for local access
170 */
171 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
172 smi_addr.channel = IPMI_BMC_CHANNEL;
173 smi_addr.lun = 0;
174
175 printk(KERN_INFO PFX "Powering down via ATCA power command\n");
176
177 /*
178 * Power down
179 */
180 send_msg.netfn = IPMI_NETFN_ATCA;
181 send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
182 data[0] = IPMI_PICMG_ID;
183 data[1] = 0; /* FRU id */
184 data[2] = 0; /* Power Level */
185 data[3] = 0; /* Don't change saved presets */
186 send_msg.data = data;
187 send_msg.data_len = sizeof (data);
188 rv = ipmi_request_in_rc_mode(user,
189 (struct ipmi_addr *) &smi_addr,
190 &send_msg);
191 if (rv) {
192 printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
193 " IPMI error 0x%x\n", rv);
194 goto out;
195 }
196
197 out:
198 return;
199}
200
201/*
202 * CPI1 Support
203 */
204
205#define IPMI_NETFN_OEM_1 0xf8
206#define OEM_GRP_CMD_SET_RESET_STATE 0x84
207#define OEM_GRP_CMD_SET_POWER_STATE 0x82
208#define IPMI_NETFN_OEM_8 0xf8
209#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80
210#define OEM_GRP_CMD_GET_SLOT_GA 0xa3
211#define IPMI_NETFN_SENSOR_EVT 0x10
212#define IPMI_CMD_GET_EVENT_RECEIVER 0x01
213
214#define IPMI_CPI1_PRODUCT_ID 0x000157
215#define IPMI_CPI1_MANUFACTURER_ID 0x0108
216
217static int ipmi_cpi1_detect (ipmi_user_t user)
218{
219 return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
220 && (prod_id == IPMI_CPI1_PRODUCT_ID));
221}
222
/* CPI1 poweroff sequence.  Order matters: (1) learn our slot and
   derive the hotswap IPMB address, (2) find the active event receiver,
   (3) ask it (over IPMB) to remove the blade from the DPV, then
   (4) assert reset and (5) power down via OEM commands to the local
   BMC.  All steps run in run-to-completion mode. */
static void ipmi_poweroff_cpi1 (ipmi_user_t user)
{
	struct ipmi_system_interface_addr smi_addr;
	struct ipmi_ipmb_addr             ipmb_addr;
	struct kernel_ipmi_msg            send_msg;
	int                               rv;
	unsigned char                     data[1];
	int                               slot;
	unsigned char                     hotswap_ipmb;
	unsigned char                     aer_addr;
	unsigned char                     aer_lun;

	/*
	 * Configure IPMI address for local access
	 */
	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr.channel = IPMI_BMC_CHANNEL;
	smi_addr.lun = 0;

	printk(KERN_INFO PFX "Powering down via CPI1 power command\n");

	/*
	 * Get IPMI ipmb address
	 */
	send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
	send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
	send_msg.data = NULL;
	send_msg.data_len = 0;
	rv = ipmi_request_in_rc_mode(user,
				     (struct ipmi_addr *) &smi_addr,
				     &send_msg);
	if (rv)
		goto out;
	slot = halt_recv_msg.msg.data[1];
	/* NOTE(review): slot-to-IPMB mapping constants (0xae/0xb0) are
	   CPI1 platform specific -- verify against the board spec. */
	hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);

	/*
	 * Get active event receiver
	 */
	send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
	send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
	send_msg.data = NULL;
	send_msg.data_len = 0;
	rv = ipmi_request_in_rc_mode(user,
				     (struct ipmi_addr *) &smi_addr,
				     &send_msg);
	if (rv)
		goto out;
	aer_addr = halt_recv_msg.msg.data[1];
	aer_lun = halt_recv_msg.msg.data[2];

	/*
	 * Setup IPMB address target instead of local target
	 */
	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.channel = 0;
	ipmb_addr.slave_addr = aer_addr;
	ipmb_addr.lun = aer_lun;

	/*
	 * Send request hotswap control to remove blade from dpv
	 * (best effort: the return code is deliberately ignored).
	 */
	send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
	send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
	send_msg.data = &hotswap_ipmb;
	send_msg.data_len = 1;
	ipmi_request_in_rc_mode(user,
				(struct ipmi_addr *) &ipmb_addr,
				&send_msg);

	/*
	 * Set reset asserted
	 */
	send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
	send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
	send_msg.data = data;
	data[0] = 1; /* Reset asserted state */
	send_msg.data_len = 1;
	rv = ipmi_request_in_rc_mode(user,
				     (struct ipmi_addr *) &smi_addr,
				     &send_msg);
	if (rv)
		goto out;

	/*
	 * Power down
	 */
	send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
	send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
	send_msg.data = data;
	data[0] = 1; /* Power down state */
	send_msg.data_len = 1;
	rv = ipmi_request_in_rc_mode(user,
				     (struct ipmi_addr *) &smi_addr,
				     &send_msg);
	if (rv)
		goto out;

 out:
	return;
}
324
325/*
326 * Standard chassis support
327 */
328
329#define IPMI_NETFN_CHASSIS_REQUEST 0
330#define IPMI_CHASSIS_CONTROL_CMD 0x02
331
332static int ipmi_chassis_detect (ipmi_user_t user)
333{
334 /* Chassis support, use it. */
335 return (capabilities & 0x80);
336}
337
338static void ipmi_poweroff_chassis (ipmi_user_t user)
339{
340 struct ipmi_system_interface_addr smi_addr;
341 struct kernel_ipmi_msg send_msg;
342 int rv;
343 unsigned char data[1];
344
345 /*
346 * Configure IPMI address for local access
347 */
348 smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
349 smi_addr.channel = IPMI_BMC_CHANNEL;
350 smi_addr.lun = 0;
351
352 printk(KERN_INFO PFX "Powering down via IPMI chassis control command\n");
353
354 /*
355 * Power down
356 */
357 send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
358 send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
359 data[0] = 0; /* Power down */
360 send_msg.data = data;
361 send_msg.data_len = sizeof(data);
362 rv = ipmi_request_in_rc_mode(user,
363 (struct ipmi_addr *) &smi_addr,
364 &send_msg);
365 if (rv) {
366 printk(KERN_ERR PFX "Unable to send chassis powerdown message,"
367 " IPMI error 0x%x\n", rv);
368 goto out;
369 }
370
371 out:
372 return;
373}
374
375
/* Table of possible power off functions, probed in order at interface
   registration time; the first detect() that returns true wins. */
struct poweroff_function {
	char *platform_type;
	int (*detect)(ipmi_user_t user);
	void (*poweroff_func)(ipmi_user_t user);
};

static struct poweroff_function poweroff_functions[] = {
	{ .platform_type	= "ATCA",
	  .detect		= ipmi_atca_detect,
	  .poweroff_func	= ipmi_poweroff_atca },
	{ .platform_type	= "CPI1",
	  .detect		= ipmi_cpi1_detect,
	  .poweroff_func	= ipmi_poweroff_cpi1 },
	/* Chassis should generally be last, other things should override
	   it. */
	{ .platform_type	= "chassis",
	  .detect		= ipmi_chassis_detect,
	  .poweroff_func	= ipmi_poweroff_chassis },
};
#define NUM_PO_FUNCS (sizeof(poweroff_functions) \
		      / sizeof(struct poweroff_function))
398
399
/* Our local state.  "ready" flips to 1 once an interface has been
   grabbed and a working poweroff method selected. */
static int ready = 0;
static ipmi_user_t ipmi_user;
/* Platform-specific poweroff routine chosen from poweroff_functions. */
static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;

/* Holds the old poweroff function so we can restore it on removal. */
static void (*old_poweroff_func)(void);
407
408
/* Called on a powerdown request (installed as pm_power_off).  Wraps
   the platform-specific routine in run-to-completion mode since
   interrupts may already be disabled at this point. */
static void ipmi_poweroff_function (void)
{
	if (!ready)
		return;

	/* Use run-to-completion mode, since interrupts may be off. */
	ipmi_user_set_run_to_completion(ipmi_user, 1);
	specific_poweroff_func(ipmi_user);
	ipmi_user_set_run_to_completion(ipmi_user, 0);
}
420
421/* Wait for an IPMI interface to be installed, the first one installed
422 will be grabbed by this code and used to perform the powerdown. */
/* Wait for an IPMI interface to be installed, the first one installed
   will be grabbed by this code and used to perform the powerdown.
   Reads the device id (manufacturer, product, capabilities) into the
   module-level globals, then probes the poweroff_functions table and,
   on a match, installs ipmi_poweroff_function as pm_power_off. */
static void ipmi_po_new_smi(int if_num)
{
	struct ipmi_system_interface_addr smi_addr;
	struct kernel_ipmi_msg            send_msg;
	int                               rv;
	int                               i;

	if (ready)
		return;

	rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL, &ipmi_user);
	if (rv) {
		printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
		       rv);
		return;
	}

	/*
	 * Do a get device id and store some results, since this is
	 * used by several functions.
	 */
	smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr.channel = IPMI_BMC_CHANNEL;
	smi_addr.lun = 0;

	send_msg.netfn = IPMI_NETFN_APP_REQUEST;
	send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	send_msg.data = NULL;
	send_msg.data_len = 0;
	rv = ipmi_request_wait_for_response(ipmi_user,
					    (struct ipmi_addr *) &smi_addr,
					    &send_msg);
	if (rv) {
		printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
		       " IPMI error 0x%x\n", rv);
		goto out_err;
	}

	/* Need the full 12-byte Get Device ID response (data[0] is the
	   completion code, payload starts at data[1]). */
	if (halt_recv_msg.msg.data_len < 12) {
		printk(KERN_ERR PFX "(chassis) IPMI get device id info too,"
		       " short, was %d bytes, needed %d bytes\n",
		       halt_recv_msg.msg.data_len, 12);
		goto out_err;
	}

	/* 24-bit manufacturer id, 16-bit product id, little endian. */
	mfg_id = (halt_recv_msg.msg.data[7]
		  | (halt_recv_msg.msg.data[8] << 8)
		  | (halt_recv_msg.msg.data[9] << 16));
	prod_id = (halt_recv_msg.msg.data[10]
		   | (halt_recv_msg.msg.data[11] << 8));
	capabilities = halt_recv_msg.msg.data[6];


	/* Scan for a poweroff method */
	for (i=0; i<NUM_PO_FUNCS; i++) {
		if (poweroff_functions[i].detect(ipmi_user))
			goto found;
	}

 out_err:
	printk(KERN_ERR PFX "Unable to find a poweroff function that"
	       " will work, giving up\n");
	ipmi_destroy_user(ipmi_user);
	return;

 found:
	printk(KERN_INFO PFX "Found a %s style poweroff function\n",
	       poweroff_functions[i].platform_type);
	specific_poweroff_func = poweroff_functions[i].poweroff_func;
	old_poweroff_func = pm_power_off;
	pm_power_off = ipmi_poweroff_function;
	ready = 1;
}
496
static void ipmi_po_smi_gone(int if_num)
{
	/* This can never be called, because once poweroff driver is
	   registered, the interface can't go away until the power
	   driver is unregistered. */
}

/* Watcher registered at init; grabs the first interface that appears
   via ipmi_po_new_smi(). */
static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_po_new_smi,
	.smi_gone = ipmi_po_smi_gone
};
510
511
512/*
513 * Startup and shutdown functions.
514 */
515static int ipmi_poweroff_init (void)
516{
517 int rv;
518
519 printk ("Copyright (C) 2004 MontaVista Software -"
520 " IPMI Powerdown via sys_reboot version "
521 IPMI_POWEROFF_VERSION ".\n");
522
523 rv = ipmi_smi_watcher_register(&smi_watcher);
524 if (rv)
525 printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
526
527 return rv;
528}
529
#ifdef MODULE
/* Module unload: drop the watcher and, if we had claimed an interface,
   release the IPMI user and restore the previous pm_power_off hook. */
static __exit void ipmi_poweroff_cleanup(void)
{
	int rv;

	ipmi_smi_watcher_unregister(&smi_watcher);

	if (ready) {
		rv = ipmi_destroy_user(ipmi_user);
		if (rv)
			printk(KERN_ERR PFX "could not cleanup the IPMI"
			       " user: 0x%x\n", rv);
		pm_power_off = old_poweroff_func;
	}
}
module_exit(ipmi_poweroff_cleanup);
#endif
547
548module_init(ipmi_poweroff_init);
549MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
new file mode 100644
index 000000000000..29de259a981e
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -0,0 +1,2359 @@
1/*
2 * ipmi_si.c
3 *
4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5 * BT).
6 *
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
10 *
11 * Copyright 2002 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 *
19 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
20 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
25 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
26 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
27 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
28 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 *
30 * You should have received a copy of the GNU General Public License along
31 * with this program; if not, write to the Free Software Foundation, Inc.,
32 * 675 Mass Ave, Cambridge, MA 02139, USA.
33 */
34
35/*
36 * This file holds the "policy" for the interface to the SMI state
37 * machine. It does the configuration, handles timers and interrupts,
38 * and drives the real SMI state machine.
39 */
40
41#include <linux/config.h>
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44#include <asm/system.h>
45#include <linux/sched.h>
46#include <linux/timer.h>
47#include <linux/errno.h>
48#include <linux/spinlock.h>
49#include <linux/slab.h>
50#include <linux/delay.h>
51#include <linux/list.h>
52#include <linux/pci.h>
53#include <linux/ioport.h>
54#include <asm/irq.h>
55#ifdef CONFIG_HIGH_RES_TIMERS
56#include <linux/hrtime.h>
57# if defined(schedule_next_int)
58/* Old high-res timer code, do translations. */
59# define get_arch_cycles(a) quick_update_jiffies_sub(a)
60# define arch_cycles_per_jiffy cycles_per_jiffies
61# endif
/* Advance a high-res timer by v microseconds, carrying any overflow of
   the sub-jiffy remainder into whole jiffies.  Only built under
   CONFIG_HIGH_RES_TIMERS, where timer_list has sub_expires. */
static inline void add_usec_to_timer(struct timer_list *t, long v)
{
	t->sub_expires += nsec_to_arch_cycle(v * 1000);
	while (t->sub_expires >= arch_cycles_per_jiffy)
	{
		t->expires++;
		t->sub_expires -= arch_cycles_per_jiffy;
	}
}
71#endif
72#include <linux/interrupt.h>
73#include <linux/rcupdate.h>
74#include <linux/ipmi_smi.h>
75#include <asm/io.h>
76#include "ipmi_si_sm.h"
77#include <linux/init.h>
78
79#define IPMI_SI_VERSION "v33"
80
81/* Measure times between events in the driver. */
82#undef DEBUG_TIMING
83
84/* Call every 10 ms. */
85#define SI_TIMEOUT_TIME_USEC 10000
86#define SI_USEC_PER_JIFFY (1000000/HZ)
87#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
88#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
89 short timeout */
90
/* State of the driver's own message sequencing: which BMC transaction
   (if any) is currently outstanding.  See handle_transaction_done()
   for the transitions. */
enum si_intf_state {
	SI_NORMAL,		/* idle, or running a user message */
	SI_GETTING_FLAGS,	/* GET_MSG_FLAGS outstanding */
	SI_GETTING_EVENTS,	/* READ_EVENT_MSG_BUFFER outstanding */
	SI_CLEARING_FLAGS,	/* CLEAR_MSG_FLAGS outstanding */
	SI_CLEARING_FLAGS_THEN_SET_IRQ, /* ditto, then enable the irq */
	SI_GETTING_MESSAGES,	/* GET_MSG outstanding */
	SI_ENABLE_INTERRUPTS1,	/* GET_BMC_GLOBAL_ENABLES outstanding */
	SI_ENABLE_INTERRUPTS2	/* SET_BMC_GLOBAL_ENABLES outstanding */
	/* FIXME - add watchdog stuff. */
};
102
/* Which flavor of system-interface hardware this instance drives. */
enum si_type {
    SI_KCS, SI_SMIC, SI_BT
};
106
/* Per-interface driver state; one instance per registered system
   interface. */
struct smi_info
{
	ipmi_smi_t intf;		/* upper-layer interface handle */
	struct si_sm_data *si_sm;	/* opaque low-level SM state */
	struct si_sm_handlers *handlers; /* ops of the KCS/SMIC/BT SM */
	enum si_type si_type;
	spinlock_t si_lock;		/* guards the SM and si_state */
	spinlock_t msg_lock;		/* guards the two xmit queues */
	struct list_head xmit_msgs;	/* normal-priority outgoing msgs */
	struct list_head hp_xmit_msgs;	/* high-priority outgoing msgs */
	struct ipmi_smi_msg *curr_msg;	/* message the SM is working on */
	enum si_intf_state si_state;

	/* Used to handle the various types of I/O that can occur with
	   IPMI */
	struct si_sm_io io;
	int (*io_setup)(struct smi_info *info);
	void (*io_cleanup)(struct smi_info *info);
	int (*irq_setup)(struct smi_info *info);
	void (*irq_cleanup)(struct smi_info *info);
	unsigned int io_size;

	/* Flags from the last GET_MSG_FLAGS command, used when an ATTN
	   is set to hold the flags until we are done handling everything
	   from the flags. */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
	unsigned char msg_flags;

	/* If set to true, this will request events the next time the
	   state machine is idle. */
	atomic_t req_events;

	/* If true, run the state machine to completion on every send
	   call. Generally used after a panic to make sure stuff goes
	   out. */
	int run_to_completion;

	/* The I/O port of an SI interface. */
	int port;

	/* The space between start addresses of the two ports. For
	   instance, if the first port is 0xca2 and the spacing is 4, then
	   the second port is 0xca6. */
	unsigned int spacing;

	/* zero if no irq; */
	int irq;

	/* The timer for this si. */
	struct timer_list si_timer;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long last_timeout_jiffies;

	/* Used to gracefully stop the timer without race conditions. */
	volatile int stop_operation;
	volatile int timer_stopped;

	/* The driver will disable interrupts when it gets into a
	   situation where it cannot handle messages due to lack of
	   memory. Once that situation clears up, it will re-enable
	   interrupts. */
	int interrupt_disabled;

	/* Device/firmware identification, presumably from the BMC's
	   Get Device ID response -- NOTE(review): populated outside
	   this chunk, confirm against the probe code. */
	unsigned char ipmi_si_dev_rev;
	unsigned char ipmi_si_fw_rev_major;
	unsigned char ipmi_si_fw_rev_minor;
	unsigned char ipmi_version_major;
	unsigned char ipmi_version_minor;

	/* Slave address, could be reported from DMI. */
	unsigned char slave_addr;

	/* Counters and things for the proc filesystem. */
	spinlock_t count_lock;		/* guards the counters below */
	unsigned long short_timeouts;
	unsigned long long_timeouts;
	unsigned long timeout_restarts;
	unsigned long idles;
	unsigned long interrupts;
	unsigned long attentions;
	unsigned long flag_fetches;
	unsigned long hosed_count;
	unsigned long complete_transactions;
	unsigned long events;
	unsigned long watchdog_pretimeouts;
	unsigned long incoming_messages;
};
197
198static void si_restart_short_timer(struct smi_info *smi_info);
199
/* Hand a finished message up to the message layer.  The caller holds
   si_lock; it is dropped around the callback and re-taken afterwards,
   so the caller must re-validate any state it cached across this
   call. */
static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer with the lock
	   released. */
	spin_unlock(&(smi_info->si_lock));
	ipmi_smi_msg_received(smi_info->intf, msg);
	spin_lock(&(smi_info->si_lock));
}
209
/* Turn curr_msg into an error response (completion code 0xFF) and
   deliver it to the upper layer.  Clears curr_msg; note
   deliver_recv_msg() drops si_lock temporarily. */
static void return_hosed_msg(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4; /* request NetFN -> response NetFN */
	msg->rsp[1] = msg->data[1];	/* same command */
	msg->rsp[2] = 0xFF; /* Unknown error. */
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
223
/* Dequeue the next outgoing message (high-priority queue first) and
   start its transaction.  Returns SI_SM_IDLE when both queues are
   empty, SI_SM_CALL_WITHOUT_DELAY otherwise.  Must be called with
   interrupts off and si_lock held. */
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	int rv;
	struct list_head *entry = NULL;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	/* No need to save flags, we already have interrupts off and we
	   already hold the SMI lock. */
	spin_lock(&(smi_info->msg_lock));

	/* Pick the high priority queue first. */
	if (! list_empty(&(smi_info->hp_xmit_msgs))) {
		entry = smi_info->hp_xmit_msgs.next;
	} else if (! list_empty(&(smi_info->xmit_msgs))) {
		entry = smi_info->xmit_msgs.next;
	}

	if (!entry) {
		smi_info->curr_msg = NULL;
		rv = SI_SM_IDLE;
	} else {
		int err;

		list_del(entry);
		smi_info->curr_msg = list_entry(entry,
						struct ipmi_smi_msg,
						link);
#ifdef DEBUG_TIMING
		do_gettimeofday(&t);
		printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
		/* If the low-level SM refuses the message, bounce it
		   back to the upper layer as an error response. */
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err) {
			return_hosed_msg(smi_info);
		}

		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
	spin_unlock(&(smi_info->msg_lock));

	return rv;
}
271
272static void start_enable_irq(struct smi_info *smi_info)
273{
274 unsigned char msg[2];
275
276 /* If we are enabling interrupts, we have to tell the
277 BMC to use them. */
278 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
279 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
280
281 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
282 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
283}
284
285static void start_clear_flags(struct smi_info *smi_info)
286{
287 unsigned char msg[3];
288
289 /* Make sure the watchdog pre-timeout flag is not set at startup. */
290 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
291 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
292 msg[2] = WDT_PRE_TIMEOUT_INT;
293
294 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
295 smi_info->si_state = SI_CLEARING_FLAGS;
296}
297
/* When we have a situation where we run out of memory and cannot
   allocate messages, we just leave them in the BMC and run the system
   polled until we can allocate some memory.  Once we have some
   memory, we will re-enable the interrupt. */
302static inline void disable_si_irq(struct smi_info *smi_info)
303{
304 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
305 disable_irq_nosync(smi_info->irq);
306 smi_info->interrupt_disabled = 1;
307 }
308}
309
310static inline void enable_si_irq(struct smi_info *smi_info)
311{
312 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
313 enable_irq(smi_info->irq);
314 smi_info->interrupt_disabled = 0;
315 }
316}
317
318static void handle_flags(struct smi_info *smi_info)
319{
320 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
321 /* Watchdog pre-timeout */
322 spin_lock(&smi_info->count_lock);
323 smi_info->watchdog_pretimeouts++;
324 spin_unlock(&smi_info->count_lock);
325
326 start_clear_flags(smi_info);
327 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
328 spin_unlock(&(smi_info->si_lock));
329 ipmi_smi_watchdog_pretimeout(smi_info->intf);
330 spin_lock(&(smi_info->si_lock));
331 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
332 /* Messages available. */
333 smi_info->curr_msg = ipmi_alloc_smi_msg();
334 if (!smi_info->curr_msg) {
335 disable_si_irq(smi_info);
336 smi_info->si_state = SI_NORMAL;
337 return;
338 }
339 enable_si_irq(smi_info);
340
341 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
342 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
343 smi_info->curr_msg->data_size = 2;
344
345 smi_info->handlers->start_transaction(
346 smi_info->si_sm,
347 smi_info->curr_msg->data,
348 smi_info->curr_msg->data_size);
349 smi_info->si_state = SI_GETTING_MESSAGES;
350 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
351 /* Events available. */
352 smi_info->curr_msg = ipmi_alloc_smi_msg();
353 if (!smi_info->curr_msg) {
354 disable_si_irq(smi_info);
355 smi_info->si_state = SI_NORMAL;
356 return;
357 }
358 enable_si_irq(smi_info);
359
360 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
361 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
362 smi_info->curr_msg->data_size = 2;
363
364 smi_info->handlers->start_transaction(
365 smi_info->si_sm,
366 smi_info->curr_msg->data,
367 smi_info->curr_msg->data_size);
368 smi_info->si_state = SI_GETTING_EVENTS;
369 } else {
370 smi_info->si_state = SI_NORMAL;
371 }
372}
373
/* A transaction has completed in the low-level state machine.  Pick up
   the result and act on it according to the state that started the
   transaction: deliver responses upward, process fetched flags, or
   chain into the next transaction.  Called from smi_event_handler()
   with si_lock held; deliver_recv_msg()/handle_flags() may drop it
   temporarily. */
static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;
#ifdef DEBUG_TIMING
	struct timeval t;

	do_gettimeofday(&t);
	printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/* Do this here because deliver_recv_msg() releases the
		   lock, and a new message can be put in during the
		   time the lock is released. */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for
			   now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/* Hmm, no flags.  That's technically illegal, but
			   don't use uninitialized data. */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	case SI_CLEARING_FLAGS_THEN_SET_IRQ:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			printk(KERN_WARNING
			       "ipmi_si: Error clearing flags: %2.2x\n",
			       msg[2]);
		}
		if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
			start_enable_irq(smi_info);
		else
			smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/* Do this here because deliver_recv_msg() releases the
		   lock, and a new message can be put in during the
		   time the lock is released. */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			spin_lock(&smi_info->count_lock);
			smi_info->events++;
			spin_unlock(&smi_info->count_lock);

			/* Do this before we deliver the message
			   because delivering the message releases the
			   lock and something else can mess with the
			   state. */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/* Do this here because deliver_recv_msg() releases the
		   lock, and a new message can be put in during the
		   time the lock is released. */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			spin_lock(&smi_info->count_lock);
			smi_info->incoming_messages++;
			spin_unlock(&smi_info->count_lock);

			/* Do this before we deliver the message
			   because delivering the message releases the
			   lock and something else can mess with the
			   state. */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_ENABLE_INTERRUPTS1:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			printk(KERN_WARNING
			       "ipmi_si: Could not enable interrupts"
			       ", failed get, using polled mode.\n");
			smi_info->si_state = SI_NORMAL;
		} else {
			/* msg[3] is the current enables byte; set the
			   message-queue-interrupt bit and write it back. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = msg[3] | 1; /* enable msg queue int */
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_ENABLE_INTERRUPTS2;
		}
		break;
	}

	case SI_ENABLE_INTERRUPTS2:
	{
		unsigned char msg[4];

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			printk(KERN_WARNING
			       "ipmi_si: Could not enable interrupts"
			       ", failed set, using polled mode.\n");
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}
	}
}
554
/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero.  Core driver loop: runs the
   low-level state machine to a stopping point, reacts to the result
   (transaction done, hosed, attention, idle) and restarts as long as
   there is work.  Must be called with si_lock held. */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

 restart:
	/* There used to be a loop here that waited a little while
	   (around 25us) before giving up. That turned out to be
	   pointless, the minimum delays I was seeing were in the 300us
	   range, which is far too long to wait in an interrupt. So
	   we just run until the state machine tells us something
	   happened or it needs a delay. */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
	{
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	}

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
	{
		spin_lock(&smi_info->count_lock);
		smi_info->complete_transactions++;
		spin_unlock(&smi_info->count_lock);

		handle_transaction_done(smi_info);
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	}
	else if (si_sm_result == SI_SM_HOSED)
	{
		spin_lock(&smi_info->count_lock);
		smi_info->hosed_count++;
		spin_unlock(&smi_info->count_lock);

		/* Do this before return_hosed_msg, because that
		   releases the lock. */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/* If we were handling a user message, format
			   a response to send to the upper layer to
			   tell it about the error. */
			return_hosed_msg(smi_info);
		}
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	}

	/* We prefer handling attn over new messages. */
	if (si_sm_result == SI_SM_ATTN)
	{
		unsigned char msg[2];

		spin_lock(&smi_info->count_lock);
		smi_info->attentions++;
		spin_unlock(&smi_info->count_lock);

		/* Got a attn, send down a get message flags to see
		   what's causing it.  It would be better to handle
		   this in the upper layer, but due to the way
		   interrupts work with the SMI, that's not really
		   possible. */
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		smi_info->handlers->start_transaction(
			smi_info->si_sm, msg, 2);
		smi_info->si_state = SI_GETTING_FLAGS;
		goto restart;
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		spin_lock(&smi_info->count_lock);
		smi_info->idles++;
		spin_unlock(&smi_info->count_lock);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events)))
	{
		/* We are idle and the upper layer requested that I fetch
		   events, so do so. */
		unsigned char msg[2];

		spin_lock(&smi_info->count_lock);
		smi_info->flag_fetches++;
		spin_unlock(&smi_info->count_lock);

		atomic_set(&smi_info->req_events, 0);
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		smi_info->handlers->start_transaction(
			smi_info->si_sm, msg, 2);
		smi_info->si_state = SI_GETTING_FLAGS;
		goto restart;
	}

	return si_sm_result;
}
660
/* Upper-layer send entry point.  priority > 0 selects the
   high-priority queue.  In run-to-completion mode the queues are
   drained synchronously by polling; otherwise the message is queued
   and, if the interface is idle, its transaction is started right
   away with a shortened timer. */
static void sender(void *send_info,
		   struct ipmi_smi_msg *msg,
		   int priority)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;
	unsigned long flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(smi_info->msg_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

	if (smi_info->run_to_completion) {
		/* If we are running to completion, then throw it in
		   the list and run transactions until everything is
		   clear. Priority doesn't matter here. */
		list_add_tail(&(msg->link), &(smi_info->xmit_msgs));

		/* We have to release the msg lock and claim the smi
		   lock in this case, because of race conditions. */
		spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		result = smi_event_handler(smi_info, 0);
		while (result != SI_SM_IDLE) {
			udelay(SI_SHORT_TIMEOUT_USEC);
			result = smi_event_handler(smi_info,
						   SI_SHORT_TIMEOUT_USEC);
		}
		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		return;
	} else {
		if (priority > 0) {
			list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
		} else {
			list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
		}
	}
	spin_unlock_irqrestore(&(smi_info->msg_lock), flags);

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	if ((smi_info->si_state == SI_NORMAL)
	    && (smi_info->curr_msg == NULL))
	{
		/* Interface is idle: kick off this message now and
		   shorten the timer so it gets serviced promptly. */
		start_next_msg(smi_info);
		si_restart_short_timer(smi_info);
	}
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
715
716static void set_run_to_completion(void *send_info, int i_run_to_completion)
717{
718 struct smi_info *smi_info = send_info;
719 enum si_sm_result result;
720 unsigned long flags;
721
722 spin_lock_irqsave(&(smi_info->si_lock), flags);
723
724 smi_info->run_to_completion = i_run_to_completion;
725 if (i_run_to_completion) {
726 result = smi_event_handler(smi_info, 0);
727 while (result != SI_SM_IDLE) {
728 udelay(SI_SHORT_TIMEOUT_USEC);
729 result = smi_event_handler(smi_info,
730 SI_SHORT_TIMEOUT_USEC);
731 }
732 }
733
734 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
735}
736
/* Run one pass of the state machine with zero elapsed time; used by
   the upper layer to poll the interface. */
static void poll(void *send_info)
{
	smi_event_handler((struct smi_info *) send_info, 0);
}
743
744static void request_events(void *send_info)
745{
746 struct smi_info *smi_info = send_info;
747
748 atomic_set(&smi_info->req_events, 1);
749}
750
751static int initialized = 0;
752
/* Re-arm the interface timer to fire a short (SI_SHORT_TIMEOUT_USEC)
   interval from now, high-res-timer builds only.  Must be called with
   interrupts off and with the si_lock held. */
static void si_restart_short_timer(struct smi_info *smi_info)
{
#if defined(CONFIG_HIGH_RES_TIMERS)
	unsigned long flags;
	unsigned long jiffies_now;

	if (del_timer(&(smi_info->si_timer))) {
		/* If we don't delete the timer, then it will go off
		   immediately, anyway.  So we only process if we
		   actually delete the timer. */

		/* We already have irqsave on, so no need for it
		   here. */
		read_lock(&xtime_lock);
		jiffies_now = jiffies;
		smi_info->si_timer.expires = jiffies_now;
		smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
		/* Fix: this read_lock was never released; balance it
		   as smi_timeout() does. */
		read_unlock(&xtime_lock);

		add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);

		add_timer(&(smi_info->si_timer));
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->timeout_restarts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
	}
#endif
}
781
/* Periodic timer body: run the state machine with the microseconds
   elapsed since the last run, then re-arm.  When an irq is working,
   only the long (SI_TIMEOUT_JIFFIES) interval is used; otherwise the
   interval shrinks whenever the state machine asks to be called back
   soon. */
static void smi_timeout(unsigned long data)
{
	struct smi_info *smi_info = (struct smi_info *) data;
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	unsigned long time_diff;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	/* Shutdown in progress: do not re-arm, just ack the stopper. */
	if (smi_info->stop_operation) {
		smi_info->timer_stopped = 1;
		return;
	}

	spin_lock_irqsave(&(smi_info->si_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	jiffies_now = jiffies;
	/* Elapsed jiffies -> microseconds for the state machine. */
	time_diff = ((jiffies_now - smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	spin_unlock_irqrestore(&(smi_info->si_lock), flags);

	smi_info->last_timeout_jiffies = jiffies_now;

	if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->long_timeouts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
		goto do_add_timer;
	}

	/* If the state machine asks for a short delay, then shorten
	   the timer timeout. */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->short_timeouts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
#if defined(CONFIG_HIGH_RES_TIMERS)
		read_lock(&xtime_lock);
		smi_info->si_timer.expires = jiffies;
		smi_info->si_timer.sub_expires
			= get_arch_cycles(smi_info->si_timer.expires);
		read_unlock(&xtime_lock);
		add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
#else
		smi_info->si_timer.expires = jiffies + 1;
#endif
	} else {
		spin_lock_irqsave(&smi_info->count_lock, flags);
		smi_info->long_timeouts++;
		spin_unlock_irqrestore(&smi_info->count_lock, flags);
		smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
#if defined(CONFIG_HIGH_RES_TIMERS)
		smi_info->si_timer.sub_expires = 0;
#endif
	}

 do_add_timer:
	add_timer(&(smi_info->si_timer));
}
850
/* Interrupt handler: count the interrupt and run the state machine,
   unless the driver is being torn down. */
static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	spin_lock(&smi_info->count_lock);
	smi_info->interrupts++;
	spin_unlock(&smi_info->count_lock);

	/* Ignore interrupts that race with shutdown. */
	if (smi_info->stop_operation)
		goto out;

#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	smi_event_handler(smi_info, 0);
 out:
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}
877
/* The ops table this driver presents to the upper IPMI message
   layer. */
static struct ipmi_smi_handlers handlers =
{
	.owner = THIS_MODULE,
	.sender = sender,
	.request_events = request_events,
	.set_run_to_completion = set_run_to_completion,
	.poll = poll,
};
886
887/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
888 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
889
890#define SI_MAX_PARMS 4
891#define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2)
892static struct smi_info *smi_infos[SI_MAX_DRIVERS] =
893{ NULL, NULL, NULL, NULL };
894
895#define DEVICE_NAME "ipmi_si"
896
897#define DEFAULT_KCS_IO_PORT 0xca2
898#define DEFAULT_SMIC_IO_PORT 0xca9
899#define DEFAULT_BT_IO_PORT 0xe4
900#define DEFAULT_REGSPACING 1
901
902static int si_trydefaults = 1;
903static char *si_type[SI_MAX_PARMS];
904#define MAX_SI_TYPE_STR 30
905static char si_type_str[MAX_SI_TYPE_STR];
906static unsigned long addrs[SI_MAX_PARMS];
907static int num_addrs;
908static unsigned int ports[SI_MAX_PARMS];
909static int num_ports;
910static int irqs[SI_MAX_PARMS];
911static int num_irqs;
912static int regspacings[SI_MAX_PARMS];
913static int num_regspacings = 0;
914static int regsizes[SI_MAX_PARMS];
915static int num_regsizes = 0;
916static int regshifts[SI_MAX_PARMS];
917static int num_regshifts = 0;
918static int slave_addrs[SI_MAX_PARMS];
919static int num_slave_addrs = 0;
920
921
922module_param_named(trydefaults, si_trydefaults, bool, 0);
923MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
924 " default scan of the KCS and SMIC interface at the standard"
925 " address");
926module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
927MODULE_PARM_DESC(type, "Defines the type of each interface, each"
928 " interface separated by commas. The types are 'kcs',"
929 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
930 " the first interface to kcs and the second to bt");
931module_param_array(addrs, long, &num_addrs, 0);
932MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
933 " addresses separated by commas. Only use if an interface"
934 " is in memory. Otherwise, set it to zero or leave"
935 " it blank.");
936module_param_array(ports, int, &num_ports, 0);
937MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
938 " addresses separated by commas. Only use if an interface"
939 " is a port. Otherwise, set it to zero or leave"
940 " it blank.");
941module_param_array(irqs, int, &num_irqs, 0);
942MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
943 " addresses separated by commas. Only use if an interface"
944 " has an interrupt. Otherwise, set it to zero or leave"
945 " it blank.");
946module_param_array(regspacings, int, &num_regspacings, 0);
947MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
948 " and each successive register used by the interface. For"
949 " instance, if the start address is 0xca2 and the spacing"
950 " is 2, then the second address is at 0xca4. Defaults"
951 " to 1.");
952module_param_array(regsizes, int, &num_regsizes, 0);
953MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
954 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
955 " 16-bit, 32-bit, or 64-bit register. Use this if you"
956 " the 8-bit IPMI register has to be read from a larger"
957 " register.");
958module_param_array(regshifts, int, &num_regshifts, 0);
959MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the."
960 " IPMI register, in bits. For instance, if the data"
961 " is read from a 32-bit word and the IPMI data is in"
962 " bit 8-15, then the shift would be 8");
963module_param_array(slave_addrs, int, &num_slave_addrs, 0);
964MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
965 " the controller. Normally this is 0x20, but can be"
966 " overridden by this parm. This is an array indexed"
967 " by interface number.");
968
969
970#define IPMI_MEM_ADDR_SPACE 1
971#define IPMI_IO_ADDR_SPACE 2
972
973#if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
974static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
975{
976 int i;
977
978 for (i = 0; i < SI_MAX_PARMS; ++i) {
979 /* Don't check our address. */
980 if (i == intf)
981 continue;
982 if (si_type[i] != NULL) {
983 if ((addr_space == IPMI_MEM_ADDR_SPACE &&
984 base_addr == addrs[i]) ||
985 (addr_space == IPMI_IO_ADDR_SPACE &&
986 base_addr == ports[i]))
987 return 0;
988 }
989 else
990 break;
991 }
992
993 return 1;
994}
995#endif
996
997static int std_irq_setup(struct smi_info *info)
998{
999 int rv;
1000
1001 if (!info->irq)
1002 return 0;
1003
1004 rv = request_irq(info->irq,
1005 si_irq_handler,
1006 SA_INTERRUPT,
1007 DEVICE_NAME,
1008 info);
1009 if (rv) {
1010 printk(KERN_WARNING
1011 "ipmi_si: %s unable to claim interrupt %d,"
1012 " running polled\n",
1013 DEVICE_NAME, info->irq);
1014 info->irq = 0;
1015 } else {
1016 printk(" Using irq %d\n", info->irq);
1017 }
1018
1019 return rv;
1020}
1021
1022static void std_irq_cleanup(struct smi_info *info)
1023{
1024 if (!info->irq)
1025 return;
1026
1027 free_irq(info->irq, info);
1028}
1029
1030static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1031{
1032 unsigned int *addr = io->info;
1033
1034 return inb((*addr)+(offset*io->regspacing));
1035}
1036
1037static void port_outb(struct si_sm_io *io, unsigned int offset,
1038 unsigned char b)
1039{
1040 unsigned int *addr = io->info;
1041
1042 outb(b, (*addr)+(offset * io->regspacing));
1043}
1044
1045static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1046{
1047 unsigned int *addr = io->info;
1048
1049 return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1050}
1051
1052static void port_outw(struct si_sm_io *io, unsigned int offset,
1053 unsigned char b)
1054{
1055 unsigned int *addr = io->info;
1056
1057 outw(b << io->regshift, (*addr)+(offset * io->regspacing));
1058}
1059
1060static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1061{
1062 unsigned int *addr = io->info;
1063
1064 return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff;
1065}
1066
1067static void port_outl(struct si_sm_io *io, unsigned int offset,
1068 unsigned char b)
1069{
1070 unsigned int *addr = io->info;
1071
1072 outl(b << io->regshift, (*addr)+(offset * io->regspacing));
1073}
1074
1075static void port_cleanup(struct smi_info *info)
1076{
1077 unsigned int *addr = info->io.info;
1078 int mapsize;
1079
1080 if (addr && (*addr)) {
1081 mapsize = ((info->io_size * info->io.regspacing)
1082 - (info->io.regspacing - info->io.regsize));
1083
1084 release_region (*addr, mapsize);
1085 }
1086 kfree(info);
1087}
1088
1089static int port_setup(struct smi_info *info)
1090{
1091 unsigned int *addr = info->io.info;
1092 int mapsize;
1093
1094 if (!addr || (!*addr))
1095 return -ENODEV;
1096
1097 info->io_cleanup = port_cleanup;
1098
1099 /* Figure out the actual inb/inw/inl/etc routine to use based
1100 upon the register size. */
1101 switch (info->io.regsize) {
1102 case 1:
1103 info->io.inputb = port_inb;
1104 info->io.outputb = port_outb;
1105 break;
1106 case 2:
1107 info->io.inputb = port_inw;
1108 info->io.outputb = port_outw;
1109 break;
1110 case 4:
1111 info->io.inputb = port_inl;
1112 info->io.outputb = port_outl;
1113 break;
1114 default:
1115 printk("ipmi_si: Invalid register size: %d\n",
1116 info->io.regsize);
1117 return -EINVAL;
1118 }
1119
1120 /* Calculate the total amount of memory to claim. This is an
1121 * unusual looking calculation, but it avoids claiming any
1122 * more memory than it has to. It will claim everything
1123 * between the first address to the end of the last full
1124 * register. */
1125 mapsize = ((info->io_size * info->io.regspacing)
1126 - (info->io.regspacing - info->io.regsize));
1127
1128 if (request_region(*addr, mapsize, DEVICE_NAME) == NULL)
1129 return -EIO;
1130 return 0;
1131}
1132
/* Build an smi_info for a port-mapped interface configured through the
   "ports" module-parameter slot intf_num.  On success returns 0 and
   stores the (caller-owned) allocation in *new_info. */
static int try_init_port(int intf_num, struct smi_info **new_info)
{
	struct smi_info *info;

	if (!ports[intf_num])
		return -ENODEV;

	/* Refuse an address already claimed by another interface. */
	if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
			      ports[intf_num]))
		return -ENODEV;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	info->io_setup = port_setup;
	info->io.info = &(ports[intf_num]);
	info->io.addr = NULL;
	info->io.regspacing = regspacings[intf_num];
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = regsizes[intf_num];
	if (!info->io.regsize)
		/* NOTE(review): the register-size default falls back to
		   DEFAULT_REGSPACING -- presumably both defaults share the
		   same value; confirm a DEFAULT_REGSIZE wasn't intended. */
		info->io.regsize = DEFAULT_REGSPACING;
	info->io.regshift = regshifts[intf_num];
	info->irq = 0;
	info->irq_setup = NULL;
	*new_info = info;

	/* No type given on the command line: assume KCS. */
	if (si_type[intf_num] == NULL)
		si_type[intf_num] = "kcs";

	printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n",
	       si_type[intf_num], ports[intf_num]);
	return 0;
}
1172
1173static unsigned char mem_inb(struct si_sm_io *io, unsigned int offset)
1174{
1175 return readb((io->addr)+(offset * io->regspacing));
1176}
1177
1178static void mem_outb(struct si_sm_io *io, unsigned int offset,
1179 unsigned char b)
1180{
1181 writeb(b, (io->addr)+(offset * io->regspacing));
1182}
1183
1184static unsigned char mem_inw(struct si_sm_io *io, unsigned int offset)
1185{
1186 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1187 && 0xff;
1188}
1189
static void mem_outw(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	/* NOTE(review): this issues a single-byte write (writeb) even
	   though the matching read side (mem_inw) uses a 16-bit readw;
	   with a non-zero regshift the shifted value may not land in
	   the byte actually written.  Verify writew wasn't intended. */
	writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
1195
1196static unsigned char mem_inl(struct si_sm_io *io, unsigned int offset)
1197{
1198 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1199 && 0xff;
1200}
1201
1202static void mem_outl(struct si_sm_io *io, unsigned int offset,
1203 unsigned char b)
1204{
1205 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1206}
1207
1208#ifdef readq
1209static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1210{
1211 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1212 && 0xff;
1213}
1214
static void mem_outq(struct si_sm_io *io, unsigned int offset,
		     unsigned char b)
{
	/* NOTE(review): 'b' is promoted to int before the shift, so a
	   regshift of 31 or more here is undefined behavior; this
	   assumes regshift stays small -- confirm against the callers
	   that set regshift. */
	writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
}
1220#endif
1221
1222static void mem_cleanup(struct smi_info *info)
1223{
1224 unsigned long *addr = info->io.info;
1225 int mapsize;
1226
1227 if (info->io.addr) {
1228 iounmap(info->io.addr);
1229
1230 mapsize = ((info->io_size * info->io.regspacing)
1231 - (info->io.regspacing - info->io.regsize));
1232
1233 release_mem_region(*addr, mapsize);
1234 }
1235 kfree(info);
1236}
1237
1238static int mem_setup(struct smi_info *info)
1239{
1240 unsigned long *addr = info->io.info;
1241 int mapsize;
1242
1243 if (!addr || (!*addr))
1244 return -ENODEV;
1245
1246 info->io_cleanup = mem_cleanup;
1247
1248 /* Figure out the actual readb/readw/readl/etc routine to use based
1249 upon the register size. */
1250 switch (info->io.regsize) {
1251 case 1:
1252 info->io.inputb = mem_inb;
1253 info->io.outputb = mem_outb;
1254 break;
1255 case 2:
1256 info->io.inputb = mem_inw;
1257 info->io.outputb = mem_outw;
1258 break;
1259 case 4:
1260 info->io.inputb = mem_inl;
1261 info->io.outputb = mem_outl;
1262 break;
1263#ifdef readq
1264 case 8:
1265 info->io.inputb = mem_inq;
1266 info->io.outputb = mem_outq;
1267 break;
1268#endif
1269 default:
1270 printk("ipmi_si: Invalid register size: %d\n",
1271 info->io.regsize);
1272 return -EINVAL;
1273 }
1274
1275 /* Calculate the total amount of memory to claim. This is an
1276 * unusual looking calculation, but it avoids claiming any
1277 * more memory than it has to. It will claim everything
1278 * between the first address to the end of the last full
1279 * register. */
1280 mapsize = ((info->io_size * info->io.regspacing)
1281 - (info->io.regspacing - info->io.regsize));
1282
1283 if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL)
1284 return -EIO;
1285
1286 info->io.addr = ioremap(*addr, mapsize);
1287 if (info->io.addr == NULL) {
1288 release_mem_region(*addr, mapsize);
1289 return -EIO;
1290 }
1291 return 0;
1292}
1293
/* Build an smi_info for a memory-mapped interface configured through
   the "addrs" module-parameter slot intf_num.  On success returns 0
   and stores the (caller-owned) allocation in *new_info. */
static int try_init_mem(int intf_num, struct smi_info **new_info)
{
	struct smi_info *info;

	if (!addrs[intf_num])
		return -ENODEV;

	/* Refuse an address already claimed by another interface. */
	if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
			      addrs[intf_num]))
		return -ENODEV;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	info->io_setup = mem_setup;
	info->io.info = &addrs[intf_num];
	info->io.addr = NULL;
	info->io.regspacing = regspacings[intf_num];
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = regsizes[intf_num];
	if (!info->io.regsize)
		/* NOTE(review): register-size default falls back to
		   DEFAULT_REGSPACING, same as in try_init_port(); confirm a
		   DEFAULT_REGSIZE wasn't intended. */
		info->io.regsize = DEFAULT_REGSPACING;
	info->io.regshift = regshifts[intf_num];
	info->irq = 0;
	info->irq_setup = NULL;
	*new_info = info;

	/* No type given on the command line: assume KCS. */
	if (si_type[intf_num] == NULL)
		si_type[intf_num] = "kcs";

	printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n",
	       si_type[intf_num], addrs[intf_num]);
	return 0;
}
1333
1334
1335#ifdef CONFIG_ACPI_INTERPRETER
1336
1337#include <linux/acpi.h>
1338
1339/* Once we get an ACPI failure, we don't try any more, because we go
1340 through the tables sequentially. Once we don't find a table, there
1341 are no more. */
1342static int acpi_failure = 0;
1343
/* For GPE-type interrupts.  ACPI GPE handler: counts the event and,
   unless the interface is shutting down, kicks the SI state machine.
   Always reports the interrupt as handled. */
static u32 ipmi_acpi_gpe(void *context)
{
	struct smi_info *smi_info = context;
	unsigned long flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	/* Take the interface lock with interrupts off; the count lock
	   nests inside it. */
	spin_lock_irqsave(&(smi_info->si_lock), flags);

	spin_lock(&smi_info->count_lock);
	smi_info->interrupts++;
	spin_unlock(&smi_info->count_lock);

	/* Interface is being torn down: don't touch the state machine. */
	if (smi_info->stop_operation)
		goto out;

#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	smi_event_handler(smi_info, 0);
 out:
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);

	return ACPI_INTERRUPT_HANDLED;
}
1372
1373static int acpi_gpe_irq_setup(struct smi_info *info)
1374{
1375 acpi_status status;
1376
1377 if (!info->irq)
1378 return 0;
1379
1380 /* FIXME - is level triggered right? */
1381 status = acpi_install_gpe_handler(NULL,
1382 info->irq,
1383 ACPI_GPE_LEVEL_TRIGGERED,
1384 &ipmi_acpi_gpe,
1385 info);
1386 if (status != AE_OK) {
1387 printk(KERN_WARNING
1388 "ipmi_si: %s unable to claim ACPI GPE %d,"
1389 " running polled\n",
1390 DEVICE_NAME, info->irq);
1391 info->irq = 0;
1392 return -EINVAL;
1393 } else {
1394 printk(" Using ACPI GPE %d\n", info->irq);
1395 return 0;
1396 }
1397}
1398
1399static void acpi_gpe_irq_cleanup(struct smi_info *info)
1400{
1401 if (!info->irq)
1402 return;
1403
1404 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1405}
1406
1407/*
1408 * Defined at
1409 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1410 */
struct SPMITable {
	s8	Signature[4];		/* ACPI table signature */
	u32	Length;
	u8	Revision;
	u8	Checksum;
	s8	OEMID[6];
	s8	OEMTableID[8];
	s8	OEMRevision[4];
	s8	CreatorID[4];
	s8	CreatorRevision[4];
	u8	InterfaceType;		/* 1=KCS, 2=SMIC, 3=BT (see try_init_acpi) */
	u8	IPMIlegacy;		/* try_init_acpi() requires this to be 1 */
	s16	SpecificationRevision;

	/*
	 * Bit 0 - SCI interrupt supported
	 * Bit 1 - I/O APIC/SAPIC
	 */
	u8	InterruptType;

	/* If bit 0 of InterruptType is set, then this is the SCI
	   interrupt in the GPEx_STS register. */
	u8	GPE;

	s16	Reserved;

	/* If bit 1 of InterruptType is set, then this is the I/O
	   APIC/SAPIC interrupt. */
	u32	GlobalSystemInterrupt;

	/* The actual register address. */
	struct acpi_generic_address addr;

	u8	UID[4];

	s8	spmi_id[1]; /* A '\0' terminated array starts here. */
};
1448
/* Build an smi_info from the ACPI SPMI table instance intf_num+1.
   Fills the module-parameter slots (addrs/ports/regspacings/...) from
   the table so later bookkeeping sees consistent values.  Returns 0
   and stores the caller-owned allocation in *new_info on success. */
static int try_init_acpi(int intf_num, struct smi_info **new_info)
{
	struct smi_info *info;
	acpi_status status;
	struct SPMITable *spmi;
	char *io_type;
	u8 addr_space;

	/* Tables are fetched sequentially; once one lookup fails there
	   are no more, so don't keep asking. */
	if (acpi_failure)
		return -ENODEV;

	status = acpi_get_firmware_table("SPMI", intf_num+1,
					 ACPI_LOGICAL_ADDRESSING,
					 (struct acpi_table_header **) &spmi);
	if (status != AE_OK) {
		acpi_failure = 1;
		return -ENODEV;
	}

	if (spmi->IPMIlegacy != 1) {
		printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
		return -ENODEV;
	}

	/* Refuse an address already claimed by another interface. */
	if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		addr_space = IPMI_MEM_ADDR_SPACE;
	else
		addr_space = IPMI_IO_ADDR_SPACE;
	if (!is_new_interface(-1, addr_space, spmi->addr.address))
		return -ENODEV;

	/* A zero register width would make the spacing/size setup below
	   meaningless; treat it as a broken table. */
	if (!spmi->addr.register_bit_width) {
		acpi_failure = 1;
		return -ENODEV;
	}

	/* Figure out the interface type. */
	switch (spmi->InterfaceType)
	{
	case 1:	/* KCS */
		si_type[intf_num] = "kcs";
		break;

	case 2:	/* SMIC */
		si_type[intf_num] = "smic";
		break;

	case 3:	/* BT */
		si_type[intf_num] = "bt";
		break;

	default:
		printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
			spmi->InterfaceType);
		return -EIO;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	/* Choose the interrupt mechanism the table advertises: GPE,
	   APIC/SAPIC, or none (polled). */
	if (spmi->InterruptType & 1) {
		/* We've got a GPE interrupt. */
		info->irq = spmi->GPE;
		info->irq_setup = acpi_gpe_irq_setup;
		info->irq_cleanup = acpi_gpe_irq_cleanup;
	} else if (spmi->InterruptType & 2) {
		/* We've got an APIC/SAPIC interrupt. */
		info->irq = spmi->GlobalSystemInterrupt;
		info->irq_setup = std_irq_setup;
		info->irq_cleanup = std_irq_cleanup;
	} else {
		/* Use the default interrupt setting. */
		info->irq = 0;
		info->irq_setup = NULL;
	}

	/* Register spacing/size come from the table's bit width; the
	   shift comes from its bit offset. */
	regspacings[intf_num] = spmi->addr.register_bit_width / 8;
	info->io.regspacing = spmi->addr.register_bit_width / 8;
	regsizes[intf_num] = regspacings[intf_num];
	info->io.regsize = regsizes[intf_num];
	regshifts[intf_num] = spmi->addr.register_bit_offset;
	info->io.regshift = regshifts[intf_num];

	if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
		io_type = "memory";
		info->io_setup = mem_setup;
		addrs[intf_num] = spmi->addr.address;
		info->io.info = &(addrs[intf_num]);
	} else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		io_type = "I/O";
		info->io_setup = port_setup;
		ports[intf_num] = spmi->addr.address;
		info->io.info = &(ports[intf_num]);
	} else {
		kfree(info);
		printk("ipmi_si: Unknown ACPI I/O Address type\n");
		return -EIO;
	}

	*new_info = info;

	printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n",
	       si_type[intf_num], io_type, (unsigned long) spmi->addr.address);
	return 0;
}
1558#endif
1559
1560#ifdef CONFIG_X86
/* One decoded SMBIOS type-38 (IPMI device) record, as filled in by
   decode_dmi(). */
typedef struct dmi_ipmi_data
{
	u8              type;		/* interface type byte from the record */
	u8              addr_space;	/* IPMI_MEM_ADDR_SPACE or IPMI_IO_ADDR_SPACE */
	unsigned long   base_addr;	/* register base with the LSB restored */
	u8              irq;
	u8              offset;		/* register spacing in bytes */
	u8              slave_addr;
} dmi_ipmi_data_t;

/* Records decoded so far, and how many of them are valid. */
static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
static int dmi_data_entries;

/* Header common to every SMBIOS structure. */
typedef struct dmi_header
{
	u8	type;
	u8	length;
	u16	handle;
} dmi_header_t;
1580
/* Decode one SMBIOS type-38 structure into dmi_data[intf_num].
   Returns 0 and bumps dmi_data_entries on success; -1/-EIO if the
   record is unusable or duplicates a known interface. */
static int decode_dmi(dmi_header_t *dm, int intf_num)
{
	u8		*data = (u8 *)dm;
	unsigned long  	base_addr;
	u8		reg_spacing;
	u8             	len = dm->length;
	dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;

	ipmi_data->type = data[4];

	/* NOTE(review): this copies sizeof(unsigned long) bytes from the
	   record's address field -- 4 bytes on a 32-bit kernel; confirm
	   that is intended for the (8-byte) SMBIOS address field. */
	memcpy(&base_addr, data+8, sizeof(unsigned long));
	if (len >= 0x11) {
		/* Bit 0 of the address distinguishes I/O from memory. */
		if (base_addr & 1) {
			/* I/O */
			base_addr &= 0xFFFE;
			ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
		}
		else {
			/* Memory */
			ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE;
		}
		/* If bit 4 of byte 0x10 is set, then the lsb for the address
		   is odd. */
		ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);

		ipmi_data->irq = data[0x11];

		/* The top two bits of byte 0x10 hold the register spacing. */
		reg_spacing = (data[0x10] & 0xC0) >> 6;
		switch(reg_spacing){
		case 0x00: /* Byte boundaries */
			ipmi_data->offset = 1;
			break;
		case 0x01: /* 32-bit boundaries */
			ipmi_data->offset = 4;
			break;
		case 0x02: /* 16-byte boundaries */
			ipmi_data->offset = 16;
			break;
		default:
			/* Some other interface, just ignore it. */
			return -EIO;
		}
	} else {
		/* Old DMI spec. */
		ipmi_data->base_addr = base_addr;
		ipmi_data->addr_space = IPMI_IO_ADDR_SPACE;
		ipmi_data->offset = 1;
	}

	ipmi_data->slave_addr = data[6];

	/* Keep the record only if it doesn't duplicate a configured
	   interface; otherwise wipe the slot for reuse. */
	if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
		dmi_data_entries++;
		return 0;
	}

	memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t));

	return -1;
}
1642
1643static int dmi_table(u32 base, int len, int num)
1644{
1645 u8 *buf;
1646 struct dmi_header *dm;
1647 u8 *data;
1648 int i=1;
1649 int status=-1;
1650 int intf_num = 0;
1651
1652 buf = ioremap(base, len);
1653 if(buf==NULL)
1654 return -1;
1655
1656 data = buf;
1657
1658 while(i<num && (data - buf) < len)
1659 {
1660 dm=(dmi_header_t *)data;
1661
1662 if((data-buf+dm->length) >= len)
1663 break;
1664
1665 if (dm->type == 38) {
1666 if (decode_dmi(dm, intf_num) == 0) {
1667 intf_num++;
1668 if (intf_num >= SI_MAX_DRIVERS)
1669 break;
1670 }
1671 }
1672
1673 data+=dm->length;
1674 while((data-buf) < len && (*data || data[1]))
1675 data++;
1676 data+=2;
1677 i++;
1678 }
1679 iounmap(buf);
1680
1681 return status;
1682}
1683
1684inline static int dmi_checksum(u8 *buf)
1685{
1686 u8 sum=0;
1687 int a;
1688
1689 for(a=0; a<15; a++)
1690 sum+=buf[a];
1691 return (sum==0);
1692}
1693
/* Scan the legacy BIOS area (0xF0000-0xFFFFF) on 16-byte boundaries
   for the "_DMI_" anchor and, when one validates, walk its structure
   table.  Returns 0 if a table reported success, -1 otherwise. */
static int dmi_decode(void)
{
	u8 buf[15];
	u32 fp=0xF0000;

#ifdef CONFIG_SIMNOW
	return -1;
#endif

	while(fp < 0xFFFFF)
	{
		isa_memcpy_fromio(buf, fp, 15);
		if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
		{
			/* Anchor fields: structure count, table length,
			   and the table's physical address. */
			u16 num=buf[13]<<8|buf[12];
			u16 len=buf[7]<<8|buf[6];
			u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];

			if(dmi_table(base, len, num) == 0)
				return 0;
		}
		fp+=16;
	}

	return -1;
}
1720
/* Build an smi_info from the SMBIOS record previously decoded into
   dmi_data[intf_num] (address duplication was already rejected by
   decode_dmi()).  Returns 0 and stores the caller-owned allocation in
   *new_info on success. */
static int try_init_smbios(int intf_num, struct smi_info **new_info)
{
	struct smi_info   *info;
	dmi_ipmi_data_t   *ipmi_data = dmi_data+intf_num;
	char              *io_type;

	if (intf_num >= dmi_data_entries)
		return -ENODEV;

	switch(ipmi_data->type) {
		case 0x01: /* KCS */
			si_type[intf_num] = "kcs";
			break;
		case 0x02: /* SMIC */
			si_type[intf_num] = "smic";
			break;
		case 0x03: /* BT */
			si_type[intf_num] = "bt";
			break;
		default:
			return -EIO;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	if (ipmi_data->addr_space == 1) {
		io_type = "memory";
		info->io_setup = mem_setup;
		addrs[intf_num] = ipmi_data->base_addr;
		info->io.info = &(addrs[intf_num]);
	} else if (ipmi_data->addr_space == 2) {
		io_type = "I/O";
		info->io_setup = port_setup;
		ports[intf_num] = ipmi_data->base_addr;
		info->io.info = &(ports[intf_num]);
	} else {
		kfree(info);
		printk("ipmi_si: Unknown SMBIOS I/O Address type.\n");
		return -EIO;
	}

	/* Register spacing comes from the record; size defaults. */
	regspacings[intf_num] = ipmi_data->offset;
	info->io.regspacing = regspacings[intf_num];
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = DEFAULT_REGSPACING;
	info->io.regshift = regshifts[intf_num];

	info->slave_addr = ipmi_data->slave_addr;

	/* Stash the IRQ for init_one_smi()'s std_irq_setup path. */
	irqs[intf_num] = ipmi_data->irq;

	*new_info = info;

	printk("ipmi_si: Found SMBIOS-specified state machine at %s"
	       " address 0x%lx, slave address 0x%x\n",
	       io_type, (unsigned long)ipmi_data->base_addr,
	       ipmi_data->slave_addr);
	return 0;
}
1786#endif /* CONFIG_X86 */
1787
1788#ifdef CONFIG_PCI
1789
1790#define PCI_ERMC_CLASSCODE 0x0C0700
1791#define PCI_HP_VENDOR_ID 0x103C
1792#define PCI_MMC_DEVICE_ID 0x121A
1793#define PCI_MMC_ADDR_CW 0x10
1794
1795/* Avoid more than one attempt to probe pci smic. */
1796static int pci_smic_checked = 0;
1797
/* Probe for an HP PCI SMIC (either the MMC device or an eRMC-class
   device with an HP subsystem vendor).  Only one attempt is ever
   made.  Returns 0 and stores the caller-owned allocation in
   *new_info on success. */
static int find_pci_smic(int intf_num, struct smi_info **new_info)
{
	struct smi_info  *info;
	int              error;
	struct pci_dev   *pci_dev = NULL;
	u16    		 base_addr;
	int              fe_rmc = 0;

	if (pci_smic_checked)
		return -ENODEV;

	pci_smic_checked = 1;

	/* pci_get_* take a device reference; every exit path below must
	   drop it with pci_dev_put(). */
	if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
				      NULL)))
		;
	else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) &&
		 pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
		fe_rmc = 1;
	else
		return -ENODEV;

	error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
	if (error)
	{
		pci_dev_put(pci_dev);
		printk(KERN_ERR
		       "ipmi_si: pci_read_config_word() failed (%d).\n",
		       error);
		return -ENODEV;
	}

	/* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
	if (!(base_addr & 0x0001))
	{
		pci_dev_put(pci_dev);
		printk(KERN_ERR
		       "ipmi_si: memory mapped I/O not supported for PCI"
		       " smic.\n");
		return -ENODEV;
	}

	base_addr &= 0xFFFE;
	if (!fe_rmc)
		/* Data register starts at base address + 1 in eRMC */
		++base_addr;

	/* Refuse an address already claimed by another interface. */
	if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
		pci_dev_put(pci_dev);
		return -ENODEV;
	}

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pci_dev_put(pci_dev);
		printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
		return -ENOMEM;
	}
	memset(info, 0, sizeof(*info));

	info->io_setup = port_setup;
	ports[intf_num] = base_addr;
	info->io.info = &(ports[intf_num]);
	info->io.regspacing = regspacings[intf_num];
	if (!info->io.regspacing)
		info->io.regspacing = DEFAULT_REGSPACING;
	info->io.regsize = DEFAULT_REGSPACING;
	info->io.regshift = regshifts[intf_num];

	*new_info = info;

	/* Stash the IRQ for init_one_smi()'s std_irq_setup path. */
	irqs[intf_num] = pci_dev->irq;
	si_type[intf_num] = "smic";

	printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n",
	       (long unsigned int) base_addr);

	pci_dev_put(pci_dev);
	return 0;
}
1878#endif /* CONFIG_PCI */
1879
/* Last-resort discovery: try bus-specific probes (currently only the
   HP PCI SMIC).  Returns 0 with *new_info filled on success. */
static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
{
#ifdef CONFIG_PCI
	if (find_pci_smic(intf_num, new_info)==0)
		return 0;
#endif
	/* Include other methods here. */

	return -ENODEV;
}
1890
1891
1892static int try_get_dev_id(struct smi_info *smi_info)
1893{
1894 unsigned char msg[2];
1895 unsigned char *resp;
1896 unsigned long resp_len;
1897 enum si_sm_result smi_result;
1898 int rv = 0;
1899
1900 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1901 if (!resp)
1902 return -ENOMEM;
1903
1904 /* Do a Get Device ID command, since it comes back with some
1905 useful info. */
1906 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1907 msg[1] = IPMI_GET_DEVICE_ID_CMD;
1908 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
1909
1910 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1911 for (;;)
1912 {
1913 if (smi_result == SI_SM_CALL_WITH_DELAY) {
1914 set_current_state(TASK_UNINTERRUPTIBLE);
1915 schedule_timeout(1);
1916 smi_result = smi_info->handlers->event(
1917 smi_info->si_sm, 100);
1918 }
1919 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1920 {
1921 smi_result = smi_info->handlers->event(
1922 smi_info->si_sm, 0);
1923 }
1924 else
1925 break;
1926 }
1927 if (smi_result == SI_SM_HOSED) {
1928 /* We couldn't get the state machine to run, so whatever's at
1929 the port is probably not an IPMI SMI interface. */
1930 rv = -ENODEV;
1931 goto out;
1932 }
1933
1934 /* Otherwise, we got some data. */
1935 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
1936 resp, IPMI_MAX_MSG_LENGTH);
1937 if (resp_len < 6) {
1938 /* That's odd, it should be longer. */
1939 rv = -EINVAL;
1940 goto out;
1941 }
1942
1943 if ((resp[1] != IPMI_GET_DEVICE_ID_CMD) || (resp[2] != 0)) {
1944 /* That's odd, it shouldn't be able to fail. */
1945 rv = -EINVAL;
1946 goto out;
1947 }
1948
1949 /* Record info from the get device id, in case we need it. */
1950 smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
1951 smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
1952 smi_info->ipmi_si_fw_rev_minor = resp[6];
1953 smi_info->ipmi_version_major = resp[7] & 0xf;
1954 smi_info->ipmi_version_minor = resp[7] >> 4;
1955
1956 out:
1957 kfree(resp);
1958 return rv;
1959}
1960
1961static int type_file_read_proc(char *page, char **start, off_t off,
1962 int count, int *eof, void *data)
1963{
1964 char *out = (char *) page;
1965 struct smi_info *smi = data;
1966
1967 switch (smi->si_type) {
1968 case SI_KCS:
1969 return sprintf(out, "kcs\n");
1970 case SI_SMIC:
1971 return sprintf(out, "smic\n");
1972 case SI_BT:
1973 return sprintf(out, "bt\n");
1974 default:
1975 return 0;
1976 }
1977}
1978
/* /proc read handler for "si_stats": dump the interface's counters,
   one "name: value" pair per line.  Returns the number of bytes
   written to the page. */
static int stat_file_read_proc(char *page, char **start, off_t off,
			       int count, int *eof, void *data)
{
	char            *out = (char *) page;
	struct smi_info *smi = data;

	out += sprintf(out, "interrupts_enabled: %d\n",
		       smi->irq && !smi->interrupt_disabled);
	out += sprintf(out, "short_timeouts: %ld\n",
		       smi->short_timeouts);
	out += sprintf(out, "long_timeouts: %ld\n",
		       smi->long_timeouts);
	out += sprintf(out, "timeout_restarts: %ld\n",
		       smi->timeout_restarts);
	out += sprintf(out, "idles: %ld\n",
		       smi->idles);
	out += sprintf(out, "interrupts: %ld\n",
		       smi->interrupts);
	out += sprintf(out, "attentions: %ld\n",
		       smi->attentions);
	out += sprintf(out, "flag_fetches: %ld\n",
		       smi->flag_fetches);
	out += sprintf(out, "hosed_count: %ld\n",
		       smi->hosed_count);
	out += sprintf(out, "complete_transactions: %ld\n",
		       smi->complete_transactions);
	out += sprintf(out, "events: %ld\n",
		       smi->events);
	out += sprintf(out, "watchdog_pretimeouts: %ld\n",
		       smi->watchdog_pretimeouts);
	out += sprintf(out, "incoming_messages: %ld\n",
		       smi->incoming_messages);

	return (out - ((char *) page));
}
2014
/* Returns 0 if initialized, or negative on an error.  Discovers,
   probes, and registers one SMI interface slot: configuration order is
   explicit memory address, explicit port, then (when defaults are
   allowed) ACPI SPMI, SMBIOS/DMI, and PCI.  On success *smi owns the
   new interface; on failure everything acquired is torn down. */
static int init_one_smi(int intf_num, struct smi_info **smi)
{
	int rv;
	struct smi_info *new_smi;


	rv = try_init_mem(intf_num, &new_smi);
	if (rv)
		rv = try_init_port(intf_num, &new_smi);
#ifdef CONFIG_ACPI_INTERPRETER
	if ((rv) && (si_trydefaults)) {
		rv = try_init_acpi(intf_num, &new_smi);
	}
#endif
#ifdef CONFIG_X86
	if ((rv) && (si_trydefaults)) {
		rv = try_init_smbios(intf_num, &new_smi);
	}
#endif
	if ((rv) && (si_trydefaults)) {
		rv = try_init_plug_and_play(intf_num, &new_smi);
	}


	if (rv)
		return rv;

	/* So we know not to free it unless we have allocated one. */
	new_smi->intf = NULL;
	new_smi->si_sm = NULL;
	new_smi->handlers = NULL;

	/* Probes that didn't supply their own IRQ mechanism get the
	   standard request_irq()-based one, with the IRQ taken from
	   the module parameter (or probe-filled) slot. */
	if (!new_smi->irq_setup) {
		new_smi->irq = irqs[intf_num];
		new_smi->irq_setup = std_irq_setup;
		new_smi->irq_cleanup = std_irq_cleanup;
	}

	/* Default to KCS if no type is specified. */
	if (si_type[intf_num] == NULL) {
		if (si_trydefaults)
			si_type[intf_num] = "kcs";
		else {
			rv = -EINVAL;
			goto out_err;
		}
	}

	/* Set up the state machine to use. */
	if (strcmp(si_type[intf_num], "kcs") == 0) {
		new_smi->handlers = &kcs_smi_handlers;
		new_smi->si_type = SI_KCS;
	} else if (strcmp(si_type[intf_num], "smic") == 0) {
		new_smi->handlers = &smic_smi_handlers;
		new_smi->si_type = SI_SMIC;
	} else if (strcmp(si_type[intf_num], "bt") == 0) {
		new_smi->handlers = &bt_smi_handlers;
		new_smi->si_type = SI_BT;
	} else {
		/* No support for anything else yet. */
		rv = -EIO;
		goto out_err;
	}

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {
		printk(" Could not allocate state machine memory\n");
		rv = -ENOMEM;
		goto out_err;
	}
	new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
							&new_smi->io);

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io_setup(new_smi);
	if (rv) {
		printk(" Could not set up I/O space\n");
		goto out_err;
	}

	spin_lock_init(&(new_smi->si_lock));
	spin_lock_init(&(new_smi->msg_lock));
	spin_lock_init(&(new_smi->count_lock));

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		rv = -ENODEV;
		goto out_err;
	}

	/* Attempt a get device id command.  If it fails, we probably
	   don't have a SMI here. */
	rv = try_get_dev_id(new_smi);
	if (rv)
		goto out_err;

	/* Try to claim any interrupts. */
	new_smi->irq_setup(new_smi);

	INIT_LIST_HEAD(&(new_smi->xmit_msgs));
	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = 0;

	new_smi->interrupt_disabled = 0;
	new_smi->timer_stopped = 0;
	new_smi->stop_operation = 0;

	/* Start clearing the flags before we enable interrupts or the
	   timer to avoid racing with the timer. */
	start_clear_flags(new_smi);
	/* IRQ is defined to be set when non-zero. */
	if (new_smi->irq)
		new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;

	/* The ipmi_register_smi() code does some operations to
	   determine the channel information, so we must be ready to
	   handle operations before it is called.  This means we have
	   to stop the timer if we get an error after this point. */
	init_timer(&(new_smi->si_timer));
	new_smi->si_timer.data = (long) new_smi;
	new_smi->si_timer.function = smi_timeout;
	new_smi->last_timeout_jiffies = jiffies;
	new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
	add_timer(&(new_smi->si_timer));

	rv = ipmi_register_smi(&handlers,
			       new_smi,
			       new_smi->ipmi_version_major,
			       new_smi->ipmi_version_minor,
			       new_smi->slave_addr,
			       &(new_smi->intf));
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to register device: error %d\n",
		       rv);
		goto out_err_stop_timer;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
				     type_file_read_proc, NULL,
				     new_smi, THIS_MODULE);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to create proc entry: %d\n",
		       rv);
		goto out_err_stop_timer;
	}

	rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
				     stat_file_read_proc, NULL,
				     new_smi, THIS_MODULE);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to create proc entry: %d\n",
		       rv);
		goto out_err_stop_timer;
	}

	*smi = new_smi;

	printk(" IPMI %s interface initialized\n", si_type[intf_num]);

	return 0;

 out_err_stop_timer:
	/* The timer was started above; make sure it has fully stopped
	   before tearing the interface down. */
	new_smi->stop_operation = 1;

	/* Wait for the timer to stop.  This avoids problems with race
	   conditions removing the timer here. */
	while (!new_smi->timer_stopped) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

 out_err:
	if (new_smi->intf)
		ipmi_unregister_smi(new_smi->intf);

	new_smi->irq_cleanup(new_smi);

	/* Wait until we know that we are out of any interrupt
	   handlers might have been running before we freed the
	   interrupt. */
	synchronize_kernel();

	if (new_smi->si_sm) {
		if (new_smi->handlers)
			new_smi->handlers->cleanup(new_smi->si_sm);
		kfree(new_smi->si_sm);
	}
	/* io_cleanup also frees new_smi itself. */
	new_smi->io_cleanup(new_smi);

	return rv;
}
2213
/* Module entry point: parse the si_type parameter string, run DMI
   discovery (x86), then initialize every configured interface slot,
   falling back to the default KCS/SMIC/BT I/O ports for slot 0 when
   defaults are allowed.  Fails only if no interface comes up. */
static __init int init_ipmi_si(void)
{
	int      rv = 0;
	int      pos = 0;
	int      i;
	char     *str;

	/* Only initialize once. */
	if (initialized)
		return 0;
	initialized = 1;

	/* Parse out the si_type string into its components.  The
	   string is split in place on commas, one entry per slot. */
	str = si_type_str;
	if (*str != '\0') {
		for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
			si_type[i] = str;
			str = strchr(str, ',');
			if (str) {
				*str = '\0';
				str++;
			} else {
				break;
			}
		}
	}

	printk(KERN_INFO "IPMI System Interface driver version "
	       IPMI_SI_VERSION);
	if (kcs_smi_handlers.version)
		printk(", KCS version %s", kcs_smi_handlers.version);
	if (smic_smi_handlers.version)
		printk(", SMIC version %s", smic_smi_handlers.version);
	if (bt_smi_handlers.version)
		printk(", BT version %s", bt_smi_handlers.version);
	printk("\n");

#ifdef CONFIG_X86
	/* Populate dmi_data[] from SMBIOS before probing. */
	dmi_decode();
#endif

	rv = init_one_smi(0, &(smi_infos[pos]));
	if (rv && !ports[0] && si_trydefaults) {
		/* If we are trying defaults and the initial port is
		   not set, then set it. */
		si_type[0] = "kcs";
		ports[0] = DEFAULT_KCS_IO_PORT;
		rv = init_one_smi(0, &(smi_infos[pos]));
		if (rv) {
			/* No KCS - try SMIC */
			si_type[0] = "smic";
			ports[0] = DEFAULT_SMIC_IO_PORT;
			rv = init_one_smi(0, &(smi_infos[pos]));
		}
		if (rv) {
			/* No SMIC - try BT */
			si_type[0] = "bt";
			ports[0] = DEFAULT_BT_IO_PORT;
			rv = init_one_smi(0, &(smi_infos[pos]));
		}
	}
	if (rv == 0)
		pos++;

	/* Initialize the remaining explicitly-configured slots,
	   packing successes into smi_infos[]. */
	for (i=1; i < SI_MAX_PARMS; i++) {
		rv = init_one_smi(i, &(smi_infos[pos]));
		if (rv == 0)
			pos++;
	}

	if (smi_infos[0] == NULL) {
		printk("ipmi_si: Unable to find any System Interface(s)\n");
		return -ENODEV;
	}

	return 0;
}
2290module_init(init_ipmi_si);
2291
/* Tear down one interface: stop interrupts and the timer, drain any
   in-flight transaction, unregister from the message handler, and
   free the state machine and I/O resources (which also frees
   to_clean itself). */
static void __exit cleanup_one_si(struct smi_info *to_clean)
{
	int           rv;
	unsigned long flags;

	if (! to_clean)
		return;

	/* Tell the timer and interrupt handlers that we are shutting
	   down. */
	spin_lock_irqsave(&(to_clean->si_lock), flags);
	spin_lock(&(to_clean->msg_lock));

	to_clean->stop_operation = 1;

	to_clean->irq_cleanup(to_clean);

	spin_unlock(&(to_clean->msg_lock));
	spin_unlock_irqrestore(&(to_clean->si_lock), flags);

	/* Wait until we know that we are out of any interrupt
	   handlers might have been running before we freed the
	   interrupt. */
	synchronize_kernel();

	/* Wait for the timer to stop.  This avoids problems with race
	   conditions removing the timer here. */
	while (!to_clean->timer_stopped) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* Interrupts and timeouts are stopped, now make sure the
	   interface is in a clean state. */
	while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) {
		poll(to_clean);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	rv = ipmi_unregister_smi(to_clean->intf);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to unregister device: errno=%d\n",
		       rv);
	}

	to_clean->handlers->cleanup(to_clean->si_sm);

	kfree(to_clean->si_sm);

	/* io_cleanup also frees to_clean itself. */
	to_clean->io_cleanup(to_clean);
}
2345
2346static __exit void cleanup_ipmi_si(void)
2347{
2348 int i;
2349
2350 if (!initialized)
2351 return;
2352
2353 for (i=0; i<SI_MAX_DRIVERS; i++) {
2354 cleanup_one_si(smi_infos[i]);
2355 }
2356}
2357module_exit(cleanup_ipmi_si);
2358
2359MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
new file mode 100644
index 000000000000..a0212b004016
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -0,0 +1,120 @@
1/*
2 * ipmi_si_sm.h
3 *
4 * State machine interface for low-level IPMI system management
5 * interface state machines. This code is the interface between
6 * the ipmi_smi code (that handles the policy of a KCS, SMIC, or
7 * BT interface) and the actual low-level state machine.
8 *
9 * Author: MontaVista Software, Inc.
10 * Corey Minyard <minyard@mvista.com>
11 * source@mvista.com
12 *
13 * Copyright 2002 MontaVista Software Inc.
14 *
15 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the
17 * Free Software Foundation; either version 2 of the License, or (at your
18 * option) any later version.
19 *
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
27 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
29 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
30 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * You should have received a copy of the GNU General Public License along
33 * with this program; if not, write to the Free Software Foundation, Inc.,
34 * 675 Mass Ave, Cambridge, MA 02139, USA.
35 */
36
37/* This is defined by the state machines themselves, it is an opaque
38 data type for them to use. */
39struct si_sm_data;
40
/* The structure for doing I/O in the state machine.  The state
   machine doesn't have the actual I/O routines, they are done through
   this interface. */
struct si_sm_io
{
	/* Read/write one byte at the given register offset. */
	unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
	void (*outputb)(struct si_sm_io *io,
			unsigned int  offset,
			unsigned char b);

	/* Generic info used by the actual handling routines, the
	   state machine shouldn't touch these.
	   NOTE(review): regspacing/regsize/regshift presumably describe
	   the hardware register layout (gap, access width, bit shift)
	   as consumed by the inputb/outputb implementations in
	   ipmi_si_intf.c -- verify there. */
	void *info;
	void *addr;
	int  regspacing;
	int  regsize;
	int  regshift;
};
59
/* Results of SMI events: what the driver should do next after a call
   to a state machine's event() handler. */
enum si_sm_result
{
	SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
	SI_SM_CALL_WITH_DELAY,	/* Delay some before calling again. */
	SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
	SI_SM_IDLE,		/* The SM is in idle state. */
	SI_SM_HOSED,		/* The hardware violated the state machine. */
	SI_SM_ATTN		/* The hardware is asserting attn and the
				   state machine is idle. */
};
71
/* Handlers for the SMI state machine.  Each low-level interface type
   (KCS, SMIC, BT) supplies one of these tables; the generic driver
   talks to the hardware only through it. */
struct si_sm_handlers
{
	/* Put the version number of the state machine here so the
	   upper layer can print it. */
	char *version;

	/* Initialize the data and return the amount of I/O space to
	   reserve for the space. */
	unsigned int (*init_data)(struct si_sm_data *smi,
				  struct si_sm_io   *io);

	/* Start a new transaction in the state machine.  This will
	   return -2 if the state machine is not idle, -1 if the size
	   is invalid (to large or too small), or 0 if the transaction
	   is successfully completed. */
	int (*start_transaction)(struct si_sm_data *smi,
				 unsigned char *data, unsigned int size);

	/* Return the results after the transaction.  This will return
	   -1 if the buffer is too small, zero if no transaction is
	   present, or the actual length of the result data. */
	int (*get_result)(struct si_sm_data *smi,
			  unsigned char *data, unsigned int length);

	/* Call this periodically (for a polled interface) or upon
	   receiving an interrupt (for a interrupt-driven interface).
	   If interrupt driven, you should probably poll this
	   periodically when not in idle state.  This should be called
	   with the time that passed since the last call, if it is
	   significant.  Time is in microseconds. */
	enum si_sm_result (*event)(struct si_sm_data *smi, long time);

	/* Attempt to detect an SMI.  Returns 0 on success or nonzero
	   on failure. */
	int (*detect)(struct si_sm_data *smi);

	/* The interface is shutting down, so clean it up. */
	void (*cleanup)(struct si_sm_data *smi);

	/* Return the size of the SMI structure in bytes. */
	int (*size)(void);
};
115
116/* Current state machines that we can use. */
117extern struct si_sm_handlers kcs_smi_handlers;
118extern struct si_sm_handlers smic_smi_handlers;
119extern struct si_sm_handlers bt_smi_handlers;
120
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
new file mode 100644
index 000000000000..ae18747e670b
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -0,0 +1,599 @@
1/*
2 * ipmi_smic_sm.c
3 *
4 * The state-machine driver for an IPMI SMIC driver
5 *
6 * It started as a copy of Corey Minyard's driver for the KSC interface
7 * and the kernel patch "mmcdev-patch-245" by HP
8 *
9 * modified by: Hannes Schulz <schulz@schwaar.com>
10 * ipmi@schwaar.com
11 *
12 *
13 * Corey Minyard's driver for the KSC interface has the following
14 * copyright notice:
15 * Copyright 2002 MontaVista Software Inc.
16 *
17 * the kernel patch "mmcdev-patch-245" by HP has the following
18 * copyright notice:
19 * (c) Copyright 2001 Grant Grundler (c) Copyright
20 * 2001 Hewlett-Packard Company
21 *
22 *
23 * This program is free software; you can redistribute it and/or modify it
24 * under the terms of the GNU General Public License as published by the
25 * Free Software Foundation; either version 2 of the License, or (at your
26 * option) any later version.
27 *
28 *
29 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
30 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
31 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
34 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
35 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
36 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
37 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * You should have received a copy of the GNU General Public License along
41 * with this program; if not, write to the Free Software Foundation, Inc.,
42 * 675 Mass Ave, Cambridge, MA 02139, USA. */
43
44#include <linux/kernel.h> /* For printk. */
45#include <linux/string.h>
46#include <linux/ipmi_msgdefs.h> /* for completion codes */
47#include "ipmi_si_sm.h"
48
49#define IPMI_SMIC_VERSION "v33"
50
51/* smic_debug is a bit-field
52 * SMIC_DEBUG_ENABLE - turned on for now
53 * SMIC_DEBUG_MSG - commands and their responses
54 * SMIC_DEBUG_STATES - state machine
55*/
56#define SMIC_DEBUG_STATES 4
57#define SMIC_DEBUG_MSG 2
58#define SMIC_DEBUG_ENABLE 1
59
60static int smic_debug = 1;
61
62enum smic_states {
63 SMIC_IDLE,
64 SMIC_START_OP,
65 SMIC_OP_OK,
66 SMIC_WRITE_START,
67 SMIC_WRITE_NEXT,
68 SMIC_WRITE_END,
69 SMIC_WRITE2READ,
70 SMIC_READ_START,
71 SMIC_READ_NEXT,
72 SMIC_READ_END,
73 SMIC_HOSED
74};
75
76#define MAX_SMIC_READ_SIZE 80
77#define MAX_SMIC_WRITE_SIZE 80
78#define SMIC_MAX_ERROR_RETRIES 3
79
80/* Timeouts in microseconds. */
81#define SMIC_RETRY_TIMEOUT 100000
82
83/* SMIC Flags Register Bits */
84#define SMIC_RX_DATA_READY 0x80
85#define SMIC_TX_DATA_READY 0x40
86#define SMIC_SMI 0x10
87#define SMIC_EVM_DATA_AVAIL 0x08
88#define SMIC_SMS_DATA_AVAIL 0x04
89#define SMIC_FLAG_BSY 0x01
90
91/* SMIC Error Codes */
92#define EC_NO_ERROR 0x00
93#define EC_ABORTED 0x01
94#define EC_ILLEGAL_CONTROL 0x02
95#define EC_NO_RESPONSE 0x03
96#define EC_ILLEGAL_COMMAND 0x04
97#define EC_BUFFER_FULL 0x05
98
99struct si_sm_data
100{
101 enum smic_states state;
102 struct si_sm_io *io;
103 unsigned char write_data[MAX_SMIC_WRITE_SIZE];
104 int write_pos;
105 int write_count;
106 int orig_write_count;
107 unsigned char read_data[MAX_SMIC_READ_SIZE];
108 int read_pos;
109 int truncated;
110 unsigned int error_retries;
111 long smic_timeout;
112};
113
114static unsigned int init_smic_data (struct si_sm_data *smic,
115 struct si_sm_io *io)
116{
117 smic->state = SMIC_IDLE;
118 smic->io = io;
119 smic->write_pos = 0;
120 smic->write_count = 0;
121 smic->orig_write_count = 0;
122 smic->read_pos = 0;
123 smic->error_retries = 0;
124 smic->truncated = 0;
125 smic->smic_timeout = SMIC_RETRY_TIMEOUT;
126
127 /* We use 3 bytes of I/O. */
128 return 3;
129}
130
131static int start_smic_transaction(struct si_sm_data *smic,
132 unsigned char *data, unsigned int size)
133{
134 unsigned int i;
135
136 if ((size < 2) || (size > MAX_SMIC_WRITE_SIZE)) {
137 return -1;
138 }
139 if ((smic->state != SMIC_IDLE) && (smic->state != SMIC_HOSED)) {
140 return -2;
141 }
142 if (smic_debug & SMIC_DEBUG_MSG) {
143 printk(KERN_INFO "start_smic_transaction -");
144 for (i = 0; i < size; i ++) {
145 printk (" %02x", (unsigned char) (data [i]));
146 }
147 printk ("\n");
148 }
149 smic->error_retries = 0;
150 memcpy(smic->write_data, data, size);
151 smic->write_count = size;
152 smic->orig_write_count = size;
153 smic->write_pos = 0;
154 smic->read_pos = 0;
155 smic->state = SMIC_START_OP;
156 smic->smic_timeout = SMIC_RETRY_TIMEOUT;
157 return 0;
158}
159
/* Copy the completed response into the caller's buffer (up to
   "length" bytes) and return the number of bytes delivered.  A reply
   shorter than 3 bytes is padded to include a completion code; if the
   reply was truncated (here or during the read phase) the completion
   code byte is overwritten with IPMI_ERR_MSG_TRUNCATED. */
static int smic_get_result(struct si_sm_data *smic,
			   unsigned char *data, unsigned int length)
{
	int i;

	if (smic_debug & SMIC_DEBUG_MSG) {
		printk (KERN_INFO "smic_get result -");
		for (i = 0; i < smic->read_pos; i ++) {
			printk (" %02x", (smic->read_data [i]));
		}
		printk ("\n");
	}
	/* Clamp to the caller's buffer and remember we dropped data. */
	if (length < smic->read_pos) {
		smic->read_pos = length;
		smic->truncated = 1;
	}
	memcpy(data, smic->read_data, smic->read_pos);

	/* Guarantee a completion code even for too-short replies. */
	if ((length >= 3) && (smic->read_pos < 3)) {
		data[2] = IPMI_ERR_UNSPECIFIED;
		smic->read_pos = 3;
	}
	if (smic->truncated) {
		/* NOTE(review): writes data[2] without checking that
		   length >= 3 -- assumes callers always pass a buffer of
		   at least 3 bytes; verify against the upper layer. */
		data[2] = IPMI_ERR_MSG_TRUNCATED;
		smic->truncated = 0;
	}
	return smic->read_pos;
}
188
/* Register offsets used below: 0 = data, 1 = control/status,
   2 = flags (see init_smic_data, which reserves 3 bytes of I/O). */
static inline unsigned char read_smic_flags(struct si_sm_data *smic)
{
	return smic->io->inputb(smic->io, 2);
}
193
/* Read the status register (offset 1). */
static inline unsigned char read_smic_status(struct si_sm_data *smic)
{
	return smic->io->inputb(smic->io, 1);
}
198
/* Read the data register (offset 0). */
static inline unsigned char read_smic_data(struct si_sm_data *smic)
{
	return smic->io->inputb(smic->io, 0);
}
203
/* Write the flags register (offset 2). */
static inline void write_smic_flags(struct si_sm_data *smic,
				    unsigned char flags)
{
	smic->io->outputb(smic->io, 2, flags);
}
209
/* Write a control code to the control register (offset 1). */
static inline void write_smic_control(struct si_sm_data *smic,
				      unsigned char control)
{
	smic->io->outputb(smic->io, 1, control);
}
215
/* Write one byte to the data register (offset 0). */
static inline void write_si_sm_data (struct si_sm_data *smic,
				     unsigned char	data)
{
	smic->io->outputb(smic->io, 0, data);
}
221
222static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
223{
224 (smic->error_retries)++;
225 if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
226 if (smic_debug & SMIC_DEBUG_ENABLE) {
227 printk(KERN_WARNING
228 "ipmi_smic_drv: smic hosed: %s\n", reason);
229 }
230 smic->state = SMIC_HOSED;
231 } else {
232 smic->write_count = smic->orig_write_count;
233 smic->write_pos = 0;
234 smic->read_pos = 0;
235 smic->state = SMIC_START_OP;
236 smic->smic_timeout = SMIC_RETRY_TIMEOUT;
237 }
238}
239
/* Push the next byte of the outgoing message into the data register
   and advance the write cursor. */
static inline void write_next_byte(struct si_sm_data *smic)
{
	write_si_sm_data(smic, smic->write_data[smic->write_pos]);
	(smic->write_pos)++;
	(smic->write_count)--;
}
246
/* Pull one byte from the data register.  If the read buffer is full
   the byte is still read (to keep the hardware handshake going) but
   discarded, and the message is marked truncated. */
static inline void read_next_byte (struct si_sm_data *smic)
{
	if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
		/* read it, but do not store it */
		read_smic_data (smic);
		smic->truncated = 1;
	} else {
		smic->read_data[smic->read_pos] = read_smic_data(smic);
		(smic->read_pos)++;
	}
}
257
258/* SMIC Control/Status Code Components */
259#define SMIC_GET_STATUS 0x00 /* Control form's name */
260#define SMIC_READY 0x00 /* Status form's name */
261#define SMIC_WR_START 0x01 /* Unified Control/Status names... */
262#define SMIC_WR_NEXT 0x02
263#define SMIC_WR_END 0x03
264#define SMIC_RD_START 0x04
265#define SMIC_RD_NEXT 0x05
266#define SMIC_RD_END 0x06
267#define SMIC_CODE_MASK 0x0f
268
269#define SMIC_CONTROL 0x00
270#define SMIC_STATUS 0x80
271#define SMIC_CS_MASK 0x80
272
273#define SMIC_SMS 0x40
274#define SMIC_SMM 0x60
275#define SMIC_STREAM_MASK 0x60
276
277/* SMIC Control Codes */
278#define SMIC_CC_SMS_GET_STATUS (SMIC_CONTROL|SMIC_SMS|SMIC_GET_STATUS)
279#define SMIC_CC_SMS_WR_START (SMIC_CONTROL|SMIC_SMS|SMIC_WR_START)
280#define SMIC_CC_SMS_WR_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_WR_NEXT)
281#define SMIC_CC_SMS_WR_END (SMIC_CONTROL|SMIC_SMS|SMIC_WR_END)
282#define SMIC_CC_SMS_RD_START (SMIC_CONTROL|SMIC_SMS|SMIC_RD_START)
283#define SMIC_CC_SMS_RD_NEXT (SMIC_CONTROL|SMIC_SMS|SMIC_RD_NEXT)
284#define SMIC_CC_SMS_RD_END (SMIC_CONTROL|SMIC_SMS|SMIC_RD_END)
285
286#define SMIC_CC_SMM_GET_STATUS (SMIC_CONTROL|SMIC_SMM|SMIC_GET_STATUS)
287#define SMIC_CC_SMM_WR_START (SMIC_CONTROL|SMIC_SMM|SMIC_WR_START)
288#define SMIC_CC_SMM_WR_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_WR_NEXT)
289#define SMIC_CC_SMM_WR_END (SMIC_CONTROL|SMIC_SMM|SMIC_WR_END)
290#define SMIC_CC_SMM_RD_START (SMIC_CONTROL|SMIC_SMM|SMIC_RD_START)
291#define SMIC_CC_SMM_RD_NEXT (SMIC_CONTROL|SMIC_SMM|SMIC_RD_NEXT)
292#define SMIC_CC_SMM_RD_END (SMIC_CONTROL|SMIC_SMM|SMIC_RD_END)
293
294/* SMIC Status Codes */
295#define SMIC_SC_SMS_READY (SMIC_STATUS|SMIC_SMS|SMIC_READY)
296#define SMIC_SC_SMS_WR_START (SMIC_STATUS|SMIC_SMS|SMIC_WR_START)
297#define SMIC_SC_SMS_WR_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_WR_NEXT)
298#define SMIC_SC_SMS_WR_END (SMIC_STATUS|SMIC_SMS|SMIC_WR_END)
299#define SMIC_SC_SMS_RD_START (SMIC_STATUS|SMIC_SMS|SMIC_RD_START)
300#define SMIC_SC_SMS_RD_NEXT (SMIC_STATUS|SMIC_SMS|SMIC_RD_NEXT)
301#define SMIC_SC_SMS_RD_END (SMIC_STATUS|SMIC_SMS|SMIC_RD_END)
302
303#define SMIC_SC_SMM_READY (SMIC_STATUS|SMIC_SMM|SMIC_READY)
304#define SMIC_SC_SMM_WR_START (SMIC_STATUS|SMIC_SMM|SMIC_WR_START)
305#define SMIC_SC_SMM_WR_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_WR_NEXT)
306#define SMIC_SC_SMM_WR_END (SMIC_STATUS|SMIC_SMM|SMIC_WR_END)
307#define SMIC_SC_SMM_RD_START (SMIC_STATUS|SMIC_SMM|SMIC_RD_START)
308#define SMIC_SC_SMM_RD_NEXT (SMIC_STATUS|SMIC_SMM|SMIC_RD_NEXT)
309#define SMIC_SC_SMM_RD_END (SMIC_STATUS|SMIC_SMM|SMIC_RD_END)
310
311/* these are the control/status codes we actually use
312 SMIC_CC_SMS_GET_STATUS 0x40
313 SMIC_CC_SMS_WR_START 0x41
314 SMIC_CC_SMS_WR_NEXT 0x42
315 SMIC_CC_SMS_WR_END 0x43
316 SMIC_CC_SMS_RD_START 0x44
317 SMIC_CC_SMS_RD_NEXT 0x45
318 SMIC_CC_SMS_RD_END 0x46
319
320 SMIC_SC_SMS_READY 0xC0
321 SMIC_SC_SMS_WR_START 0xC1
322 SMIC_SC_SMS_WR_NEXT 0xC2
323 SMIC_SC_SMS_WR_END 0xC3
324 SMIC_SC_SMS_RD_START 0xC4
325 SMIC_SC_SMS_RD_NEXT 0xC5
326 SMIC_SC_SMS_RD_END 0xC6
327*/
328
329static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
330{
331 unsigned char status;
332 unsigned char flags;
333 unsigned char data;
334
335 if (smic->state == SMIC_HOSED) {
336 init_smic_data(smic, smic->io);
337 return SI_SM_HOSED;
338 }
339 if (smic->state != SMIC_IDLE) {
340 if (smic_debug & SMIC_DEBUG_STATES) {
341 printk(KERN_INFO
342 "smic_event - smic->smic_timeout = %ld,"
343 " time = %ld\n",
344 smic->smic_timeout, time);
345 }
346/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */
347 if (time < SMIC_RETRY_TIMEOUT) {
348 smic->smic_timeout -= time;
349 if (smic->smic_timeout < 0) {
350 start_error_recovery(smic, "smic timed out.");
351 return SI_SM_CALL_WITH_DELAY;
352 }
353 }
354 }
355 flags = read_smic_flags(smic);
356 if (flags & SMIC_FLAG_BSY)
357 return SI_SM_CALL_WITH_DELAY;
358
359 status = read_smic_status (smic);
360 if (smic_debug & SMIC_DEBUG_STATES)
361 printk(KERN_INFO
362 "smic_event - state = %d, flags = 0x%02x,"
363 " status = 0x%02x\n",
364 smic->state, flags, status);
365
366 switch (smic->state) {
367 case SMIC_IDLE:
368 /* in IDLE we check for available messages */
369 if (flags & (SMIC_SMI |
370 SMIC_EVM_DATA_AVAIL | SMIC_SMS_DATA_AVAIL))
371 {
372 return SI_SM_ATTN;
373 }
374 return SI_SM_IDLE;
375
376 case SMIC_START_OP:
377 /* sanity check whether smic is really idle */
378 write_smic_control(smic, SMIC_CC_SMS_GET_STATUS);
379 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
380 smic->state = SMIC_OP_OK;
381 break;
382
383 case SMIC_OP_OK:
384 if (status != SMIC_SC_SMS_READY) {
385 /* this should not happen */
386 start_error_recovery(smic,
387 "state = SMIC_OP_OK,"
388 " status != SMIC_SC_SMS_READY");
389 return SI_SM_CALL_WITH_DELAY;
390 }
391 /* OK so far; smic is idle let us start ... */
392 write_smic_control(smic, SMIC_CC_SMS_WR_START);
393 write_next_byte(smic);
394 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
395 smic->state = SMIC_WRITE_START;
396 break;
397
398 case SMIC_WRITE_START:
399 if (status != SMIC_SC_SMS_WR_START) {
400 start_error_recovery(smic,
401 "state = SMIC_WRITE_START, "
402 "status != SMIC_SC_SMS_WR_START");
403 return SI_SM_CALL_WITH_DELAY;
404 }
405 /* we must not issue WR_(NEXT|END) unless
406 TX_DATA_READY is set */
407 if (flags & SMIC_TX_DATA_READY) {
408 if (smic->write_count == 1) {
409 /* last byte */
410 write_smic_control(smic, SMIC_CC_SMS_WR_END);
411 smic->state = SMIC_WRITE_END;
412 } else {
413 write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
414 smic->state = SMIC_WRITE_NEXT;
415 }
416 write_next_byte(smic);
417 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
418 }
419 else {
420 return SI_SM_CALL_WITH_DELAY;
421 }
422 break;
423
424 case SMIC_WRITE_NEXT:
425 if (status != SMIC_SC_SMS_WR_NEXT) {
426 start_error_recovery(smic,
427 "state = SMIC_WRITE_NEXT, "
428 "status != SMIC_SC_SMS_WR_NEXT");
429 return SI_SM_CALL_WITH_DELAY;
430 }
431 /* this is the same code as in SMIC_WRITE_START */
432 if (flags & SMIC_TX_DATA_READY) {
433 if (smic->write_count == 1) {
434 write_smic_control(smic, SMIC_CC_SMS_WR_END);
435 smic->state = SMIC_WRITE_END;
436 }
437 else {
438 write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
439 smic->state = SMIC_WRITE_NEXT;
440 }
441 write_next_byte(smic);
442 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
443 }
444 else {
445 return SI_SM_CALL_WITH_DELAY;
446 }
447 break;
448
449 case SMIC_WRITE_END:
450 if (status != SMIC_SC_SMS_WR_END) {
451 start_error_recovery (smic,
452 "state = SMIC_WRITE_END, "
453 "status != SMIC_SC_SMS_WR_END");
454 return SI_SM_CALL_WITH_DELAY;
455 }
456 /* data register holds an error code */
457 data = read_smic_data(smic);
458 if (data != 0) {
459 if (smic_debug & SMIC_DEBUG_ENABLE) {
460 printk(KERN_INFO
461 "SMIC_WRITE_END: data = %02x\n", data);
462 }
463 start_error_recovery(smic,
464 "state = SMIC_WRITE_END, "
465 "data != SUCCESS");
466 return SI_SM_CALL_WITH_DELAY;
467 } else {
468 smic->state = SMIC_WRITE2READ;
469 }
470 break;
471
472 case SMIC_WRITE2READ:
473 /* we must wait for RX_DATA_READY to be set before we
474 can continue */
475 if (flags & SMIC_RX_DATA_READY) {
476 write_smic_control(smic, SMIC_CC_SMS_RD_START);
477 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
478 smic->state = SMIC_READ_START;
479 } else {
480 return SI_SM_CALL_WITH_DELAY;
481 }
482 break;
483
484 case SMIC_READ_START:
485 if (status != SMIC_SC_SMS_RD_START) {
486 start_error_recovery(smic,
487 "state = SMIC_READ_START, "
488 "status != SMIC_SC_SMS_RD_START");
489 return SI_SM_CALL_WITH_DELAY;
490 }
491 if (flags & SMIC_RX_DATA_READY) {
492 read_next_byte(smic);
493 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
494 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
495 smic->state = SMIC_READ_NEXT;
496 } else {
497 return SI_SM_CALL_WITH_DELAY;
498 }
499 break;
500
501 case SMIC_READ_NEXT:
502 switch (status) {
503 /* smic tells us that this is the last byte to be read
504 --> clean up */
505 case SMIC_SC_SMS_RD_END:
506 read_next_byte(smic);
507 write_smic_control(smic, SMIC_CC_SMS_RD_END);
508 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
509 smic->state = SMIC_READ_END;
510 break;
511 case SMIC_SC_SMS_RD_NEXT:
512 if (flags & SMIC_RX_DATA_READY) {
513 read_next_byte(smic);
514 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
515 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
516 smic->state = SMIC_READ_NEXT;
517 } else {
518 return SI_SM_CALL_WITH_DELAY;
519 }
520 break;
521 default:
522 start_error_recovery(
523 smic,
524 "state = SMIC_READ_NEXT, "
525 "status != SMIC_SC_SMS_RD_(NEXT|END)");
526 return SI_SM_CALL_WITH_DELAY;
527 }
528 break;
529
530 case SMIC_READ_END:
531 if (status != SMIC_SC_SMS_READY) {
532 start_error_recovery(smic,
533 "state = SMIC_READ_END, "
534 "status != SMIC_SC_SMS_READY");
535 return SI_SM_CALL_WITH_DELAY;
536 }
537 data = read_smic_data(smic);
538 /* data register holds an error code */
539 if (data != 0) {
540 if (smic_debug & SMIC_DEBUG_ENABLE) {
541 printk(KERN_INFO
542 "SMIC_READ_END: data = %02x\n", data);
543 }
544 start_error_recovery(smic,
545 "state = SMIC_READ_END, "
546 "data != SUCCESS");
547 return SI_SM_CALL_WITH_DELAY;
548 } else {
549 smic->state = SMIC_IDLE;
550 return SI_SM_TRANSACTION_COMPLETE;
551 }
552
553 case SMIC_HOSED:
554 init_smic_data(smic, smic->io);
555 return SI_SM_HOSED;
556
557 default:
558 if (smic_debug & SMIC_DEBUG_ENABLE) {
559 printk(KERN_WARNING "smic->state = %d\n", smic->state);
560 start_error_recovery(smic, "state = UNKNOWN");
561 return SI_SM_CALL_WITH_DELAY;
562 }
563 }
564 smic->smic_timeout = SMIC_RETRY_TIMEOUT;
565 return SI_SM_CALL_WITHOUT_DELAY;
566}
567
/* Probe for a SMIC interface.  Returns 0 if one appears present,
   nonzero otherwise. */
static int smic_detect(struct si_sm_data *smic)
{
	/* It's impossible for the SMIC flags register to be all 1's,
	   (assuming a properly functioning, self-initialized BMC)
	   but that's what you get from reading a bogus address, so we
	   test that first. */
	if (read_smic_flags(smic) == 0xff)
		return 1;

	return 0;
}
579
/* The SMIC state machine holds no extra resources; nothing to free.
   (Parameter renamed from "kcs" to "smic" -- a copy-paste leftover
   from the KCS driver this file was derived from.) */
static void smic_cleanup(struct si_sm_data *smic)
{
}
583
/* Tell the upper layer how much memory to allocate for our opaque
   per-interface state. */
static int smic_size(void)
{
	return sizeof(struct si_sm_data);
}
588
/* The SMIC implementation of the generic low-level state-machine
   interface declared in ipmi_si_sm.h; picked up by ipmi_si_intf. */
struct si_sm_handlers smic_smi_handlers =
{
	.version           = IPMI_SMIC_VERSION,
	.init_data         = init_smic_data,
	.start_transaction = start_smic_transaction,
	.get_result        = smic_get_result,
	.event             = smic_event,
	.detect            = smic_detect,
	.cleanup           = smic_cleanup,
	.size              = smic_size,
};
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
new file mode 100644
index 000000000000..fd7093879c66
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -0,0 +1,1068 @@
1/*
2 * ipmi_watchdog.c
3 *
4 * A watchdog timer based upon the IPMI interface.
5 *
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
8 * source@mvista.com
9 *
10 * Copyright 2002 MontaVista Software Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 *
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
32 */
33
34#include <linux/config.h>
35#include <linux/module.h>
36#include <linux/moduleparam.h>
37#include <linux/ipmi.h>
38#include <linux/ipmi_smi.h>
39#include <linux/watchdog.h>
40#include <linux/miscdevice.h>
41#include <linux/init.h>
42#include <linux/rwsem.h>
43#include <linux/errno.h>
44#include <asm/uaccess.h>
45#include <linux/notifier.h>
46#include <linux/nmi.h>
47#include <linux/reboot.h>
48#include <linux/wait.h>
49#include <linux/poll.h>
50#ifdef CONFIG_X86_LOCAL_APIC
51#include <asm/apic.h>
52#endif
53
54#define PFX "IPMI Watchdog: "
55
56#define IPMI_WATCHDOG_VERSION "v33"
57
58/*
59 * The IPMI command/response information for the watchdog timer.
60 */
61
62/* values for byte 1 of the set command, byte 2 of the get response. */
63#define WDOG_DONT_LOG (1 << 7)
64#define WDOG_DONT_STOP_ON_SET (1 << 6)
65#define WDOG_SET_TIMER_USE(byte, use) \
66 byte = ((byte) & 0xf8) | ((use) & 0x7)
67#define WDOG_GET_TIMER_USE(byte) ((byte) & 0x7)
68#define WDOG_TIMER_USE_BIOS_FRB2 1
69#define WDOG_TIMER_USE_BIOS_POST 2
70#define WDOG_TIMER_USE_OS_LOAD 3
71#define WDOG_TIMER_USE_SMS_OS 4
72#define WDOG_TIMER_USE_OEM 5
73
74/* values for byte 2 of the set command, byte 3 of the get response. */
75#define WDOG_SET_PRETIMEOUT_ACT(byte, use) \
76 byte = ((byte) & 0x8f) | (((use) & 0x7) << 4)
77#define WDOG_GET_PRETIMEOUT_ACT(byte) (((byte) >> 4) & 0x7)
78#define WDOG_PRETIMEOUT_NONE 0
79#define WDOG_PRETIMEOUT_SMI 1
80#define WDOG_PRETIMEOUT_NMI 2
81#define WDOG_PRETIMEOUT_MSG_INT 3
82
83/* Operations that can be performed on a pretimout. */
84#define WDOG_PREOP_NONE 0
85#define WDOG_PREOP_PANIC 1
86#define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to
87 read. Doesn't work in NMI
88 mode. */
89
90/* Actions to perform on a full timeout. */
91#define WDOG_SET_TIMEOUT_ACT(byte, use) \
92 byte = ((byte) & 0xf8) | ((use) & 0x7)
93#define WDOG_GET_TIMEOUT_ACT(byte) ((byte) & 0x7)
94#define WDOG_TIMEOUT_NONE 0
95#define WDOG_TIMEOUT_RESET 1
96#define WDOG_TIMEOUT_POWER_DOWN 2
97#define WDOG_TIMEOUT_POWER_CYCLE 3
98
99/* Byte 3 of the get command, byte 4 of the get response is the
100 pre-timeout in seconds. */
101
102/* Bits for setting byte 4 of the set command, byte 5 of the get response. */
103#define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1)
104#define WDOG_EXPIRE_CLEAR_BIOS_POST (1 << 2)
105#define WDOG_EXPIRE_CLEAR_OS_LOAD (1 << 3)
106#define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4)
107#define WDOG_EXPIRE_CLEAR_OEM (1 << 5)
108
109/* Setting/getting the watchdog timer value. This is for bytes 5 and
110 6 (the timeout time) of the set command, and bytes 6 and 7 (the
111 timeout time) and 8 and 9 (the current countdown value) of the
112 response. The timeout value is given in seconds (in the command it
113 is 100ms intervals). */
114#define WDOG_SET_TIMEOUT(byte1, byte2, val) \
115 (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
116#define WDOG_GET_TIMEOUT(byte1, byte2) \
117 (((byte1) | ((byte2) << 8)) / 10)
118
119#define IPMI_WDOG_RESET_TIMER 0x22
120#define IPMI_WDOG_SET_TIMER 0x24
121#define IPMI_WDOG_GET_TIMER 0x25
122
123/* These are here until the real ones get into the watchdog.h interface. */
124#ifndef WDIOC_GETTIMEOUT
125#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
126#endif
127#ifndef WDIOC_SET_PRETIMEOUT
128#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
129#endif
130#ifndef WDIOC_GET_PRETIMEOUT
131#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
132#endif
133
134#ifdef CONFIG_WATCHDOG_NOWAYOUT
135static int nowayout = 1;
136#else
137static int nowayout;
138#endif
139
140static ipmi_user_t watchdog_user = NULL;
141
142/* Default the timeout to 10 seconds. */
143static int timeout = 10;
144
145/* The pre-timeout is disabled by default. */
146static int pretimeout = 0;
147
148/* Default action is to reset the board on a timeout. */
149static unsigned char action_val = WDOG_TIMEOUT_RESET;
150
151static char action[16] = "reset";
152
153static unsigned char preaction_val = WDOG_PRETIMEOUT_NONE;
154
155static char preaction[16] = "pre_none";
156
157static unsigned char preop_val = WDOG_PREOP_NONE;
158
159static char preop[16] = "preop_none";
160static DEFINE_SPINLOCK(ipmi_read_lock);
161static char data_to_read = 0;
162static DECLARE_WAIT_QUEUE_HEAD(read_q);
163static struct fasync_struct *fasync_q = NULL;
164static char pretimeout_since_last_heartbeat = 0;
165static char expect_close;
166
167/* If true, the driver will start running as soon as it is configured
168 and ready. */
169static int start_now = 0;
170
171module_param(timeout, int, 0);
172MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
173module_param(pretimeout, int, 0);
174MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
175module_param_string(action, action, sizeof(action), 0);
176MODULE_PARM_DESC(action, "Timeout action. One of: "
177 "reset, none, power_cycle, power_off.");
178module_param_string(preaction, preaction, sizeof(preaction), 0);
179MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
180 "pre_none, pre_smi, pre_nmi, pre_int.");
181module_param_string(preop, preop, sizeof(preop), 0);
182MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
183 "preop_none, preop_panic, preop_give_data.");
184module_param(start_now, int, 0);
185MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
186 "soon as the driver is loaded.");
187module_param(nowayout, int, 0);
188MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
189
190/* Default state of the timer. */
191static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
192
193/* If shutting down via IPMI, we ignore the heartbeat. */
194static int ipmi_ignore_heartbeat = 0;
195
196/* Is someone using the watchdog? Only one user is allowed. */
197static unsigned long ipmi_wdog_open = 0;
198
199/* If set to 1, the heartbeat command will set the state to reset and
200 start the timer. The timer doesn't normally run when the driver is
201 first opened until the heartbeat is set the first time, this
202 variable is used to accomplish this. */
203static int ipmi_start_timer_on_heartbeat = 0;
204
205/* IPMI version of the BMC. */
206static unsigned char ipmi_version_major;
207static unsigned char ipmi_version_minor;
208
209
210static int ipmi_heartbeat(void);
211static void panic_halt_ipmi_heartbeat(void);
212
213
214/* We use a semaphore to make sure that only one thing can send a set
215 timeout at one time, because we only have one copy of the data.
216 The semaphore is claimed when the set_timeout is sent and freed
217 when both messages are free. */
218static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
219static DECLARE_MUTEX(set_timeout_lock);
/* "done" callback for the set-timeout SMI message.  Releases
   set_timeout_lock once both outstanding messages (SMI and receive,
   counted down from 2 in set_timeout_tofree) have been returned. */
static void set_timeout_free_smi(struct ipmi_smi_msg *msg)
{
	if (atomic_dec_and_test(&set_timeout_tofree))
		up(&set_timeout_lock);
}
/* "done" callback for the set-timeout receive message; counterpart
   of set_timeout_free_smi, sharing the same countdown. */
static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
{
	if (atomic_dec_and_test(&set_timeout_tofree))
		up(&set_timeout_lock);
}
230static struct ipmi_smi_msg set_timeout_smi_msg =
231{
232 .done = set_timeout_free_smi
233};
234static struct ipmi_recv_msg set_timeout_recv_msg =
235{
236 .done = set_timeout_free_recv
237};
238
/* Build and send the IPMI "Set Watchdog Timer" command
   (IPMI_WDOG_SET_TIMER) using the caller-supplied message buffers.
   On return, *send_heartbeat_now (if non-NULL) tells the caller
   whether it must follow up with a heartbeat: on pre-1.5 BMCs,
   setting the timer also stops it, so a running watchdog needs a
   restart.  Returns 0 on success or the ipmi_request_supply_msgs()
   error code. */
static int i_ipmi_set_timeout(struct ipmi_smi_msg  *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int                  *send_heartbeat_now)
{
	struct kernel_ipmi_msg            msg;
	unsigned char                     data[6];
	int                               rv;
	struct ipmi_system_interface_addr addr;
	int                               hbnow = 0;


	/* Byte 0: timer use (SMS/OS), plus don't-stop flag if 1.5+. */
	data[0] = 0;
	WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);

	if ((ipmi_version_major > 1)
	    || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5)))
	{
		/* This is an IPMI 1.5-only feature. */
		data[0] |= WDOG_DONT_STOP_ON_SET;
	} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
		/* In ipmi 1.0, setting the timer stops the watchdog, we
		   need to start it back up again. */
		hbnow = 1;
	}

	/* Byte 1: timeout and pretimeout actions; byte 2: pretimeout
	   interval in seconds. */
	data[1] = 0;
	WDOG_SET_TIMEOUT_ACT(data[1], ipmi_watchdog_state);
	if (pretimeout > 0) {
	    WDOG_SET_PRETIMEOUT_ACT(data[1], preaction_val);
	    data[2] = pretimeout;
	} else {
	    WDOG_SET_PRETIMEOUT_ACT(data[1], WDOG_PRETIMEOUT_NONE);
	    data[2] = 0; /* No pretimeout. */
	}
	/* Byte 3: expiration-flag clears; bytes 4-5: timeout value. */
	data[3] = 0;
	WDOG_SET_TIMEOUT(data[4], data[5], timeout);

	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	addr.channel = IPMI_BMC_CHANNEL;
	addr.lun = 0;

	msg.netfn = 0x06;
	msg.cmd = IPMI_WDOG_SET_TIMER;
	msg.data = data;
	msg.data_len = sizeof(data);
	rv = ipmi_request_supply_msgs(watchdog_user,
				      (struct ipmi_addr *) &addr,
				      0,
				      &msg,
				      NULL,
				      smi_msg,
				      recv_msg,
				      1);
	if (rv) {
		printk(KERN_WARNING PFX "set timeout error: %d\n",
		       rv);
	}

	if (send_heartbeat_now)
		*send_heartbeat_now = hbnow;

	return rv;
}
302
/* Parameters to ipmi_set_timeout */
#define IPMI_SET_TIMEOUT_NO_HB 0 /* never heartbeat after the set */
#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1 /* heartbeat only if the set stopped the timer (pre-1.5 BMC) */
#define IPMI_SET_TIMEOUT_FORCE_HB 2 /* always heartbeat after the set */

/* Send a set-timeout using the shared static message buffers.
 * Serialized by set_timeout_lock; on success the lock is released from
 * the message "done" callbacks once both buffers are free again
 * (set_timeout_tofree counts down from 2).  do_heartbeat is one of the
 * IPMI_SET_TIMEOUT_* values above. */
static int ipmi_set_timeout(int do_heartbeat)
{
	int send_heartbeat_now;
	int rv;


	/* We can only send one of these at a time. */
	down(&set_timeout_lock);

	/* Both messages must come back before the lock may drop. */
	atomic_set(&set_timeout_tofree, 2);

	rv = i_ipmi_set_timeout(&set_timeout_smi_msg,
				&set_timeout_recv_msg,
				&send_heartbeat_now);
	if (rv) {
		/* Submission failed, so release the lock ourselves.
		   NOTE(review): set_timeout_tofree is left at 2 here --
		   verify the done callbacks still run on this path,
		   otherwise the wait loop in ipmi_unregister_watchdog()
		   could spin forever. */
		up(&set_timeout_lock);
	} else {
		if ((do_heartbeat == IPMI_SET_TIMEOUT_FORCE_HB)
		    || ((send_heartbeat_now)
			&& (do_heartbeat == IPMI_SET_TIMEOUT_HB_IF_NECESSARY)))
		{
			rv = ipmi_heartbeat();
		}
	}

	return rv;
}
335
/* No-op "done" callbacks for the statically-allocated panic/halt
   message buffers: nothing to free and no one to wake. */
static void dummy_smi_free(struct ipmi_smi_msg *msg)
{
}
static void dummy_recv_free(struct ipmi_recv_msg *msg)
{
}
/* Static message buffers for the panic/halt set-timeout; usable
   without locking because the panic path is single-threaded and never
   waits for completion. */
static struct ipmi_smi_msg panic_halt_smi_msg =
{
	.done = dummy_smi_free
};
static struct ipmi_recv_msg panic_halt_recv_msg =
{
	.done = dummy_recv_free
};
350
351/* Special call, doesn't claim any locks. This is only to be called
352 at panic or halt time, in run-to-completion mode, when the caller
353 is the only CPU and the only thing that will be going is these IPMI
354 calls. */
355static void panic_halt_ipmi_set_timeout(void)
356{
357 int send_heartbeat_now;
358 int rv;
359
360 rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
361 &panic_halt_recv_msg,
362 &send_heartbeat_now);
363 if (!rv) {
364 if (send_heartbeat_now)
365 panic_halt_ipmi_heartbeat();
366 }
367}
368
/* We use a semaphore to make sure that only one thing can send a
   heartbeat at one time, because we only have one copy of the data.
   The semaphore is claimed when the set_timeout is sent and freed
   when both messages are free.  heartbeat_wait_lock starts out locked;
   the sender blocks on it until both "done" callbacks have fired. */
static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
static DECLARE_MUTEX(heartbeat_lock);
static DECLARE_MUTEX_LOCKED(heartbeat_wait_lock);
/* Message "done" callbacks: wake the waiting heartbeat sender once
   both the SMI and the receive message have been released. */
static void heartbeat_free_smi(struct ipmi_smi_msg *msg)
{
	if (atomic_dec_and_test(&heartbeat_tofree))
		up(&heartbeat_wait_lock);
}
static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
{
	if (atomic_dec_and_test(&heartbeat_tofree))
		up(&heartbeat_wait_lock);
}
static struct ipmi_smi_msg heartbeat_smi_msg =
{
	.done = heartbeat_free_smi
};
static struct ipmi_recv_msg heartbeat_recv_msg =
{
	.done = heartbeat_free_recv
};

/* Separate static buffers for the panic/halt heartbeat; that path
   never waits, so the no-op callbacks are used. */
static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
{
	.done = dummy_smi_free
};
static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
{
	.done = dummy_recv_free
};
403
/* Reset the BMC watchdog timer ("heartbeat").  May instead perform a
 * full set-timeout when the timer has not been started yet or when a
 * pretimeout occurred since the last heartbeat.  Blocks until the
 * reset command completes.  Returns 0 on success, -EINVAL if the BMC
 * rejected the command, or the submission error. */
static int ipmi_heartbeat(void)
{
	struct kernel_ipmi_msg msg;
	int rv;
	struct ipmi_system_interface_addr addr;

	/* Test/override knob: pretend the heartbeat succeeded. */
	if (ipmi_ignore_heartbeat) {
		return 0;
	}

	if (ipmi_start_timer_on_heartbeat) {
		/* First heartbeat after open: actually start the timer. */
		ipmi_start_timer_on_heartbeat = 0;
		ipmi_watchdog_state = action_val;
		return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
	} else if (pretimeout_since_last_heartbeat) {
		/* A pretimeout occurred, make sure we set the timeout.
		   We don't want to set the action, though, we want to
		   leave that alone (thus it can't be combined with the
		   above operation. */
		pretimeout_since_last_heartbeat = 0;
		return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
	}

	down(&heartbeat_lock);

	/* Both messages must come back (via the done callbacks) before
	   heartbeat_wait_lock is released. */
	atomic_set(&heartbeat_tofree, 2);

	/* Don't reset the timer if we have the timer turned off, that
	   re-enables the watchdog. */
	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
		up(&heartbeat_lock);
		return 0;
	}

	addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	addr.channel = IPMI_BMC_CHANNEL;
	addr.lun = 0;

	msg.netfn = 0x06;
	msg.cmd = IPMI_WDOG_RESET_TIMER;
	msg.data = NULL;
	msg.data_len = 0;
	rv = ipmi_request_supply_msgs(watchdog_user,
				      (struct ipmi_addr *) &addr,
				      0,
				      &msg,
				      NULL,
				      &heartbeat_smi_msg,
				      &heartbeat_recv_msg,
				      1);
	if (rv) {
		up(&heartbeat_lock);
		printk(KERN_WARNING PFX "heartbeat failure: %d\n",
		       rv);
		return rv;
	}

	/* Wait for the heartbeat to be sent. */
	down(&heartbeat_wait_lock);

	/* First response byte is the IPMI completion code; nonzero
	   means the BMC reported an error. */
	if (heartbeat_recv_msg.msg.data[0] != 0) {
		/* Got an error in the heartbeat response.  It was already
		   reported in ipmi_wdog_msg_handler, but we should return
		   an error here. */
		rv = -EINVAL;
	}

	up(&heartbeat_lock);

	return rv;
}
475
476static void panic_halt_ipmi_heartbeat(void)
477{
478 struct kernel_ipmi_msg msg;
479 struct ipmi_system_interface_addr addr;
480
481
482 /* Don't reset the timer if we have the timer turned off, that
483 re-enables the watchdog. */
484 if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
485 return;
486
487 addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
488 addr.channel = IPMI_BMC_CHANNEL;
489 addr.lun = 0;
490
491 msg.netfn = 0x06;
492 msg.cmd = IPMI_WDOG_RESET_TIMER;
493 msg.data = NULL;
494 msg.data_len = 0;
495 ipmi_request_supply_msgs(watchdog_user,
496 (struct ipmi_addr *) &addr,
497 0,
498 &msg,
499 NULL,
500 &panic_halt_heartbeat_smi_msg,
501 &panic_halt_heartbeat_recv_msg,
502 1);
503}
504
/* Capabilities reported via WDIOC_GETSUPPORT.  No option flags are
   advertised (WDIOF_SETTIMEOUT is deliberately left commented out,
   even though the WDIOC_SETTIMEOUT ioctl is implemented below). */
static struct watchdog_info ident=
{
	.options	= 0,	/* WDIOF_SETTIMEOUT, */
	.firmware_version = 1,
	.identity	= "IPMI"
};
511
512static int ipmi_ioctl(struct inode *inode, struct file *file,
513 unsigned int cmd, unsigned long arg)
514{
515 void __user *argp = (void __user *)arg;
516 int i;
517 int val;
518
519 switch(cmd) {
520 case WDIOC_GETSUPPORT:
521 i = copy_to_user(argp, &ident, sizeof(ident));
522 return i ? -EFAULT : 0;
523
524 case WDIOC_SETTIMEOUT:
525 i = copy_from_user(&val, argp, sizeof(int));
526 if (i)
527 return -EFAULT;
528 timeout = val;
529 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
530
531 case WDIOC_GETTIMEOUT:
532 i = copy_to_user(argp, &timeout, sizeof(timeout));
533 if (i)
534 return -EFAULT;
535 return 0;
536
537 case WDIOC_SET_PRETIMEOUT:
538 i = copy_from_user(&val, argp, sizeof(int));
539 if (i)
540 return -EFAULT;
541 pretimeout = val;
542 return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
543
544 case WDIOC_GET_PRETIMEOUT:
545 i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
546 if (i)
547 return -EFAULT;
548 return 0;
549
550 case WDIOC_KEEPALIVE:
551 return ipmi_heartbeat();
552
553 case WDIOC_SETOPTIONS:
554 i = copy_from_user(&val, argp, sizeof(int));
555 if (i)
556 return -EFAULT;
557 if (val & WDIOS_DISABLECARD)
558 {
559 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
560 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
561 ipmi_start_timer_on_heartbeat = 0;
562 }
563
564 if (val & WDIOS_ENABLECARD)
565 {
566 ipmi_watchdog_state = action_val;
567 ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
568 }
569 return 0;
570
571 case WDIOC_GETSTATUS:
572 val = 0;
573 i = copy_to_user(argp, &val, sizeof(val));
574 if (i)
575 return -EFAULT;
576 return 0;
577
578 default:
579 return -ENOIOCTLCMD;
580 }
581}
582
583static ssize_t ipmi_write(struct file *file,
584 const char __user *buf,
585 size_t len,
586 loff_t *ppos)
587{
588 int rv;
589
590 if (len) {
591 if (!nowayout) {
592 size_t i;
593
594 /* In case it was set long ago */
595 expect_close = 0;
596
597 for (i = 0; i != len; i++) {
598 char c;
599
600 if (get_user(c, buf + i))
601 return -EFAULT;
602 if (c == 'V')
603 expect_close = 42;
604 }
605 }
606 rv = ipmi_heartbeat();
607 if (rv)
608 return rv;
609 return 1;
610 }
611 return 0;
612}
613
614static ssize_t ipmi_read(struct file *file,
615 char __user *buf,
616 size_t count,
617 loff_t *ppos)
618{
619 int rv = 0;
620 wait_queue_t wait;
621
622 if (count <= 0)
623 return 0;
624
625 /* Reading returns if the pretimeout has gone off, and it only does
626 it once per pretimeout. */
627 spin_lock(&ipmi_read_lock);
628 if (!data_to_read) {
629 if (file->f_flags & O_NONBLOCK) {
630 rv = -EAGAIN;
631 goto out;
632 }
633
634 init_waitqueue_entry(&wait, current);
635 add_wait_queue(&read_q, &wait);
636 while (!data_to_read) {
637 set_current_state(TASK_INTERRUPTIBLE);
638 spin_unlock(&ipmi_read_lock);
639 schedule();
640 spin_lock(&ipmi_read_lock);
641 }
642 remove_wait_queue(&read_q, &wait);
643
644 if (signal_pending(current)) {
645 rv = -ERESTARTSYS;
646 goto out;
647 }
648 }
649 data_to_read = 0;
650
651 out:
652 spin_unlock(&ipmi_read_lock);
653
654 if (rv == 0) {
655 if (copy_to_user(buf, &data_to_read, 1))
656 rv = -EFAULT;
657 else
658 rv = 1;
659 }
660
661 return rv;
662}
663
664static int ipmi_open(struct inode *ino, struct file *filep)
665{
666 switch (iminor(ino))
667 {
668 case WATCHDOG_MINOR:
669 if(test_and_set_bit(0, &ipmi_wdog_open))
670 return -EBUSY;
671
672 /* Don't start the timer now, let it start on the
673 first heartbeat. */
674 ipmi_start_timer_on_heartbeat = 1;
675 return nonseekable_open(ino, filep);
676
677 default:
678 return (-ENODEV);
679 }
680}
681
682static unsigned int ipmi_poll(struct file *file, poll_table *wait)
683{
684 unsigned int mask = 0;
685
686 poll_wait(file, &read_q, wait);
687
688 spin_lock(&ipmi_read_lock);
689 if (data_to_read)
690 mask |= (POLLIN | POLLRDNORM);
691 spin_unlock(&ipmi_read_lock);
692
693 return mask;
694}
695
696static int ipmi_fasync(int fd, struct file *file, int on)
697{
698 int result;
699
700 result = fasync_helper(fd, file, on, &fasync_q);
701
702 return (result);
703}
704
705static int ipmi_close(struct inode *ino, struct file *filep)
706{
707 if (iminor(ino)==WATCHDOG_MINOR)
708 {
709 if (expect_close == 42) {
710 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
711 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
712 clear_bit(0, &ipmi_wdog_open);
713 } else {
714 printk(KERN_CRIT PFX "Unexpected close, not stopping watchdog!\n");
715 ipmi_heartbeat();
716 }
717 }
718
719 ipmi_fasync (-1, filep, 0);
720 expect_close = 0;
721
722 return 0;
723}
724
/* File operations for the watchdog character device. */
static struct file_operations ipmi_wdog_fops = {
	.owner   = THIS_MODULE,
	.read    = ipmi_read,
	.poll    = ipmi_poll,
	.write   = ipmi_write,
	.ioctl   = ipmi_ioctl,
	.open    = ipmi_open,
	.release = ipmi_close,
	.fasync  = ipmi_fasync,
};

/* Registered as the standard /dev/watchdog misc device. */
static struct miscdevice ipmi_wdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &ipmi_wdog_fops
};
741
742static DECLARE_RWSEM(register_sem);
743
744static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
745 void *handler_data)
746{
747 if (msg->msg.data[0] != 0) {
748 printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
749 msg->msg.data[0],
750 msg->msg.cmd);
751 }
752
753 ipmi_free_recv_msg(msg);
754}
755
756static void ipmi_wdog_pretimeout_handler(void *handler_data)
757{
758 if (preaction_val != WDOG_PRETIMEOUT_NONE) {
759 if (preop_val == WDOG_PREOP_PANIC)
760 panic("Watchdog pre-timeout");
761 else if (preop_val == WDOG_PREOP_GIVE_DATA) {
762 spin_lock(&ipmi_read_lock);
763 data_to_read = 1;
764 wake_up_interruptible(&read_q);
765 kill_fasync(&fasync_q, SIGIO, POLL_IN);
766
767 spin_unlock(&ipmi_read_lock);
768 }
769 }
770
771 /* On some machines, the heartbeat will give
772 an error and not work unless we re-enable
773 the timer. So do so. */
774 pretimeout_since_last_heartbeat = 1;
775}
776
/* Callbacks registered with the IPMI message handler: command
   responses and pretimeout notifications. */
static struct ipmi_user_hndl ipmi_hndlrs =
{
	.ipmi_recv_hndl           = ipmi_wdog_msg_handler,
	.ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
};
782
/* Called when an IPMI interface appears: create our IPMI user on it,
 * cache the BMC's IPMI version and register the watchdog misc device.
 * Only the first interface is used; later calls bail out under
 * register_sem.  If "start_now" was set (presumably a module
 * parameter declared elsewhere in this file), the timer is started
 * immediately on the first successful registration. */
static void ipmi_register_watchdog(int ipmi_intf)
{
	int rv = -EBUSY;

	down_write(&register_sem);
	/* Already bound to an interface; ignore additional ones. */
	if (watchdog_user)
		goto out;

	rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
	if (rv < 0) {
		printk(KERN_CRIT PFX "Unable to register with ipmi\n");
		goto out;
	}

	/* Needed to decide whether IPMI 1.5-only flags may be used. */
	ipmi_get_version(watchdog_user,
			 &ipmi_version_major,
			 &ipmi_version_minor);

	rv = misc_register(&ipmi_wdog_miscdev);
	if (rv < 0) {
		ipmi_destroy_user(watchdog_user);
		watchdog_user = NULL;
		printk(KERN_CRIT PFX "Unable to register misc device\n");
	}

 out:
	up_write(&register_sem);

	if ((start_now) && (rv == 0)) {
		/* Run from startup, so start the timer now. */
		start_now = 0; /* Disable this function after first startup. */
		ipmi_watchdog_state = action_val;
		ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
		printk(KERN_INFO PFX "Starting now!\n");
	}
}
819
#ifdef HAVE_NMI_HANDLER
/* NMI pretimeout path: if no other handler claimed the NMI, assume it
   was the IPMI watchdog's pretimeout notification. */
static int
ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled)
{
	/* If no one else handled the NMI, we assume it was the IPMI
	   watchdog. */
	if ((!handled) && (preop_val == WDOG_PREOP_PANIC))
		panic(PFX "pre-timeout");

	/* On some machines, the heartbeat will give
	   an error and not work unless we re-enable
	   the timer.   So do so. */
	pretimeout_since_last_heartbeat = 1;

	return NOTIFY_DONE;
}

static struct nmi_handler ipmi_nmi_handler =
{
	.link     = LIST_HEAD_INIT(ipmi_nmi_handler.link),
	.dev_name = "ipmi_watchdog",
	.dev_id   = NULL,
	.handler  = ipmi_nmi,
	.priority = 0, /* Call us last. */
};
#endif
846
847static int wdog_reboot_handler(struct notifier_block *this,
848 unsigned long code,
849 void *unused)
850{
851 static int reboot_event_handled = 0;
852
853 if ((watchdog_user) && (!reboot_event_handled)) {
854 /* Make sure we only do this once. */
855 reboot_event_handled = 1;
856
857 if (code == SYS_DOWN || code == SYS_HALT) {
858 /* Disable the WDT if we are shutting down. */
859 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
860 panic_halt_ipmi_set_timeout();
861 } else {
862 /* Set a long timer to let the reboot happens, but
863 reboot if it hangs. */
864 timeout = 120;
865 pretimeout = 0;
866 ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
867 panic_halt_ipmi_set_timeout();
868 }
869 }
870 return NOTIFY_OK;
871}
872
873static struct notifier_block wdog_reboot_notifier = {
874 .notifier_call = wdog_reboot_handler,
875 .next = NULL,
876 .priority = 0
877};
878
879static int wdog_panic_handler(struct notifier_block *this,
880 unsigned long event,
881 void *unused)
882{
883 static int panic_event_handled = 0;
884
885 /* On a panic, if we have a panic timeout, make sure that the thing
886 reboots, even if it hangs during that panic. */
887 if (watchdog_user && !panic_event_handled) {
888 /* Make sure the panic doesn't hang, and make sure we
889 do this only once. */
890 panic_event_handled = 1;
891
892 timeout = 255;
893 pretimeout = 0;
894 ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
895 panic_halt_ipmi_set_timeout();
896 }
897
898 return NOTIFY_OK;
899}
900
901static struct notifier_block wdog_panic_notifier = {
902 .notifier_call = wdog_panic_handler,
903 .next = NULL,
904 .priority = 150 /* priority: INT_MAX >= x >= 0 */
905};
906
907
/* SMI watcher callbacks: try to bind the watchdog to each new IPMI
   interface (only the first one actually registers). */
static void ipmi_new_smi(int if_num)
{
	ipmi_register_watchdog(if_num);
}

static void ipmi_smi_gone(int if_num)
{
	/* This can never be called, because once the watchdog is
	   registered, the interface can't go away until the watchdog
	   is unregistered. */
}

static struct ipmi_smi_watcher smi_watcher =
{
	.owner    = THIS_MODULE,
	.new_smi  = ipmi_new_smi,
	.smi_gone = ipmi_smi_gone
};
926
/* Module init: parse the action/preaction/preop string parameters into
 * their numeric values, hook the NMI handler if requested (and
 * compatible with the system's nmi_watchdog mode), then register the
 * SMI watcher plus reboot/panic notifiers.  The watchdog device itself
 * is registered later, when an IPMI interface shows up. */
static int __init ipmi_wdog_init(void)
{
	int rv;

	printk(KERN_INFO PFX "driver version "
	       IPMI_WATCHDOG_VERSION "\n");

	/* Timeout action: what the BMC does when the timer expires. */
	if (strcmp(action, "reset") == 0) {
		action_val = WDOG_TIMEOUT_RESET;
	} else if (strcmp(action, "none") == 0) {
		action_val = WDOG_TIMEOUT_NONE;
	} else if (strcmp(action, "power_cycle") == 0) {
		action_val = WDOG_TIMEOUT_POWER_CYCLE;
	} else if (strcmp(action, "power_off") == 0) {
		action_val = WDOG_TIMEOUT_POWER_DOWN;
	} else {
		action_val = WDOG_TIMEOUT_RESET;
		printk(KERN_INFO PFX "Unknown action '%s', defaulting to"
		       " reset\n", action);
	}

	/* Pretimeout mechanism: how we are notified before expiry. */
	if (strcmp(preaction, "pre_none") == 0) {
		preaction_val = WDOG_PRETIMEOUT_NONE;
	} else if (strcmp(preaction, "pre_smi") == 0) {
		preaction_val = WDOG_PRETIMEOUT_SMI;
#ifdef HAVE_NMI_HANDLER
	} else if (strcmp(preaction, "pre_nmi") == 0) {
		preaction_val = WDOG_PRETIMEOUT_NMI;
#endif
	} else if (strcmp(preaction, "pre_int") == 0) {
		preaction_val = WDOG_PRETIMEOUT_MSG_INT;
	} else {
		preaction_val = WDOG_PRETIMEOUT_NONE;
		printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to"
		       " none\n", preaction);
	}

	/* Pretimeout operation: what this driver does when notified. */
	if (strcmp(preop, "preop_none") == 0) {
		preop_val = WDOG_PREOP_NONE;
	} else if (strcmp(preop, "preop_panic") == 0) {
		preop_val = WDOG_PREOP_PANIC;
	} else if (strcmp(preop, "preop_give_data") == 0) {
		preop_val = WDOG_PREOP_GIVE_DATA;
	} else {
		preop_val = WDOG_PREOP_NONE;
		printk(KERN_INFO PFX "Unknown preop '%s', defaulting to"
		       " none\n", preop);
	}

#ifdef HAVE_NMI_HANDLER
	if (preaction_val == WDOG_PRETIMEOUT_NMI) {
		/* An NMI delivers no readable data, so "give data" makes
		   no sense with NMI pretimeouts. */
		if (preop_val == WDOG_PREOP_GIVE_DATA) {
			printk(KERN_WARNING PFX "Pretimeout op is to give data"
			       " but NMI pretimeout is enabled, setting"
			       " pretimeout op to none\n");
			preop_val = WDOG_PREOP_NONE;
		}
#ifdef CONFIG_X86_LOCAL_APIC
		if (nmi_watchdog == NMI_IO_APIC) {
			printk(KERN_WARNING PFX "nmi_watchdog is set to IO APIC"
			       " mode (value is %d), that is incompatible"
			       " with using NMI in the IPMI watchdog."
			       " Disabling IPMI nmi pretimeout.\n",
			       nmi_watchdog);
			preaction_val = WDOG_PRETIMEOUT_NONE;
		} else {
#endif
		rv = request_nmi(&ipmi_nmi_handler);
		if (rv) {
			printk(KERN_WARNING PFX "Can't register nmi handler\n");
			return rv;
		}
#ifdef CONFIG_X86_LOCAL_APIC
		}
#endif
	}
#endif

	rv = ipmi_smi_watcher_register(&smi_watcher);
	if (rv) {
		/* Undo the NMI registration on failure. */
#ifdef HAVE_NMI_HANDLER
		if (preaction_val == WDOG_PRETIMEOUT_NMI)
			release_nmi(&ipmi_nmi_handler);
#endif
		printk(KERN_WARNING PFX "can't register smi watcher\n");
		return rv;
	}

	register_reboot_notifier(&wdog_reboot_notifier);
	notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);

	return 0;
}
1020
/* Tear down the watchdog: release the NMI handler and notifiers, drop
 * the misc device, wait for in-flight set-timeout messages to be
 * returned by the lower layer (it holds pointers into our static
 * buffers), then destroy the IPMI user.  Serialized against
 * registration by register_sem. */
static __exit void ipmi_unregister_watchdog(void)
{
	int rv;

	down_write(&register_sem);

#ifdef HAVE_NMI_HANDLER
	if (preaction_val == WDOG_PRETIMEOUT_NMI)
		release_nmi(&ipmi_nmi_handler);
#endif

	notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier);
	unregister_reboot_notifier(&wdog_reboot_notifier);

	/* Never bound to an interface; nothing more to undo. */
	if (! watchdog_user)
		goto out;

	/* Make sure no one can call us any more. */
	misc_deregister(&ipmi_wdog_miscdev);

	/* Wait to make sure the message makes it out.  The lower layer has
	   pointers to our buffers, we want to make sure they are done before
	   we release our memory.
	   NOTE(review): only set_timeout_tofree is polled here; the
	   heartbeat buffers are static too -- confirm they cannot still
	   be in flight at this point. */
	while (atomic_read(&set_timeout_tofree)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* Disconnect from IPMI. */
	rv = ipmi_destroy_user(watchdog_user);
	if (rv) {
		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
		       rv);
	}
	watchdog_user = NULL;

 out:
	up_write(&register_sem);
}
1060
/* Module exit: stop watching for new interfaces first, then tear down
   the watchdog device and IPMI user. */
static void __exit ipmi_wdog_exit(void)
{
	ipmi_smi_watcher_unregister(&smi_watcher);
	ipmi_unregister_watchdog();
}
module_exit(ipmi_wdog_exit);
module_init(ipmi_wdog_init);
MODULE_LICENSE("GPL");