Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c   | 153
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c  | 153
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 466
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h   |  89
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c | 149
5 files changed, 574 insertions(+), 436 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index e736119b6497..7b98c067190a 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -37,26 +37,32 @@
 #define BT_DEBUG_ENABLE	1	/* Generic messages */
 #define BT_DEBUG_MSG	2	/* Prints all request/response buffers */
 #define BT_DEBUG_STATES	4	/* Verbose look at state changes */
-/* BT_DEBUG_OFF must be zero to correspond to the default uninitialized
-   value */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
 
 static int bt_debug; /* 0 == BT_DEBUG_OFF */
 
 module_param(bt_debug, int, 0644);
 MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
-/* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
-   and 64 byte buffers.  However, one HP implementation wants 255 bytes of
-   buffer (with a documented message of 160 bytes) so go for the max.
-   Since the Open IPMI architecture is single-message oriented at this
-   stage, the queue depth of BT is of no concern. */
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers.  However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
 
 #define BT_NORMAL_TIMEOUT	5	/* seconds */
 #define BT_NORMAL_RETRY_LIMIT	2
 #define BT_RESET_DELAY		6	/* seconds after warm reset */
 
-/* States are written in chronological order and usually cover
-   multiple rows of the state table discussion in the IPMI spec. */
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
 
 enum bt_states {
 	BT_STATE_IDLE = 0,	/* Order is critical in this list */
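Because bt_debug is declared with module_param(..., 0644) above, the bitmask is also writable at runtime through sysfs. A hedged usage sketch (the path assumes this state machine is built into the ipmi_si module, as it is in the mainline build):

    # enable all three debug classes: 1 (generic) | 2 (messages) | 4 (states)
    echo 7 > /sys/module/ipmi_si/parameters/bt_debug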
@@ -76,10 +82,12 @@ enum bt_states {
 	BT_STATE_LONG_BUSY	/* BT doesn't get hosed :-) */
 };
 
-/* Macros seen at the end of state "case" blocks.  They help with legibility
-   and debugging. */
+/*
+ * Macros seen at the end of state "case" blocks.  They help with legibility
+ * and debugging.
+ */
 
-#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
 
 #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; }
 
@@ -110,11 +118,13 @@ struct si_sm_data {
 #define BT_H_BUSY	0x40
 #define BT_B_BUSY	0x80
 
-/* Some bits are toggled on each write: write once to set it, once
-   more to clear it; writing a zero does nothing.  To absolutely
-   clear it, check its state and write if set.  This avoids the "get
-   current then use as mask" scheme to modify one bit.  Note that the
-   variable "bt" is hardcoded into these macros. */
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing.  To absolutely
+ * clear it, check its state and write if set.  This avoids the "get
+ * current then use as mask" scheme to modify one bit.  Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
 
 #define BT_STATUS	bt->io->inputb(bt->io, 0)
 #define BT_CONTROL(x)	bt->io->outputb(bt->io, 0, x)
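The write-once-to-toggle convention the comment describes is easiest to see as code. A minimal sketch using the BT_STATUS/BT_CONTROL macros from this hunk (force_clear_h_busy is a hypothetical helper for illustration; the driver does this inline where needed):

    static void force_clear_h_busy(struct si_sm_data *bt)
    {
    	if (BT_STATUS & BT_H_BUSY)	/* the bit is currently set... */
    		BT_CONTROL(BT_H_BUSY);	/* ...so one more write toggles it clear */
    }

Writing the bit while it is already clear would set it again, which is why the state must be checked before the write.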
@@ -125,8 +135,10 @@ struct si_sm_data {
 #define BT_INTMASK_R	bt->io->inputb(bt->io, 2)
 #define BT_INTMASK_W(x)	bt->io->outputb(bt->io, 2, x)
 
-/* Convenience routines for debugging.  These are not multi-open safe!
-   Note the macros have hardcoded variables in them. */
+/*
+ * Convenience routines for debugging.  These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
 
 static char *state2txt(unsigned char state)
 {
@@ -182,7 +194,8 @@ static char *status2txt(unsigned char status)
 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
 {
 	memset(bt, 0, sizeof(struct si_sm_data));
-	if (bt->io != io) {	/* external: one-time only things */
+	if (bt->io != io) {
+		/* external: one-time only things */
 		bt->io = io;
 		bt->seq = 0;
 	}
@@ -229,7 +242,7 @@ static int bt_start_transaction(struct si_sm_data *bt,
 		printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
 		printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
 		for (i = 0; i < size; i ++)
-			printk (" %02x", data[i]);
+			printk(" %02x", data[i]);
 		printk("\n");
 	}
 	bt->write_data[0] = size + 1;	/* all data plus seq byte */
@@ -246,8 +259,10 @@ static int bt_start_transaction(struct si_sm_data *bt,
 	return 0;
 }
 
-/* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
-   it calls this.  Strip out the length and seq bytes. */
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this.  Strip out the length and seq bytes.
+ */
 
 static int bt_get_result(struct si_sm_data *bt,
 			 unsigned char *data,
@@ -269,10 +284,10 @@ static int bt_get_result(struct si_sm_data *bt,
 		memcpy(data + 2, bt->read_data + 4, msg_len - 2);
 
 	if (bt_debug & BT_DEBUG_MSG) {
-		printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+		printk(KERN_WARNING "BT: result %d bytes:", msg_len);
 		for (i = 0; i < msg_len; i++)
 			printk(" %02x", data[i]);
-		printk ("\n");
+		printk("\n");
 	}
 	return msg_len;
 }
@@ -292,8 +307,10 @@ static void reset_flags(struct si_sm_data *bt)
 	BT_INTMASK_W(BT_BMC_HWRST);
 }
 
-/* Get rid of an unwanted/stale response.  This should only be needed for
-   BMCs that support multiple outstanding requests. */
+/*
+ * Get rid of an unwanted/stale response.  This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
 
 static void drain_BMC2HOST(struct si_sm_data *bt)
 {
@@ -326,8 +343,8 @@ static inline void write_all_bytes(struct si_sm_data *bt)
 		printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
 			bt->write_count, bt->seq);
 		for (i = 0; i < bt->write_count; i++)
-			printk (" %02x", bt->write_data[i]);
-		printk ("\n");
+			printk(" %02x", bt->write_data[i]);
+		printk("\n");
 	}
 	for (i = 0; i < bt->write_count; i++)
 		HOST2BMC(bt->write_data[i]);
@@ -337,8 +354,10 @@ static inline int read_all_bytes(struct si_sm_data *bt)
 {
 	unsigned char i;
 
-	/* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
-	   Keep layout of first four bytes aligned with write_data[] */
+	/*
+	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+	 * Keep layout of first four bytes aligned with write_data[]
+	 */
 
 	bt->read_data[0] = BMC2HOST;
 	bt->read_count = bt->read_data[0];
@@ -362,8 +381,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
 		if (max > 16)
 			max = 16;
 		for (i = 0; i < max; i++)
-			printk (" %02x", bt->read_data[i]);
-		printk ("%s\n", bt->read_count == max ? "" : " ...");
+			printk(KERN_CONT " %02x", bt->read_data[i]);
+			printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
 	}
 
 	/* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
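The KERN_CONT additions in this hunk follow the usual pattern for assembling one log line from several printk calls: the first call opens the line with a real log level, and each continuation is marked KERN_CONT so it is not treated as a new record. A minimal sketch of the same pattern (dump_bytes is a hypothetical illustration, not a function in this driver):

    static void dump_bytes(const unsigned char *buf, int n)
    {
    	int i;

    	printk(KERN_WARNING "BT: %d bytes:", n);	/* opens the line */
    	for (i = 0; i < n; i++)
    		printk(KERN_CONT " %02x", buf[i]);	/* continues it */
    	printk(KERN_CONT "\n");				/* terminates it */
    }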
@@ -402,8 +421,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
 	printk(KERN_WARNING "IPMI BT: %s in %s %s ",	/* open-ended line */
 		reason, STATE2TXT, STATUS2TXT);
 
-	/* Per the IPMI spec, retries are based on the sequence number
-	   known only to this module, so manage a restart here. */
+	/*
+	 * Per the IPMI spec, retries are based on the sequence number
+	 * known only to this module, so manage a restart here.
+	 */
 	(bt->error_retries)++;
 	if (bt->error_retries < bt->BT_CAP_retries) {
 		printk("%d retries left\n",
@@ -412,8 +433,8 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
 		return SI_SM_CALL_WITHOUT_DELAY;
 	}
 
-	printk("failed %d retries, sending error response\n",
+	printk(KERN_WARNING "failed %d retries, sending error response\n",
 	       bt->BT_CAP_retries);
 	if (!bt->nonzero_status)
 		printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
 
@@ -424,8 +445,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
 		return SI_SM_CALL_WITHOUT_DELAY;
 	}
 
-	/* Concoct a useful error message, set up the next state, and
-	   be done with this sequence. */
+	/*
+	 * Concoct a useful error message, set up the next state, and
+	 * be done with this sequence.
+	 */
 
 	bt->state = BT_STATE_IDLE;
 	switch (cCode) {
@@ -461,10 +484,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 		last_printed = bt->state;
 	}
 
-	/* Commands that time out may still (eventually) provide a response.
-	   This stale response will get in the way of a new response so remove
-	   it if possible (hopefully during IDLE).  Even if it comes up later
-	   it will be rejected by its (now-forgotten) seq number. */
+	/*
+	 * Commands that time out may still (eventually) provide a response.
+	 * This stale response will get in the way of a new response so remove
+	 * it if possible (hopefully during IDLE).  Even if it comes up later
+	 * it will be rejected by its (now-forgotten) seq number.
+	 */
 
 	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
 		drain_BMC2HOST(bt);
@@ -472,7 +497,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 	}
 
 	if ((bt->state != BT_STATE_IDLE) &&
-	    (bt->state < BT_STATE_PRINTME)) {	/* check timeout */
+	    (bt->state < BT_STATE_PRINTME)) {
+		/* check timeout */
 		bt->timeout -= time;
 		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
 			return error_recovery(bt,
@@ -482,8 +508,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
 	switch (bt->state) {
 
-	/* Idle state first checks for asynchronous messages from another
-	   channel, then does some opportunistic housekeeping. */
+	/*
+	 * Idle state first checks for asynchronous messages from another
+	 * channel, then does some opportunistic housekeeping.
+	 */
 
 	case BT_STATE_IDLE:
 		if (status & BT_SMS_ATN) {
@@ -531,16 +559,19 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
 		BT_CONTROL(BT_H_BUSY);		/* set */
 
-		/* Uncached, ordered writes should just proceeed serially but
-		   some BMCs don't clear B2H_ATN with one hit.  Fast-path a
-		   workaround without too much penalty to the general case. */
+		/*
+		 * Uncached, ordered writes should just proceeed serially but
+		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+		 * workaround without too much penalty to the general case.
+		 */
 
 		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
 		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
 				SI_SM_CALL_WITHOUT_DELAY);
 
 	case BT_STATE_CLEAR_B2H:
-		if (status & BT_B2H_ATN) {	/* keep hitting it */
+		if (status & BT_B2H_ATN) {
+			/* keep hitting it */
 			BT_CONTROL(BT_B2H_ATN);
 			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
 		}
@@ -548,7 +579,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 				SI_SM_CALL_WITHOUT_DELAY);
 
 	case BT_STATE_READ_BYTES:
-		if (!(status & BT_H_BUSY))	/* check in case of retry */
+		if (!(status & BT_H_BUSY))
+			/* check in case of retry */
 			BT_CONTROL(BT_H_BUSY);
 		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
 		i = read_all_bytes(bt);		/* true == packet seq match */
@@ -599,8 +631,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 		BT_STATE_CHANGE(BT_STATE_XACTION_START,
 				SI_SM_CALL_WITH_DELAY);
 
-	/* Get BT Capabilities, using timing of upper level state machine.
-	   Set outreqs to prevent infinite loop on timeout. */
+	/*
+	 * Get BT Capabilities, using timing of upper level state machine.
+	 * Set outreqs to prevent infinite loop on timeout.
+	 */
 	case BT_STATE_CAPABILITIES_BEGIN:
 		bt->BT_CAP_outreqs = 1;
 		{
@@ -638,10 +672,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
 static int bt_detect(struct si_sm_data *bt)
 {
-	/* It's impossible for the BT status and interrupt registers to be
-	   all 1's, (assuming a properly functioning, self-initialized BMC)
-	   but that's what you get from reading a bogus address, so we
-	   test that first.  The calling routine uses negative logic. */
+	/*
+	 * It's impossible for the BT status and interrupt registers to be
+	 * all 1's, (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.  The calling routine uses negative logic.
+	 */
 
 	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
 		return 1;
@@ -658,8 +694,7 @@ static int bt_size(void)
 	return sizeof(struct si_sm_data);
 }
 
-struct si_sm_handlers bt_smi_handlers =
-{
+struct si_sm_handlers bt_smi_handlers = {
 	.init_data		= bt_init_data,
 	.start_transaction	= bt_start_transaction,
 	.get_result		= bt_get_result,
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index c1b8228cb7b6..80704875794c 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -60,37 +60,58 @@ MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
 /* The states the KCS driver may be in. */
 enum kcs_states {
-	KCS_IDLE,		/* The KCS interface is currently
-				   doing nothing. */
-	KCS_START_OP,		/* We are starting an operation.  The
-				   data is in the output buffer, but
-				   nothing has been done to the
-				   interface yet.  This was added to
-				   the state machine in the spec to
-				   wait for the initial IBF. */
-	KCS_WAIT_WRITE_START,	/* We have written a write cmd to the
-				   interface. */
-	KCS_WAIT_WRITE,		/* We are writing bytes to the
-				   interface. */
-	KCS_WAIT_WRITE_END,	/* We have written the write end cmd
-				   to the interface, and still need to
-				   write the last byte. */
-	KCS_WAIT_READ,		/* We are waiting to read data from
-				   the interface. */
-	KCS_ERROR0,		/* State to transition to the error
-				   handler, this was added to the
-				   state machine in the spec to be
-				   sure IBF was there. */
-	KCS_ERROR1,		/* First stage error handler, wait for
-				   the interface to respond. */
-	KCS_ERROR2,		/* The abort cmd has been written,
-				   wait for the interface to
-				   respond. */
-	KCS_ERROR3,		/* We wrote some data to the
-				   interface, wait for it to switch to
-				   read mode. */
-	KCS_HOSED		/* The hardware failed to follow the
-				   state machine. */
+	/* The KCS interface is currently doing nothing. */
+	KCS_IDLE,
+
+	/*
+	 * We are starting an operation.  The data is in the output
+	 * buffer, but nothing has been done to the interface yet.  This
+	 * was added to the state machine in the spec to wait for the
+	 * initial IBF.
+	 */
+	KCS_START_OP,
+
+	/* We have written a write cmd to the interface. */
+	KCS_WAIT_WRITE_START,
+
+	/* We are writing bytes to the interface. */
+	KCS_WAIT_WRITE,
+
+	/*
+	 * We have written the write end cmd to the interface, and
+	 * still need to write the last byte.
+	 */
+	KCS_WAIT_WRITE_END,
+
+	/* We are waiting to read data from the interface. */
+	KCS_WAIT_READ,
+
+	/*
+	 * State to transition to the error handler, this was added to
+	 * the state machine in the spec to be sure IBF was there.
+	 */
+	KCS_ERROR0,
+
+	/*
+	 * First stage error handler, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR1,
+
+	/*
+	 * The abort cmd has been written, wait for the interface to
+	 * respond.
+	 */
+	KCS_ERROR2,
+
+	/*
+	 * We wrote some data to the interface, wait for it to switch
+	 * to read mode.
+	 */
+	KCS_ERROR3,
+
+	/* The hardware failed to follow the state machine. */
+	KCS_HOSED
 };
 
 #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
@@ -102,8 +123,7 @@ enum kcs_states {
 #define MAX_ERROR_RETRIES 10
 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
 
-struct si_sm_data
-{
+struct si_sm_data {
 	enum kcs_states state;
 	struct si_sm_io *io;
 	unsigned char   write_data[MAX_KCS_WRITE_SIZE];
@@ -187,7 +207,8 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
 	(kcs->error_retries)++;
 	if (kcs->error_retries > MAX_ERROR_RETRIES) {
 		if (kcs_debug & KCS_DEBUG_ENABLE)
-			printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason);
+			printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
+			       reason);
 		kcs->state = KCS_HOSED;
 	} else {
 		kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
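For reference, both limits used here are defined earlier in this file: ERROR0_OBF_WAIT_JIFFIES is (2*HZ), i.e. two seconds regardless of the kernel tick rate (with HZ == 250, for example, that is 500 jiffies), so the error0_timeout armed in the else branch expires two seconds after recovery starts; and after MAX_ERROR_RETRIES (10) failed attempts the interface is declared KCS_HOSED.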
@@ -271,10 +292,9 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
 
 	if (kcs_debug & KCS_DEBUG_MSG) {
 		printk(KERN_DEBUG "start_kcs_transaction -");
-		for (i = 0; i < size; i ++) {
+		for (i = 0; i < size; i++)
 			printk(" %02x", (unsigned char) (data [i]));
-		}
-		printk ("\n");
+		printk("\n");
 	}
 	kcs->error_retries = 0;
 	memcpy(kcs->write_data, data, size);
@@ -305,9 +325,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
 		kcs->read_pos = 3;
 	}
 	if (kcs->truncated) {
-		/* Report a truncated error.  We might overwrite
-		   another error, but that's too bad, the user needs
-		   to know it was truncated. */
+		/*
+		 * Report a truncated error.  We might overwrite
+		 * another error, but that's too bad, the user needs
+		 * to know it was truncated.
+		 */
 		data[2] = IPMI_ERR_MSG_TRUNCATED;
 		kcs->truncated = 0;
 	}
@@ -315,9 +337,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
 	return kcs->read_pos;
 }
 
-/* This implements the state machine defined in the IPMI manual, see
-   that for details on how this works.  Divide that flowchart into
-   sections delimited by "Wait for IBF" and this will become clear. */
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works.  Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
 static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 {
 	unsigned char status;
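The overall shape of kcs_event() is a single dispatch on the kcs_states enum above; the hunks that follow only show fragments of it, so here is a compressed sketch of the structure (illustrative only, with most states and the IBF handling elided):

    static enum si_sm_result kcs_event_sketch(struct si_sm_data *kcs, long time)
    {
    	unsigned char status = read_status(kcs);	/* sample the hardware */

    	switch (kcs->state) {
    	case KCS_IDLE:
    		/* nothing in flight; a stray OBF gets cleared here */
    		return SI_SM_IDLE;
    	case KCS_WAIT_READ:
    		/* pull bytes while the BMC stays in READ_STATE */
    		return SI_SM_CALL_WITH_DELAY;
    	case KCS_HOSED:
    	default:
    		return SI_SM_HOSED;	/* hardware left the spec's flowchart */
    	}
    }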
@@ -388,11 +412,12 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 			write_next_byte(kcs);
 		}
 		break;
 
 	case KCS_WAIT_WRITE_END:
 		if (state != KCS_WRITE_STATE) {
 			start_error_recovery(kcs,
-					     "Not in write state for write end");
+					     "Not in write state"
+					     " for write end");
 			break;
 		}
 		clear_obf(kcs, status);
@@ -413,13 +438,15 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 				return SI_SM_CALL_WITH_DELAY;
 			read_next_byte(kcs);
 		} else {
-			/* We don't implement this exactly like the state
-			   machine in the spec.  Some broken hardware
-			   does not write the final dummy byte to the
-			   read register.  Thus obf will never go high
-			   here.  We just go straight to idle, and we
-			   handle clearing out obf in idle state if it
-			   happens to come in. */
+			/*
+			 * We don't implement this exactly like the state
+			 * machine in the spec.  Some broken hardware
+			 * does not write the final dummy byte to the
+			 * read register.  Thus obf will never go high
+			 * here.  We just go straight to idle, and we
+			 * handle clearing out obf in idle state if it
+			 * happens to come in.
+			 */
 			clear_obf(kcs, status);
 			kcs->orig_write_count = 0;
 			kcs->state = KCS_IDLE;
@@ -430,7 +457,8 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 	case KCS_ERROR0:
 		clear_obf(kcs, status);
 		status = read_status(kcs);
-		if (GET_STATUS_OBF(status)) /* controller isn't responding */
+		if (GET_STATUS_OBF(status))
+			/* controller isn't responding */
 			if (time_before(jiffies, kcs->error0_timeout))
 				return SI_SM_CALL_WITH_TICK_DELAY;
 		write_cmd(kcs, KCS_GET_STATUS_ABORT);
@@ -442,7 +470,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 		write_data(kcs, 0);
 		kcs->state = KCS_ERROR2;
 		break;
-		
+
 	case KCS_ERROR2:
 		if (state != KCS_READ_STATE) {
 			start_error_recovery(kcs,
@@ -456,7 +484,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 		write_data(kcs, KCS_READ_BYTE);
 		kcs->state = KCS_ERROR3;
 		break;
-		
+
 	case KCS_ERROR3:
 		if (state != KCS_IDLE_STATE) {
 			start_error_recovery(kcs,
@@ -475,7 +503,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 			return SI_SM_TRANSACTION_COMPLETE;
 		}
 		break;
-		
+
 	case KCS_HOSED:
 		break;
 	}
@@ -495,10 +523,12 @@ static int kcs_size(void)
 
 static int kcs_detect(struct si_sm_data *kcs)
 {
-	/* It's impossible for the KCS status register to be all 1's,
-	   (assuming a properly functioning, self-initialized BMC)
-	   but that's what you get from reading a bogus address, so we
-	   test that first. */
+	/*
+	 * It's impossible for the KCS status register to be all 1's,
+	 * (assuming a properly functioning, self-initialized BMC)
+	 * but that's what you get from reading a bogus address, so we
+	 * test that first.
+	 */
 	if (read_status(kcs) == 0xff)
 		return 1;
 
@@ -509,8 +539,7 @@ static void kcs_cleanup(struct si_sm_data *kcs)
 {
 }
 
-struct si_sm_handlers kcs_smi_handlers =
-{
+struct si_sm_handlers kcs_smi_handlers = {
 	.init_data         = init_kcs_data,
 	.start_transaction = start_kcs_transaction,
 	.get_result        = get_kcs_result,
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index ba7e75b731c6..97b6225c070b 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -80,7 +80,7 @@
 #define SI_USEC_PER_JIFFY	(1000000/HZ)
 #define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
-#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM request a
-                                      short timeout */
+#define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM request a
+				      short timeout */
 
 /* Bit for BMC global enables. */
 #define IPMI_BMC_RCV_MSG_INTR	0x01
@@ -114,8 +114,7 @@ static char *si_to_str[] = { "kcs", "smic", "bt" };
 
 #define DEVICE_NAME "ipmi_si"
 
-static struct device_driver ipmi_driver =
-{
+static struct device_driver ipmi_driver = {
 	.name = DEVICE_NAME,
 	.bus = &platform_bus_type
 };
@@ -169,8 +168,7 @@ enum si_stat_indexes {
 	SI_NUM_STATS
 };
 
-struct smi_info
-{
+struct smi_info {
 	int                    intf_num;
 	ipmi_smi_t             intf;
 	struct si_sm_data      *si_sm;
@@ -183,8 +181,10 @@ struct smi_info
 	struct ipmi_smi_msg    *curr_msg;
 	enum si_intf_state     si_state;
 
-	/* Used to handle the various types of I/O that can occur with
-	   IPMI */
+	/*
+	 * Used to handle the various types of I/O that can occur with
+	 * IPMI
+	 */
 	struct si_sm_io io;
 	int (*io_setup)(struct smi_info *info);
 	void (*io_cleanup)(struct smi_info *info);
@@ -195,15 +195,18 @@ struct smi_info
 	void (*addr_source_cleanup)(struct smi_info *info);
 	void *addr_source_data;
 
-	/* Per-OEM handler, called from handle_flags().
-	   Returns 1 when handle_flags() needs to be re-run
-	   or 0 indicating it set si_state itself.
-	*/
+	/*
+	 * Per-OEM handler, called from handle_flags().  Returns 1
+	 * when handle_flags() needs to be re-run or 0 indicating it
+	 * set si_state itself.
+	 */
 	int (*oem_data_avail_handler)(struct smi_info *smi_info);
 
-	/* Flags from the last GET_MSG_FLAGS command, used when an ATTN
-	   is set to hold the flags until we are done handling everything
-	   from the flags. */
+	/*
+	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+	 * is set to hold the flags until we are done handling everything
+	 * from the flags.
+	 */
 #define RECEIVE_MSG_AVAIL	0x01
 #define EVENT_MSG_BUFFER_FULL	0x02
 #define WDT_PRE_TIMEOUT_INT	0x08
@@ -211,25 +214,31 @@ struct smi_info
 #define OEM1_DATA_AVAIL		0x40
 #define OEM2_DATA_AVAIL		0x80
 #define OEM_DATA_AVAIL		(OEM0_DATA_AVAIL | \
 				 OEM1_DATA_AVAIL | \
 				 OEM2_DATA_AVAIL)
 	unsigned char       msg_flags;
 
-	/* If set to true, this will request events the next time the
-	   state machine is idle. */
+	/*
+	 * If set to true, this will request events the next time the
+	 * state machine is idle.
+	 */
 	atomic_t            req_events;
 
-	/* If true, run the state machine to completion on every send
-	   call.  Generally used after a panic to make sure stuff goes
-	   out. */
+	/*
+	 * If true, run the state machine to completion on every send
+	 * call.  Generally used after a panic to make sure stuff goes
+	 * out.
+	 */
 	int                 run_to_completion;
 
 	/* The I/O port of an SI interface. */
 	int                 port;
 
-	/* The space between start addresses of the two ports.  For
-	   instance, if the first port is 0xca2 and the spacing is 4, then
-	   the second port is 0xca6. */
+	/*
+	 * The space between start addresses of the two ports.  For
+	 * instance, if the first port is 0xca2 and the spacing is 4, then
+	 * the second port is 0xca6.
+	 */
 	unsigned int        spacing;
 
 	/* zero if no irq; */
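A worked sketch of the spacing rule in the comment above: register idx of an interface lives at base + idx * spacing, so with an assumed base port of 0xca2 and a spacing of 4, register 1 sits at 0xca2 + 1 * 4 = 0xca6. port_setup() later in this patch requests each I/O port with exactly this arithmetic (addr + idx * info->io.regspacing); the helper below is only an illustration, not driver code:

    static unsigned int reg_addr(unsigned int base, unsigned int spacing,
    				 unsigned int idx)
    {
    	return base + idx * spacing;	/* 0xca2 + 1*4 == 0xca6 */
    }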
@@ -244,10 +253,12 @@ struct smi_info
 	/* Used to gracefully stop the timer without race conditions. */
 	atomic_t            stop_operation;
 
-	/* The driver will disable interrupts when it gets into a
-	   situation where it cannot handle messages due to lack of
-	   memory.  Once that situation clears up, it will re-enable
-	   interrupts. */
+	/*
+	 * The driver will disable interrupts when it gets into a
+	 * situation where it cannot handle messages due to lack of
+	 * memory.  Once that situation clears up, it will re-enable
+	 * interrupts.
+	 */
 	int interrupt_disabled;
 
 	/* From the get device id response... */
@@ -257,8 +268,10 @@ struct smi_info
 	struct device *dev;
 	struct platform_device *pdev;
 
-	/* True if we allocated the device, false if it came from
-	 * someplace else (like PCI). */
+	/*
+	 * True if we allocated the device, false if it came from
+	 * someplace else (like PCI).
+	 */
 	int dev_registered;
 
 	/* Slave address, could be reported from DMI. */
@@ -267,7 +280,7 @@ struct smi_info
 	/* Counters and things for the proc filesystem. */
 	atomic_t stats[SI_NUM_STATS];
 
-	struct task_struct *thread;
+	struct task_struct	*thread;
 
 	struct list_head link;
 };
@@ -288,7 +301,7 @@ static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
-static int register_xaction_notifier(struct notifier_block * nb)
+static int register_xaction_notifier(struct notifier_block *nb)
 {
 	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 }
@@ -297,7 +310,7 @@ static void deliver_recv_msg(struct smi_info *smi_info,
 			     struct ipmi_smi_msg *msg)
 {
 	/* Deliver the message to the upper layer with the lock
-           released. */
+	   released. */
 	spin_unlock(&(smi_info->si_lock));
 	ipmi_smi_msg_received(smi_info->intf, msg);
 	spin_lock(&(smi_info->si_lock));
@@ -329,8 +342,10 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 	struct timeval t;
 #endif
 
-	/* No need to save flags, we aleady have interrupts off and we
-	   already hold the SMI lock. */
+	/*
+	 * No need to save flags, we aleady have interrupts off and we
+	 * already hold the SMI lock.
+	 */
 	if (!smi_info->run_to_completion)
 		spin_lock(&(smi_info->msg_lock));
 
@@ -353,7 +368,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 				 link);
 #ifdef DEBUG_TIMING
 		do_gettimeofday(&t);
-		printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+		printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 		err = atomic_notifier_call_chain(&xaction_notifier_list,
 				0, smi_info);
@@ -365,13 +380,12 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 				smi_info->si_sm,
 				smi_info->curr_msg->data,
 				smi_info->curr_msg->data_size);
-		if (err) {
+		if (err)
 			return_hosed_msg(smi_info, err);
-		}
 
 		rv = SI_SM_CALL_WITHOUT_DELAY;
 	}
  out:
 	if (!smi_info->run_to_completion)
 		spin_unlock(&(smi_info->msg_lock));
 
@@ -382,8 +396,10 @@ static void start_enable_irq(struct smi_info *smi_info)
 {
 	unsigned char msg[2];
 
-	/* If we are enabling interrupts, we have to tell the
-	   BMC to use them. */
+	/*
+	 * If we are enabling interrupts, we have to tell the
+	 * BMC to use them.
+	 */
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
@@ -415,10 +431,12 @@ static void start_clear_flags(struct smi_info *smi_info)
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
-/* When we have a situtaion where we run out of memory and cannot
-   allocate messages, we just leave them in the BMC and run the system
-   polled until we can allocate some memory.  Once we have some
-   memory, we will re-enable the interrupt. */
+/*
+ * When we have a situtaion where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory.  Once we have some
+ * memory, we will re-enable the interrupt.
+ */
 static inline void disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
@@ -486,12 +504,11 @@ static void handle_flags(struct smi_info *smi_info)
 					  smi_info->curr_msg->data_size);
 		smi_info->si_state = SI_GETTING_EVENTS;
 	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
 		   smi_info->oem_data_avail_handler) {
 		if (smi_info->oem_data_avail_handler(smi_info))
 			goto retry;
-	} else {
+	} else
 		smi_info->si_state = SI_NORMAL;
-	}
 }
 
 static void handle_transaction_done(struct smi_info *smi_info)
@@ -501,7 +518,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 	struct timeval t;
 
 	do_gettimeofday(&t);
-	printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+	printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 	switch (smi_info->si_state) {
 	case SI_NORMAL:
@@ -514,9 +531,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
 					     smi_info->curr_msg->rsp,
 					     IPMI_MAX_MSG_LENGTH);
 
-		/* Do this here becase deliver_recv_msg() releases the
-		   lock, and a new message can be put in during the
-		   time the lock is released. */
+		/*
+		 * Do this here becase deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
 		msg = smi_info->curr_msg;
 		smi_info->curr_msg = NULL;
 		deliver_recv_msg(smi_info, msg);
@@ -530,12 +549,13 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		/* We got the flags from the SMI, now handle them. */
 		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			/* Error fetching flags, just give up for
-			   now. */
+			/* Error fetching flags, just give up for now. */
 			smi_info->si_state = SI_NORMAL;
 		} else if (len < 4) {
-			/* Hmm, no flags.  That's technically illegal, but
-			   don't use uninitialized data. */
+			/*
+			 * Hmm, no flags.  That's technically illegal, but
+			 * don't use uninitialized data.
+			 */
 			smi_info->si_state = SI_NORMAL;
 		} else {
 			smi_info->msg_flags = msg[3];
@@ -572,9 +592,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
 					     smi_info->curr_msg->rsp,
 					     IPMI_MAX_MSG_LENGTH);
 
-		/* Do this here becase deliver_recv_msg() releases the
-		   lock, and a new message can be put in during the
-		   time the lock is released. */
+		/*
+		 * Do this here becase deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
 		msg = smi_info->curr_msg;
 		smi_info->curr_msg = NULL;
 		if (msg->rsp[2] != 0) {
@@ -587,10 +609,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		} else {
 			smi_inc_stat(smi_info, events);
 
-			/* Do this before we deliver the message
-			   because delivering the message releases the
-			   lock and something else can mess with the
-			   state. */
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
 			handle_flags(smi_info);
 
 			deliver_recv_msg(smi_info, msg);
@@ -606,9 +630,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
 					     smi_info->curr_msg->rsp,
 					     IPMI_MAX_MSG_LENGTH);
 
-		/* Do this here becase deliver_recv_msg() releases the
-		   lock, and a new message can be put in during the
-		   time the lock is released. */
+		/*
+		 * Do this here becase deliver_recv_msg() releases the
+		 * lock, and a new message can be put in during the
+		 * time the lock is released.
+		 */
 		msg = smi_info->curr_msg;
 		smi_info->curr_msg = NULL;
 		if (msg->rsp[2] != 0) {
@@ -621,10 +647,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		} else {
 			smi_inc_stat(smi_info, incoming_messages);
 
-			/* Do this before we deliver the message
-			   because delivering the message releases the
-			   lock and something else can mess with the
-			   state. */
+			/*
+			 * Do this before we deliver the message
+			 * because delivering the message releases the
+			 * lock and something else can mess with the
+			 * state.
+			 */
 			handle_flags(smi_info);
 
 			deliver_recv_msg(smi_info, msg);
@@ -712,46 +740,49 @@ static void handle_transaction_done(struct smi_info *smi_info)
 	}
 }
 
-/* Called on timeouts and events.  Timeouts should pass the elapsed
-   time, interrupts should pass in zero.  Must be called with
-   si_lock held and interrupts disabled. */
+/*
+ * Called on timeouts and events.  Timeouts should pass the elapsed
+ * time, interrupts should pass in zero.  Must be called with
+ * si_lock held and interrupts disabled.
+ */
 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 					   int time)
 {
 	enum si_sm_result si_sm_result;
 
  restart:
-	/* There used to be a loop here that waited a little while
-	   (around 25us) before giving up.  That turned out to be
-	   pointless, the minimum delays I was seeing were in the 300us
-	   range, which is far too long to wait in an interrupt.  So
-	   we just run until the state machine tells us something
-	   happened or it needs a delay. */
+	/*
+	 * There used to be a loop here that waited a little while
+	 * (around 25us) before giving up.  That turned out to be
+	 * pointless, the minimum delays I was seeing were in the 300us
+	 * range, which is far too long to wait in an interrupt.  So
+	 * we just run until the state machine tells us something
+	 * happened or it needs a delay.
+	 */
 	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
 	time = 0;
 	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
-	{
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
-	}
 
-	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
-	{
+	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
 		smi_inc_stat(smi_info, complete_transactions);
 
 		handle_transaction_done(smi_info);
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
-	}
-	else if (si_sm_result == SI_SM_HOSED)
-	{
+	} else if (si_sm_result == SI_SM_HOSED) {
 		smi_inc_stat(smi_info, hosed_count);
 
-		/* Do the before return_hosed_msg, because that
-		   releases the lock. */
+		/*
+		 * Do the before return_hosed_msg, because that
+		 * releases the lock.
+		 */
 		smi_info->si_state = SI_NORMAL;
 		if (smi_info->curr_msg != NULL) {
-			/* If we were handling a user message, format
-			   a response to send to the upper layer to
-			   tell it about the error. */
+			/*
+			 * If we were handling a user message, format
+			 * a response to send to the upper layer to
+			 * tell it about the error.
+			 */
 			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
 		}
 		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
@@ -761,17 +792,18 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 	 * We prefer handling attn over new messages.  But don't do
 	 * this if there is not yet an upper layer to handle anything.
 	 */
-	if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN)
-	{
+	if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
 		unsigned char msg[2];
 
 		smi_inc_stat(smi_info, attentions);
 
-		/* Got a attn, send down a get message flags to see
-		   what's causing it.  It would be better to handle
-		   this in the upper layer, but due to the way
-		   interrupts work with the SMI, that's not really
-		   possible. */
+		/*
+		 * Got a attn, send down a get message flags to see
+		 * what's causing it.  It would be better to handle
+		 * this in the upper layer, but due to the way
+		 * interrupts work with the SMI, that's not really
+		 * possible.
+		 */
 		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 		msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
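Worked out, the two-byte request built just above encodes the network function and LUN in its first byte (IPMI_NETFN_APP_REQUEST is 0x06 in the IPMI spec, and the low two bits of the byte carry the LUN):

    /*
     * msg[0] = 0x06 << 2 = 0x18  -- NetFn 6 (App) in bits 7..2, LUN 0
     * msg[1] = IPMI_GET_MSG_FLAGS_CMD
     */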
@@ -788,13 +820,14 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 		si_sm_result = start_next_msg(smi_info);
 		if (si_sm_result != SI_SM_IDLE)
 			goto restart;
 	}
 
 	if ((si_sm_result == SI_SM_IDLE)
-	    && (atomic_read(&smi_info->req_events)))
-	{
-		/* We are idle and the upper layer requested that I fetch
-		   events, so do so. */
+	    && (atomic_read(&smi_info->req_events))) {
+		/*
+		 * We are idle and the upper layer requested that I fetch
+		 * events, so do so.
+		 */
 		atomic_set(&smi_info->req_events, 0);
 
 		smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -871,11 +904,8 @@ static void sender(void *send_info,
 	spin_unlock_irqrestore(&smi_info->msg_lock, flags);
 
 	spin_lock_irqsave(&smi_info->si_lock, flags);
-	if ((smi_info->si_state == SI_NORMAL)
-	    && (smi_info->curr_msg == NULL))
-	{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
 		start_next_msg(smi_info);
-	}
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
@@ -906,9 +936,8 @@ static int ipmi_thread(void *data)
 		spin_lock_irqsave(&(smi_info->si_lock), flags);
 		smi_result = smi_event_handler(smi_info, 0);
 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
-			/* do nothing */
-		}
+		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY)
 			schedule();
 		else
@@ -959,7 +988,7 @@ static void smi_timeout(unsigned long data)
 	spin_lock_irqsave(&(smi_info->si_lock), flags);
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
-	printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+	printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 	jiffies_now = jiffies;
 	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
@@ -977,8 +1006,10 @@ static void smi_timeout(unsigned long data)
 		goto do_add_timer;
 	}
 
-	/* If the state machine asks for a short delay, then shorten
-	   the timer timeout. */
+	/*
+	 * If the state machine asks for a short delay, then shorten
+	 * the timer timeout.
+	 */
 	if (smi_result == SI_SM_CALL_WITH_DELAY) {
 		smi_inc_stat(smi_info, short_timeouts);
 		smi_info->si_timer.expires = jiffies + 1;
@@ -1005,7 +1036,7 @@ static irqreturn_t si_irq_handler(int irq, void *data)
 
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
-	printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+	printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 	smi_event_handler(smi_info, 0);
 	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
@@ -1048,7 +1079,7 @@ static int smi_start_processing(void *send_info,
 	 * The BT interface is efficient enough to not need a thread,
 	 * and there is no need for a thread if we have interrupts.
 	 */
-	else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) 
+	else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
 		enable = 1;
 
 	if (enable) {
@@ -1074,8 +1105,7 @@ static void set_maintenance_mode(void *send_info, int enable)
 	atomic_set(&smi_info->req_events, 0);
 }
 
-static struct ipmi_smi_handlers handlers =
-{
+static struct ipmi_smi_handlers handlers = {
 	.owner                  = THIS_MODULE,
 	.start_processing       = smi_start_processing,
 	.sender			= sender,
@@ -1085,8 +1115,10 @@ static struct ipmi_smi_handlers handlers =
 	.poll			= poll,
 };
 
-/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
-   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS.
+ */
 
 static LIST_HEAD(smi_infos);
 static DEFINE_MUTEX(smi_infos_lock);
@@ -1277,10 +1309,9 @@ static void port_cleanup(struct smi_info *info)
 	int idx;
 
 	if (addr) {
-		for (idx = 0; idx < info->io_size; idx++) {
+		for (idx = 0; idx < info->io_size; idx++)
 			release_region(addr + idx * info->io.regspacing,
 				       info->io.regsize);
-		}
 	}
 }
 
@@ -1294,8 +1325,10 @@ static int port_setup(struct smi_info *info)
 
 	info->io_cleanup = port_cleanup;
 
-	/* Figure out the actual inb/inw/inl/etc routine to use based
-	   upon the register size. */
+	/*
+	 * Figure out the actual inb/inw/inl/etc routine to use based
+	 * upon the register size.
+	 */
 	switch (info->io.regsize) {
 	case 1:
 		info->io.inputb = port_inb;
@@ -1310,17 +1343,18 @@ static int port_setup(struct smi_info *info)
1310 info->io.outputb = port_outl; 1343 info->io.outputb = port_outl;
1311 break; 1344 break;
1312 default: 1345 default:
1313 printk("ipmi_si: Invalid register size: %d\n", 1346 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
1314 info->io.regsize); 1347 info->io.regsize);
1315 return -EINVAL; 1348 return -EINVAL;
1316 } 1349 }
1317 1350
1318 /* Some BIOSes reserve disjoint I/O regions in their ACPI 1351 /*
1352 * Some BIOSes reserve disjoint I/O regions in their ACPI
1319 * tables. This causes problems when trying to register the 1353 * tables. This causes problems when trying to register the
1320 * entire I/O region. Therefore we must register each I/O 1354 * entire I/O region. Therefore we must register each I/O
1321 * port separately. 1355 * port separately.
1322 */ 1356 */
1323 for (idx = 0; idx < info->io_size; idx++) { 1357 for (idx = 0; idx < info->io_size; idx++) {
1324 if (request_region(addr + idx * info->io.regspacing, 1358 if (request_region(addr + idx * info->io.regspacing,
1325 info->io.regsize, DEVICE_NAME) == NULL) { 1359 info->io.regsize, DEVICE_NAME) == NULL) {
1326 /* Undo allocations */ 1360 /* Undo allocations */
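The inputb/outputb pointers assigned in the switch above are one-byte accessors parameterized by the register spacing. A minimal sketch of the 8-bit pair, modeled on the in-tree port_inb()/port_outb() (addr_data is the base port; the wider variants just substitute inw/inl):

static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
{
	unsigned int addr = io->addr_data;

	return inb(addr + (offset * io->regspacing));
}

static void port_outb(struct si_sm_io *io, unsigned int offset,
		      unsigned char b)
{
	unsigned int addr = io->addr_data;

	outb(b, addr + (offset * io->regspacing));
}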
@@ -1408,8 +1442,10 @@ static int mem_setup(struct smi_info *info)
1408 1442
1409 info->io_cleanup = mem_cleanup; 1443 info->io_cleanup = mem_cleanup;
1410 1444
1411 /* Figure out the actual readb/readw/readl/etc routine to use based 1445 /*
1412 upon the register size. */ 1446 * Figure out the actual readb/readw/readl/etc routine to use based
1447 * upon the register size.
1448 */
1413 switch (info->io.regsize) { 1449 switch (info->io.regsize) {
1414 case 1: 1450 case 1:
1415 info->io.inputb = intf_mem_inb; 1451 info->io.inputb = intf_mem_inb;
@@ -1430,16 +1466,18 @@ static int mem_setup(struct smi_info *info)
1430 break; 1466 break;
1431#endif 1467#endif
1432 default: 1468 default:
1433 printk("ipmi_si: Invalid register size: %d\n", 1469 printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
1434 info->io.regsize); 1470 info->io.regsize);
1435 return -EINVAL; 1471 return -EINVAL;
1436 } 1472 }
1437 1473
1438 /* Calculate the total amount of memory to claim. This is an 1474 /*
1475 * Calculate the total amount of memory to claim. This is an
1439 * unusual looking calculation, but it avoids claiming any 1476 * unusual looking calculation, but it avoids claiming any
1440 * more memory than it has to. It will claim everything 1477 * more memory than it has to. It will claim everything
1441 * between the first address to the end of the last full 1478 * between the first address to the end of the last full
1442 * register. */ 1479 * register.
1480 */
1443 mapsize = ((info->io_size * info->io.regspacing) 1481 mapsize = ((info->io_size * info->io.regspacing)
1444 - (info->io.regspacing - info->io.regsize)); 1482 - (info->io.regspacing - info->io.regsize));
1445 1483
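A worked example of that mapsize expression, with assumed values:

	/*
	 * io_size = 3 registers, regsize = 1 byte, regspacing = 4 bytes:
	 *
	 *   mapsize = (3 * 4) - (4 - 1) = 9
	 *
	 * i.e. the mapping covers base..base+8, stopping at the last
	 * register's final byte instead of claiming the full 12-byte
	 * stride.
	 */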
@@ -1769,9 +1807,11 @@ static __devinit void hardcode_find_bmc(void)
1769 1807
1770#include <linux/acpi.h> 1808#include <linux/acpi.h>
1771 1809
1772/* Once we get an ACPI failure, we don't try any more, because we go 1810/*
1773 through the tables sequentially. Once we don't find a table, there 1811 * Once we get an ACPI failure, we don't try any more, because we go
1774 are no more. */ 1812 * through the tables sequentially. Once we don't find a table, there
1813 * are no more.
1814 */
1775static int acpi_failure; 1815static int acpi_failure;
1776 1816
1777/* For GPE-type interrupts. */ 1817/* For GPE-type interrupts. */
@@ -1834,7 +1874,8 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
1834 1874
1835/* 1875/*
1836 * Defined at 1876 * Defined at
1837 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf 1877 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/
1878 * Docs/TechPapers/IA64/hpspmi.pdf
1838 */ 1879 */
1839struct SPMITable { 1880struct SPMITable {
1840 s8 Signature[4]; 1881 s8 Signature[4];
@@ -1856,14 +1897,18 @@ struct SPMITable {
1856 */ 1897 */
1857 u8 InterruptType; 1898 u8 InterruptType;
1858 1899
1859 /* If bit 0 of InterruptType is set, then this is the SCI 1900 /*
1860 interrupt in the GPEx_STS register. */ 1901 * If bit 0 of InterruptType is set, then this is the SCI
1902 * interrupt in the GPEx_STS register.
1903 */
1861 u8 GPE; 1904 u8 GPE;
1862 1905
1863 s16 Reserved; 1906 s16 Reserved;
1864 1907
1865 /* If bit 1 of InterruptType is set, then this is the I/O 1908 /*
1866 APIC/SAPIC interrupt. */ 1909 * If bit 1 of InterruptType is set, then this is the I/O
1910 * APIC/SAPIC interrupt.
1911 */
1867 u32 GlobalSystemInterrupt; 1912 u32 GlobalSystemInterrupt;
1868 1913
1869 /* The actual register address. */ 1914 /* The actual register address. */
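For context, this is roughly how try_init_acpi() consumes those two InterruptType bits; a condensed sketch, with the irq_setup wiring assumed from the surrounding driver rather than shown in this hunk:

	if (spmi->InterruptType & 1) {
		/* SCI: the GPE field names the bit in GPEx_STS */
		info->irq = spmi->GPE;
		info->irq_setup = acpi_gpe_irq_setup;
	} else if (spmi->InterruptType & 2) {
		/* I/O APIC/SAPIC: GlobalSystemInterrupt is the GSI */
		info->irq = spmi->GlobalSystemInterrupt;
		info->irq_setup = std_irq_setup;
	}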
@@ -1881,7 +1926,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1881 1926
1882 if (spmi->IPMIlegacy != 1) { 1927 if (spmi->IPMIlegacy != 1) {
1883 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); 1928 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1884 return -ENODEV; 1929 return -ENODEV;
1885 } 1930 }
1886 1931
1887 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) 1932 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -1898,8 +1943,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1898 info->addr_source = "ACPI"; 1943 info->addr_source = "ACPI";
1899 1944
1900 /* Figure out the interface type. */ 1945 /* Figure out the interface type. */
1901 switch (spmi->InterfaceType) 1946 switch (spmi->InterfaceType) {
1902 {
1903 case 1: /* KCS */ 1947 case 1: /* KCS */
1904 info->si_type = SI_KCS; 1948 info->si_type = SI_KCS;
1905 break; 1949 break;
@@ -1947,7 +1991,8 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
1947 info->io.addr_type = IPMI_IO_ADDR_SPACE; 1991 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1948 } else { 1992 } else {
1949 kfree(info); 1993 kfree(info);
1950 printk("ipmi_si: Unknown ACPI I/O Address type\n"); 1994 printk(KERN_WARNING
1995 "ipmi_si: Unknown ACPI I/O Address type\n");
1951 return -EIO; 1996 return -EIO;
1952 } 1997 }
1953 info->io.addr_data = spmi->addr.address; 1998 info->io.addr_data = spmi->addr.address;
@@ -1981,8 +2026,7 @@ static __devinit void acpi_find_bmc(void)
1981#endif 2026#endif
1982 2027
1983#ifdef CONFIG_DMI 2028#ifdef CONFIG_DMI
1984struct dmi_ipmi_data 2029struct dmi_ipmi_data {
1985{
1986 u8 type; 2030 u8 type;
1987 u8 addr_space; 2031 u8 addr_space;
1988 unsigned long base_addr; 2032 unsigned long base_addr;
@@ -2007,11 +2051,10 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
2007 /* I/O */ 2051 /* I/O */
2008 base_addr &= 0xFFFE; 2052 base_addr &= 0xFFFE;
2009 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2053 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2010 } 2054 } else
2011 else {
2012 /* Memory */ 2055 /* Memory */
2013 dmi->addr_space = IPMI_MEM_ADDR_SPACE; 2056 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
2014 } 2057
2015 /* If bit 4 of byte 0x10 is set, then the lsb for the address 2058 /* If bit 4 of byte 0x10 is set, then the lsb for the address
2016 is odd. */ 2059 is odd. */
2017 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); 2060 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
@@ -2020,7 +2063,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
2020 2063
2021 /* The top two bits of byte 0x10 hold the register spacing. */ 2064 /* The top two bits of byte 0x10 hold the register spacing. */
2022 reg_spacing = (data[0x10] & 0xC0) >> 6; 2065 reg_spacing = (data[0x10] & 0xC0) >> 6;
2023 switch(reg_spacing){ 2066 switch (reg_spacing) {
2024 case 0x00: /* Byte boundaries */ 2067 case 0x00: /* Byte boundaries */
2025 dmi->offset = 1; 2068 dmi->offset = 1;
2026 break; 2069 break;
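Pulling the byte-0x10 decode together for the new-spec I/O path: bit 4 supplies the low address bit masked off above, and the top two bits select the register spacing. A sketch including the spacing codes this hunk truncates (the 0x01/0x02 values follow the SMBIOS spacing encodings as I recall them, so verify before relying on them):

	u8 b = data[0x10];

	dmi->base_addr = (base_addr & 0xFFFE) | ((b & 0x10) >> 4);

	switch ((b & 0xC0) >> 6) {
	case 0x00: dmi->offset = 1;  break;	/* byte boundaries */
	case 0x01: dmi->offset = 4;  break;	/* 32-bit boundaries (assumed) */
	case 0x02: dmi->offset = 16; break;	/* 16-byte boundaries (assumed) */
	default:   return -EIO;			/* reserved encoding */
	}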
@@ -2036,12 +2079,14 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
2036 } 2079 }
2037 } else { 2080 } else {
2038 /* Old DMI spec. */ 2081 /* Old DMI spec. */
2039 /* Note that technically, the lower bit of the base 2082 /*
2083 * Note that technically, the lower bit of the base
2040 * address should be 1 if the address is I/O and 0 if 2084 * address should be 1 if the address is I/O and 0 if
2041 * the address is in memory. So many systems get that 2085 * the address is in memory. So many systems get that
2042 * wrong (and all that I have seen are I/O) so we just 2086 * wrong (and all that I have seen are I/O) so we just
2043 * ignore that bit and assume I/O. Systems that use 2087 * ignore that bit and assume I/O. Systems that use
2044 * memory should use the newer spec, anyway. */ 2088 * memory should use the newer spec, anyway.
2089 */
2045 dmi->base_addr = base_addr & 0xfffe; 2090 dmi->base_addr = base_addr & 0xfffe;
2046 dmi->addr_space = IPMI_IO_ADDR_SPACE; 2091 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2047 dmi->offset = 1; 2092 dmi->offset = 1;
@@ -2248,13 +2293,13 @@ static struct pci_device_id ipmi_pci_devices[] = {
2248MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); 2293MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2249 2294
2250static struct pci_driver ipmi_pci_driver = { 2295static struct pci_driver ipmi_pci_driver = {
2251 .name = DEVICE_NAME, 2296 .name = DEVICE_NAME,
2252 .id_table = ipmi_pci_devices, 2297 .id_table = ipmi_pci_devices,
2253 .probe = ipmi_pci_probe, 2298 .probe = ipmi_pci_probe,
2254 .remove = __devexit_p(ipmi_pci_remove), 2299 .remove = __devexit_p(ipmi_pci_remove),
2255#ifdef CONFIG_PM 2300#ifdef CONFIG_PM
2256 .suspend = ipmi_pci_suspend, 2301 .suspend = ipmi_pci_suspend,
2257 .resume = ipmi_pci_resume, 2302 .resume = ipmi_pci_resume,
2258#endif 2303#endif
2259}; 2304};
2260#endif /* CONFIG_PCI */ 2305#endif /* CONFIG_PCI */
@@ -2324,7 +2369,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
2324 info->io.addr_data, info->io.regsize, info->io.regspacing, 2369 info->io.addr_data, info->io.regsize, info->io.regspacing,
2325 info->irq); 2370 info->irq);
2326 2371
2327 dev->dev.driver_data = (void*) info; 2372 dev->dev.driver_data = (void *) info;
2328 2373
2329 return try_smi_init(info); 2374 return try_smi_init(info);
2330} 2375}
@@ -2337,14 +2382,16 @@ static int __devexit ipmi_of_remove(struct of_device *dev)
2337 2382
2338static struct of_device_id ipmi_match[] = 2383static struct of_device_id ipmi_match[] =
2339{ 2384{
2340 { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS }, 2385 { .type = "ipmi", .compatible = "ipmi-kcs",
2341 { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC }, 2386 .data = (void *)(unsigned long) SI_KCS },
2342 { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT }, 2387 { .type = "ipmi", .compatible = "ipmi-smic",
2388 .data = (void *)(unsigned long) SI_SMIC },
2389 { .type = "ipmi", .compatible = "ipmi-bt",
2390 .data = (void *)(unsigned long) SI_BT },
2343 {}, 2391 {},
2344}; 2392};
2345 2393
2346static struct of_platform_driver ipmi_of_platform_driver = 2394static struct of_platform_driver ipmi_of_platform_driver = {
2347{
2348 .name = "ipmi", 2395 .name = "ipmi",
2349 .match_table = ipmi_match, 2396 .match_table = ipmi_match,
2350 .probe = ipmi_of_probe, 2397 .probe = ipmi_of_probe,
@@ -2365,32 +2412,32 @@ static int try_get_dev_id(struct smi_info *smi_info)
2365 if (!resp) 2412 if (!resp)
2366 return -ENOMEM; 2413 return -ENOMEM;
2367 2414
2368 /* Do a Get Device ID command, since it comes back with some 2415 /*
2369 useful info. */ 2416 * Do a Get Device ID command, since it comes back with some
2417 * useful info.
2418 */
2370 msg[0] = IPMI_NETFN_APP_REQUEST << 2; 2419 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2371 msg[1] = IPMI_GET_DEVICE_ID_CMD; 2420 msg[1] = IPMI_GET_DEVICE_ID_CMD;
2372 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); 2421 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2373 2422
2374 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 2423 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2375 for (;;) 2424 for (;;) {
2376 {
2377 if (smi_result == SI_SM_CALL_WITH_DELAY || 2425 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2378 smi_result == SI_SM_CALL_WITH_TICK_DELAY) { 2426 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2379 schedule_timeout_uninterruptible(1); 2427 schedule_timeout_uninterruptible(1);
2380 smi_result = smi_info->handlers->event( 2428 smi_result = smi_info->handlers->event(
2381 smi_info->si_sm, 100); 2429 smi_info->si_sm, 100);
2382 } 2430 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
2383 else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
2384 {
2385 smi_result = smi_info->handlers->event( 2431 smi_result = smi_info->handlers->event(
2386 smi_info->si_sm, 0); 2432 smi_info->si_sm, 0);
2387 } 2433 } else
2388 else
2389 break; 2434 break;
2390 } 2435 }
2391 if (smi_result == SI_SM_HOSED) { 2436 if (smi_result == SI_SM_HOSED) {
2392 /* We couldn't get the state machine to run, so whatever's at 2437 /*
2393 the port is probably not an IPMI SMI interface. */ 2438 * We couldn't get the state machine to run, so whatever's at
2439 * the port is probably not an IPMI SMI interface.
2440 */
2394 rv = -ENODEV; 2441 rv = -ENODEV;
2395 goto out; 2442 goto out;
2396 } 2443 }
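The loop above is the standard way to drive one of these state machines synchronously. Distilled into a hypothetical helper (not in the tree; run_to_completion() and the ~100us estimate passed after a sleep are illustrative assumptions):

static enum si_sm_result run_to_completion(struct smi_info *si)
{
	enum si_sm_result r = si->handlers->event(si->si_sm, 0);

	for (;;) {
		if (r == SI_SM_CALL_WITH_DELAY ||
		    r == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			/* tell the SM roughly how long we were away */
			r = si->handlers->event(si->si_sm, 100);
		} else if (r == SI_SM_CALL_WITHOUT_DELAY) {
			r = si->handlers->event(si->si_sm, 0);
		} else {
			return r; /* COMPLETE, HOSED, IDLE or ATTN */
		}
	}
}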
@@ -2476,7 +2523,7 @@ static int param_read_proc(char *page, char **start, off_t off,
2476static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) 2523static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2477{ 2524{
2478 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | 2525 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2479 RECEIVE_MSG_AVAIL); 2526 RECEIVE_MSG_AVAIL);
2480 return 1; 2527 return 1;
2481} 2528}
2482 2529
@@ -2518,10 +2565,9 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2518 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { 2565 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2519 smi_info->oem_data_avail_handler = 2566 smi_info->oem_data_avail_handler =
2520 oem_data_avail_to_receive_msg_avail; 2567 oem_data_avail_to_receive_msg_avail;
2521 } 2568 } else if (ipmi_version_major(id) < 1 ||
2522 else if (ipmi_version_major(id) < 1 || 2569 (ipmi_version_major(id) == 1 &&
2523 (ipmi_version_major(id) == 1 && 2570 ipmi_version_minor(id) < 5)) {
2524 ipmi_version_minor(id) < 5)) {
2525 smi_info->oem_data_avail_handler = 2571 smi_info->oem_data_avail_handler =
2526 oem_data_avail_to_receive_msg_avail; 2572 oem_data_avail_to_receive_msg_avail;
2527 } 2573 }
@@ -2613,8 +2659,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
2613static inline void wait_for_timer_and_thread(struct smi_info *smi_info) 2659static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2614{ 2660{
2615 if (smi_info->intf) { 2661 if (smi_info->intf) {
2616 /* The timer and thread are only running if the 2662 /*
2617 interface has been started up and registered. */ 2663 * The timer and thread are only running if the
2664 * interface has been started up and registered.
2665 */
2618 if (smi_info->thread != NULL) 2666 if (smi_info->thread != NULL)
2619 kthread_stop(smi_info->thread); 2667 kthread_stop(smi_info->thread);
2620 del_timer_sync(&smi_info->si_timer); 2668 del_timer_sync(&smi_info->si_timer);
@@ -2739,7 +2787,7 @@ static int try_smi_init(struct smi_info *new_smi)
2739 /* Allocate the state machine's data and initialize it. */ 2787 /* Allocate the state machine's data and initialize it. */
2740 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); 2788 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
2741 if (!new_smi->si_sm) { 2789 if (!new_smi->si_sm) {
2742 printk(" Could not allocate state machine memory\n"); 2790 printk(KERN_ERR "Could not allocate state machine memory\n");
2743 rv = -ENOMEM; 2791 rv = -ENOMEM;
2744 goto out_err; 2792 goto out_err;
2745 } 2793 }
@@ -2749,7 +2797,7 @@ static int try_smi_init(struct smi_info *new_smi)
2749 /* Now that we know the I/O size, we can set up the I/O. */ 2797 /* Now that we know the I/O size, we can set up the I/O. */
2750 rv = new_smi->io_setup(new_smi); 2798 rv = new_smi->io_setup(new_smi);
2751 if (rv) { 2799 if (rv) {
2752 printk(" Could not set up I/O space\n"); 2800 printk(KERN_ERR "Could not set up I/O space\n");
2753 goto out_err; 2801 goto out_err;
2754 } 2802 }
2755 2803
@@ -2765,8 +2813,10 @@ static int try_smi_init(struct smi_info *new_smi)
2765 goto out_err; 2813 goto out_err;
2766 } 2814 }
2767 2815
2768 /* Attempt a get device id command. If it fails, we probably 2816 /*
2769 don't have a BMC here. */ 2817 * Attempt a get device id command. If it fails, we probably
2818 * don't have a BMC here.
2819 */
2770 rv = try_get_dev_id(new_smi); 2820 rv = try_get_dev_id(new_smi);
2771 if (rv) { 2821 if (rv) {
2772 if (new_smi->addr_source) 2822 if (new_smi->addr_source)
@@ -2791,16 +2841,20 @@ static int try_smi_init(struct smi_info *new_smi)
2791 new_smi->intf_num = smi_num; 2841 new_smi->intf_num = smi_num;
2792 smi_num++; 2842 smi_num++;
2793 2843
2794 /* Start clearing the flags before we enable interrupts or the 2844 /*
2795 timer to avoid racing with the timer. */ 2845 * Start clearing the flags before we enable interrupts or the
2846 * timer to avoid racing with the timer.
2847 */
2796 start_clear_flags(new_smi); 2848 start_clear_flags(new_smi);
2797 /* IRQ is defined to be set when non-zero. */ 2849 /* IRQ is defined to be set when non-zero. */
2798 if (new_smi->irq) 2850 if (new_smi->irq)
2799 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; 2851 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2800 2852
2801 if (!new_smi->dev) { 2853 if (!new_smi->dev) {
2802 /* If we don't already have a device from something 2854 /*
2803 * else (like PCI), then register a new one. */ 2855 * If we don't already have a device from something
2856 * else (like PCI), then register a new one.
2857 */
2804 new_smi->pdev = platform_device_alloc("ipmi_si", 2858 new_smi->pdev = platform_device_alloc("ipmi_si",
2805 new_smi->intf_num); 2859 new_smi->intf_num);
2806 if (rv) { 2860 if (rv) {
@@ -2871,7 +2925,8 @@ static int try_smi_init(struct smi_info *new_smi)
2871 2925
2872 mutex_unlock(&smi_infos_lock); 2926 mutex_unlock(&smi_infos_lock);
2873 2927
2874 printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); 2928 printk(KERN_INFO "IPMI %s interface initialized\n",
2929 si_to_str[new_smi->si_type]);
2875 2930
2876 return 0; 2931 return 0;
2877 2932
@@ -2886,9 +2941,11 @@ static int try_smi_init(struct smi_info *new_smi)
2886 if (new_smi->irq_cleanup) 2941 if (new_smi->irq_cleanup)
2887 new_smi->irq_cleanup(new_smi); 2942 new_smi->irq_cleanup(new_smi);
2888 2943
2889 /* Wait until we know that we are out of any interrupt 2944 /*
2890 handlers might have been running before we freed the 2945 * Wait until we know that we are out of any interrupt
2891 interrupt. */ 2946 * handlers that might have been running before we freed
2947 * the interrupt.
2948 */
2892 synchronize_sched(); 2949 synchronize_sched();
2893 2950
2894 if (new_smi->si_sm) { 2951 if (new_smi->si_sm) {
@@ -2960,11 +3017,10 @@ static __devinit int init_ipmi_si(void)
2960 3017
2961#ifdef CONFIG_PCI 3018#ifdef CONFIG_PCI
2962 rv = pci_register_driver(&ipmi_pci_driver); 3019 rv = pci_register_driver(&ipmi_pci_driver);
2963 if (rv){ 3020 if (rv)
2964 printk(KERN_ERR 3021 printk(KERN_ERR
2965 "init_ipmi_si: Unable to register PCI driver: %d\n", 3022 "init_ipmi_si: Unable to register PCI driver: %d\n",
2966 rv); 3023 rv);
2967 }
2968#endif 3024#endif
2969 3025
2970#ifdef CONFIG_PPC_OF 3026#ifdef CONFIG_PPC_OF
@@ -2993,7 +3049,8 @@ static __devinit int init_ipmi_si(void)
2993 of_unregister_platform_driver(&ipmi_of_platform_driver); 3049 of_unregister_platform_driver(&ipmi_of_platform_driver);
2994#endif 3050#endif
2995 driver_unregister(&ipmi_driver); 3051 driver_unregister(&ipmi_driver);
2996 printk("ipmi_si: Unable to find any System Interface(s)\n"); 3052 printk(KERN_WARNING
3053 "ipmi_si: Unable to find any System Interface(s)\n");
2997 return -ENODEV; 3054 return -ENODEV;
2998 } else { 3055 } else {
2999 mutex_unlock(&smi_infos_lock); 3056 mutex_unlock(&smi_infos_lock);
@@ -3015,13 +3072,17 @@ static void cleanup_one_si(struct smi_info *to_clean)
3015 /* Tell the driver that we are shutting down. */ 3072 /* Tell the driver that we are shutting down. */
3016 atomic_inc(&to_clean->stop_operation); 3073 atomic_inc(&to_clean->stop_operation);
3017 3074
3018 /* Make sure the timer and thread are stopped and will not run 3075 /*
3019 again. */ 3076 * Make sure the timer and thread are stopped and will not run
3077 * again.
3078 */
3020 wait_for_timer_and_thread(to_clean); 3079 wait_for_timer_and_thread(to_clean);
3021 3080
3022 /* Timeouts are stopped, now make sure the interrupts are off 3081 /*
3023 for the device. A little tricky with locks to make sure 3082 * Timeouts are stopped, now make sure the interrupts are off
3024 there are no races. */ 3083 * for the device. A little tricky with locks to make sure
3084 * there are no races.
3085 */
3025 spin_lock_irqsave(&to_clean->si_lock, flags); 3086 spin_lock_irqsave(&to_clean->si_lock, flags);
3026 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3087 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3027 spin_unlock_irqrestore(&to_clean->si_lock, flags); 3088 spin_unlock_irqrestore(&to_clean->si_lock, flags);
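The lock dance the comment refers to follows the usual drop-poll-retake pattern: release the lock before pushing the state machine forward, then retake it before re-testing the exit condition. Sketched (the loop body falls outside this hunk, so this reconstructs the pattern, not the literal code):

	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
		spin_unlock_irqrestore(&to_clean->si_lock, flags);
		poll(to_clean);
		schedule_timeout_uninterruptible(1);
		spin_lock_irqsave(&to_clean->si_lock, flags);
	}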
@@ -3092,4 +3153,5 @@ module_exit(cleanup_ipmi_si);
3092 3153
3093MODULE_LICENSE("GPL"); 3154MODULE_LICENSE("GPL");
3094MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); 3155MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3095MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces."); 3156MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
3157 " system interfaces.");
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index 4b731b24dc16..df89f73475fb 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -34,22 +34,27 @@
34 * 675 Mass Ave, Cambridge, MA 02139, USA. 34 * 675 Mass Ave, Cambridge, MA 02139, USA.
35 */ 35 */
36 36
37/* This is defined by the state machines themselves, it is an opaque 37/*
38 data type for them to use. */ 38 * This is defined by the state machines themselves, it is an opaque
39 * data type for them to use.
40 */
39struct si_sm_data; 41struct si_sm_data;
40 42
41/* The structure for doing I/O in the state machine. The state 43/*
42 machine doesn't have the actual I/O routines, they are done through 44 * The structure for doing I/O in the state machine. The state
43 this interface. */ 45 * machine doesn't have the actual I/O routines, they are done through
44struct si_sm_io 46 * this interface.
45{ 47 */
48struct si_sm_io {
46 unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); 49 unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
47 void (*outputb)(struct si_sm_io *io, 50 void (*outputb)(struct si_sm_io *io,
48 unsigned int offset, 51 unsigned int offset,
49 unsigned char b); 52 unsigned char b);
50 53
51 /* Generic info used by the actual handling routines, the 54 /*
52 state machine shouldn't touch these. */ 55 * Generic info used by the actual handling routines, the
56 * state machine shouldn't touch these.
57 */
53 void __iomem *addr; 58 void __iomem *addr;
54 int regspacing; 59 int regspacing;
55 int regsize; 60 int regsize;
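The state machines never touch hardware directly; everything funnels through these two pointers, which port_setup()/mem_setup() bind to accessors of the configured register size and spacing. A usage sketch (read_reg/write_reg are hypothetical wrappers; real machines use per-register helpers like the SMIC ones later in this patch):

static inline unsigned char read_reg(struct si_sm_io *io, unsigned int off)
{
	return io->inputb(io, off);	/* port_inb or intf_mem_inb underneath */
}

static inline void write_reg(struct si_sm_io *io, unsigned int off,
			     unsigned char b)
{
	io->outputb(io, off, b);	/* spacing/size already folded in */
}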
@@ -59,53 +64,67 @@ struct si_sm_io
59}; 64};
60 65
61/* Results of SMI events. */ 66/* Results of SMI events. */
62enum si_sm_result 67enum si_sm_result {
63{
64 SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ 68 SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
65 SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ 69 SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
 66 SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */ 70 SI_SM_CALL_WITH_TICK_DELAY, /* Delay >=1 tick before calling again. */
67 SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ 71 SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
68 SI_SM_IDLE, /* The SM is in idle state. */ 72 SI_SM_IDLE, /* The SM is in idle state. */
69 SI_SM_HOSED, /* The hardware violated the state machine. */ 73 SI_SM_HOSED, /* The hardware violated the state machine. */
70 SI_SM_ATTN /* The hardware is asserting attn and the 74
71 state machine is idle. */ 75 /*
76 * The hardware is asserting attn and the state machine is
77 * idle.
78 */
79 SI_SM_ATTN
72}; 80};
73 81
74/* Handlers for the SMI state machine. */ 82/* Handlers for the SMI state machine. */
75struct si_sm_handlers 83struct si_sm_handlers {
76{ 84 /*
77 /* Put the version number of the state machine here so the 85 * Put the version number of the state machine here so the
78 upper layer can print it. */ 86 * upper layer can print it.
87 */
79 char *version; 88 char *version;
80 89
81 /* Initialize the data and return the amount of I/O space to 90 /*
82 reserve for the space. */ 91 * Initialize the data and return the amount of I/O space to
92 * reserve for the space.
93 */
83 unsigned int (*init_data)(struct si_sm_data *smi, 94 unsigned int (*init_data)(struct si_sm_data *smi,
84 struct si_sm_io *io); 95 struct si_sm_io *io);
85 96
86 /* Start a new transaction in the state machine. This will 97 /*
87 return -2 if the state machine is not idle, -1 if the size 98 * Start a new transaction in the state machine. This will
88 is invalid (to large or too small), or 0 if the transaction 99 * return -2 if the state machine is not idle, -1 if the size
89 is successfully completed. */ 100 * is invalid (to large or too small), or 0 if the transaction
 89 is successfully completed. */ 100 * is invalid (too large or too small), or 0 if the transaction
102 */
90 int (*start_transaction)(struct si_sm_data *smi, 103 int (*start_transaction)(struct si_sm_data *smi,
91 unsigned char *data, unsigned int size); 104 unsigned char *data, unsigned int size);
92 105
93 /* Return the results after the transaction. This will return 106 /*
94 -1 if the buffer is too small, zero if no transaction is 107 * Return the results after the transaction. This will return
95 present, or the actual length of the result data. */ 108 * -1 if the buffer is too small, zero if no transaction is
109 * present, or the actual length of the result data.
110 */
96 int (*get_result)(struct si_sm_data *smi, 111 int (*get_result)(struct si_sm_data *smi,
97 unsigned char *data, unsigned int length); 112 unsigned char *data, unsigned int length);
98 113
99 /* Call this periodically (for a polled interface) or upon 114 /*
100 receiving an interrupt (for a interrupt-driven interface). 115 * Call this periodically (for a polled interface) or upon
101 If interrupt driven, you should probably poll this 116 * receiving an interrupt (for an interrupt-driven interface).
102 periodically when not in idle state. This should be called 117 * If interrupt driven, you should probably poll this
103 with the time that passed since the last call, if it is 118 * periodically when not in idle state. This should be called
104 significant. Time is in microseconds. */ 119 * with the time that passed since the last call, if it is
120 * significant. Time is in microseconds.
121 */
105 enum si_sm_result (*event)(struct si_sm_data *smi, long time); 122 enum si_sm_result (*event)(struct si_sm_data *smi, long time);
106 123
107 /* Attempt to detect an SMI. Returns 0 on success or nonzero 124 /*
108 on failure. */ 125 * Attempt to detect an SMI. Returns 0 on success or nonzero
126 * on failure.
127 */
109 int (*detect)(struct si_sm_data *smi); 128 int (*detect)(struct si_sm_data *smi);
110 129
111 /* The interface is shutting down, so clean it up. */ 130 /* The interface is shutting down, so clean it up. */
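The call sequence the driver follows against this ops table, condensed from try_smi_init() and try_get_dev_id() in ipmi_si_intf.c (a sketch; error handling trimmed):

	si_sm = kmalloc(handlers->size(), GFP_KERNEL);	/* opaque SM state */
	io_size = handlers->init_data(si_sm, &io);	/* I/O span to claim */

	if (handlers->detect(si_sm))			/* nonzero: nothing there */
		goto no_bmc;

	handlers->start_transaction(si_sm, msg, 2);
	do {						/* poll as shown earlier */
		r = handlers->event(si_sm, 100);
		if (r == SI_SM_CALL_WITH_DELAY ||
		    r == SI_SM_CALL_WITH_TICK_DELAY)
			schedule_timeout_uninterruptible(1);
	} while (r != SI_SM_TRANSACTION_COMPLETE && r != SI_SM_HOSED);
	len = handlers->get_result(si_sm, resp, IPMI_MAX_MSG_LENGTH);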
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index e64ea7d25d24..faed92971907 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -85,6 +85,7 @@ enum smic_states {
85/* SMIC Flags Register Bits */ 85/* SMIC Flags Register Bits */
86#define SMIC_RX_DATA_READY 0x80 86#define SMIC_RX_DATA_READY 0x80
87#define SMIC_TX_DATA_READY 0x40 87#define SMIC_TX_DATA_READY 0x40
88
88/* 89/*
89 * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by 90 * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
90 * a few systems, and then only by Systems Management 91 * a few systems, and then only by Systems Management
@@ -104,23 +105,22 @@ enum smic_states {
104#define EC_ILLEGAL_COMMAND 0x04 105#define EC_ILLEGAL_COMMAND 0x04
105#define EC_BUFFER_FULL 0x05 106#define EC_BUFFER_FULL 0x05
106 107
107struct si_sm_data 108struct si_sm_data {
108{
109 enum smic_states state; 109 enum smic_states state;
110 struct si_sm_io *io; 110 struct si_sm_io *io;
111 unsigned char write_data[MAX_SMIC_WRITE_SIZE]; 111 unsigned char write_data[MAX_SMIC_WRITE_SIZE];
112 int write_pos; 112 int write_pos;
113 int write_count; 113 int write_count;
114 int orig_write_count; 114 int orig_write_count;
115 unsigned char read_data[MAX_SMIC_READ_SIZE]; 115 unsigned char read_data[MAX_SMIC_READ_SIZE];
116 int read_pos; 116 int read_pos;
117 int truncated; 117 int truncated;
118 unsigned int error_retries; 118 unsigned int error_retries;
119 long smic_timeout; 119 long smic_timeout;
120}; 120};
121 121
122static unsigned int init_smic_data (struct si_sm_data *smic, 122static unsigned int init_smic_data(struct si_sm_data *smic,
123 struct si_sm_io *io) 123 struct si_sm_io *io)
124{ 124{
125 smic->state = SMIC_IDLE; 125 smic->state = SMIC_IDLE;
126 smic->io = io; 126 smic->io = io;
@@ -150,11 +150,10 @@ static int start_smic_transaction(struct si_sm_data *smic,
150 return IPMI_NOT_IN_MY_STATE_ERR; 150 return IPMI_NOT_IN_MY_STATE_ERR;
151 151
152 if (smic_debug & SMIC_DEBUG_MSG) { 152 if (smic_debug & SMIC_DEBUG_MSG) {
153 printk(KERN_INFO "start_smic_transaction -"); 153 printk(KERN_DEBUG "start_smic_transaction -");
154 for (i = 0; i < size; i ++) { 154 for (i = 0; i < size; i++)
155 printk (" %02x", (unsigned char) (data [i])); 155 printk(" %02x", (unsigned char) data[i]);
156 } 156 printk("\n");
157 printk ("\n");
158 } 157 }
159 smic->error_retries = 0; 158 smic->error_retries = 0;
160 memcpy(smic->write_data, data, size); 159 memcpy(smic->write_data, data, size);
@@ -173,11 +172,10 @@ static int smic_get_result(struct si_sm_data *smic,
173 int i; 172 int i;
174 173
175 if (smic_debug & SMIC_DEBUG_MSG) { 174 if (smic_debug & SMIC_DEBUG_MSG) {
176 printk (KERN_INFO "smic_get result -"); 175 printk(KERN_DEBUG "smic_get result -");
177 for (i = 0; i < smic->read_pos; i ++) { 176 for (i = 0; i < smic->read_pos; i++)
178 printk (" %02x", (smic->read_data [i])); 177 printk(" %02x", smic->read_data[i]);
179 } 178 printk("\n");
180 printk ("\n");
181 } 179 }
182 if (length < smic->read_pos) { 180 if (length < smic->read_pos) {
183 smic->read_pos = length; 181 smic->read_pos = length;
@@ -223,8 +221,8 @@ static inline void write_smic_control(struct si_sm_data *smic,
223 smic->io->outputb(smic->io, 1, control); 221 smic->io->outputb(smic->io, 1, control);
224} 222}
225 223
226static inline void write_si_sm_data (struct si_sm_data *smic, 224static inline void write_si_sm_data(struct si_sm_data *smic,
227 unsigned char data) 225 unsigned char data)
228{ 226{
229 smic->io->outputb(smic->io, 0, data); 227 smic->io->outputb(smic->io, 0, data);
230} 228}
@@ -233,10 +231,9 @@ static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
233{ 231{
234 (smic->error_retries)++; 232 (smic->error_retries)++;
235 if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { 233 if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
236 if (smic_debug & SMIC_DEBUG_ENABLE) { 234 if (smic_debug & SMIC_DEBUG_ENABLE)
237 printk(KERN_WARNING 235 printk(KERN_WARNING
238 "ipmi_smic_drv: smic hosed: %s\n", reason); 236 "ipmi_smic_drv: smic hosed: %s\n", reason);
239 }
240 smic->state = SMIC_HOSED; 237 smic->state = SMIC_HOSED;
241 } else { 238 } else {
242 smic->write_count = smic->orig_write_count; 239 smic->write_count = smic->orig_write_count;
@@ -254,14 +251,14 @@ static inline void write_next_byte(struct si_sm_data *smic)
254 (smic->write_count)--; 251 (smic->write_count)--;
255} 252}
256 253
257static inline void read_next_byte (struct si_sm_data *smic) 254static inline void read_next_byte(struct si_sm_data *smic)
258{ 255{
259 if (smic->read_pos >= MAX_SMIC_READ_SIZE) { 256 if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
260 read_smic_data (smic); 257 read_smic_data(smic);
261 smic->truncated = 1; 258 smic->truncated = 1;
262 } else { 259 } else {
263 smic->read_data[smic->read_pos] = read_smic_data(smic); 260 smic->read_data[smic->read_pos] = read_smic_data(smic);
264 (smic->read_pos)++; 261 smic->read_pos++;
265 } 262 }
266} 263}
267 264
@@ -336,7 +333,7 @@ static inline void read_next_byte (struct si_sm_data *smic)
336 SMIC_SC_SMS_RD_END 0xC6 333 SMIC_SC_SMS_RD_END 0xC6
337*/ 334*/
338 335
339static enum si_sm_result smic_event (struct si_sm_data *smic, long time) 336static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
340{ 337{
341 unsigned char status; 338 unsigned char status;
342 unsigned char flags; 339 unsigned char flags;
@@ -347,13 +344,15 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
347 return SI_SM_HOSED; 344 return SI_SM_HOSED;
348 } 345 }
349 if (smic->state != SMIC_IDLE) { 346 if (smic->state != SMIC_IDLE) {
350 if (smic_debug & SMIC_DEBUG_STATES) { 347 if (smic_debug & SMIC_DEBUG_STATES)
351 printk(KERN_INFO 348 printk(KERN_DEBUG
352 "smic_event - smic->smic_timeout = %ld," 349 "smic_event - smic->smic_timeout = %ld,"
353 " time = %ld\n", 350 " time = %ld\n",
354 smic->smic_timeout, time); 351 smic->smic_timeout, time);
355 } 352 /*
356/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */ 353 * FIXME: smic_event is sometimes called with time >
354 * SMIC_RETRY_TIMEOUT
355 */
357 if (time < SMIC_RETRY_TIMEOUT) { 356 if (time < SMIC_RETRY_TIMEOUT) {
358 smic->smic_timeout -= time; 357 smic->smic_timeout -= time;
359 if (smic->smic_timeout < 0) { 358 if (smic->smic_timeout < 0) {
@@ -366,9 +365,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
366 if (flags & SMIC_FLAG_BSY) 365 if (flags & SMIC_FLAG_BSY)
367 return SI_SM_CALL_WITH_DELAY; 366 return SI_SM_CALL_WITH_DELAY;
368 367
369 status = read_smic_status (smic); 368 status = read_smic_status(smic);
370 if (smic_debug & SMIC_DEBUG_STATES) 369 if (smic_debug & SMIC_DEBUG_STATES)
371 printk(KERN_INFO 370 printk(KERN_DEBUG
372 "smic_event - state = %d, flags = 0x%02x," 371 "smic_event - state = %d, flags = 0x%02x,"
373 " status = 0x%02x\n", 372 " status = 0x%02x\n",
374 smic->state, flags, status); 373 smic->state, flags, status);
@@ -377,9 +376,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
377 case SMIC_IDLE: 376 case SMIC_IDLE:
378 /* in IDLE we check for available messages */ 377 /* in IDLE we check for available messages */
379 if (flags & SMIC_SMS_DATA_AVAIL) 378 if (flags & SMIC_SMS_DATA_AVAIL)
380 {
381 return SI_SM_ATTN; 379 return SI_SM_ATTN;
382 }
383 return SI_SM_IDLE; 380 return SI_SM_IDLE;
384 381
385 case SMIC_START_OP: 382 case SMIC_START_OP:
@@ -391,7 +388,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
391 388
392 case SMIC_OP_OK: 389 case SMIC_OP_OK:
393 if (status != SMIC_SC_SMS_READY) { 390 if (status != SMIC_SC_SMS_READY) {
394 /* this should not happen */ 391 /* this should not happen */
395 start_error_recovery(smic, 392 start_error_recovery(smic,
396 "state = SMIC_OP_OK," 393 "state = SMIC_OP_OK,"
397 " status != SMIC_SC_SMS_READY"); 394 " status != SMIC_SC_SMS_READY");
@@ -411,8 +408,10 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
411 "status != SMIC_SC_SMS_WR_START"); 408 "status != SMIC_SC_SMS_WR_START");
412 return SI_SM_CALL_WITH_DELAY; 409 return SI_SM_CALL_WITH_DELAY;
413 } 410 }
414 /* we must not issue WR_(NEXT|END) unless 411 /*
415 TX_DATA_READY is set */ 412 * we must not issue WR_(NEXT|END) unless
413 * TX_DATA_READY is set
414 */
416 if (flags & SMIC_TX_DATA_READY) { 415 if (flags & SMIC_TX_DATA_READY) {
417 if (smic->write_count == 1) { 416 if (smic->write_count == 1) {
418 /* last byte */ 417 /* last byte */
@@ -424,10 +423,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
424 } 423 }
425 write_next_byte(smic); 424 write_next_byte(smic);
426 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 425 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
427 } 426 } else
428 else {
429 return SI_SM_CALL_WITH_DELAY; 427 return SI_SM_CALL_WITH_DELAY;
430 }
431 break; 428 break;
432 429
433 case SMIC_WRITE_NEXT: 430 case SMIC_WRITE_NEXT:
@@ -442,52 +439,48 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
442 if (smic->write_count == 1) { 439 if (smic->write_count == 1) {
443 write_smic_control(smic, SMIC_CC_SMS_WR_END); 440 write_smic_control(smic, SMIC_CC_SMS_WR_END);
444 smic->state = SMIC_WRITE_END; 441 smic->state = SMIC_WRITE_END;
445 } 442 } else {
446 else {
447 write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); 443 write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
448 smic->state = SMIC_WRITE_NEXT; 444 smic->state = SMIC_WRITE_NEXT;
449 } 445 }
450 write_next_byte(smic); 446 write_next_byte(smic);
451 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 447 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
452 } 448 } else
453 else {
454 return SI_SM_CALL_WITH_DELAY; 449 return SI_SM_CALL_WITH_DELAY;
455 }
456 break; 450 break;
457 451
458 case SMIC_WRITE_END: 452 case SMIC_WRITE_END:
459 if (status != SMIC_SC_SMS_WR_END) { 453 if (status != SMIC_SC_SMS_WR_END) {
460 start_error_recovery (smic, 454 start_error_recovery(smic,
461 "state = SMIC_WRITE_END, " 455 "state = SMIC_WRITE_END, "
462 "status != SMIC_SC_SMS_WR_END"); 456 "status != SMIC_SC_SMS_WR_END");
463 return SI_SM_CALL_WITH_DELAY; 457 return SI_SM_CALL_WITH_DELAY;
464 } 458 }
465 /* data register holds an error code */ 459 /* data register holds an error code */
466 data = read_smic_data(smic); 460 data = read_smic_data(smic);
467 if (data != 0) { 461 if (data != 0) {
468 if (smic_debug & SMIC_DEBUG_ENABLE) { 462 if (smic_debug & SMIC_DEBUG_ENABLE)
469 printk(KERN_INFO 463 printk(KERN_DEBUG
470 "SMIC_WRITE_END: data = %02x\n", data); 464 "SMIC_WRITE_END: data = %02x\n", data);
471 }
472 start_error_recovery(smic, 465 start_error_recovery(smic,
473 "state = SMIC_WRITE_END, " 466 "state = SMIC_WRITE_END, "
474 "data != SUCCESS"); 467 "data != SUCCESS");
475 return SI_SM_CALL_WITH_DELAY; 468 return SI_SM_CALL_WITH_DELAY;
476 } else { 469 } else
477 smic->state = SMIC_WRITE2READ; 470 smic->state = SMIC_WRITE2READ;
478 }
479 break; 471 break;
480 472
481 case SMIC_WRITE2READ: 473 case SMIC_WRITE2READ:
482 /* we must wait for RX_DATA_READY to be set before we 474 /*
483 can continue */ 475 * we must wait for RX_DATA_READY to be set before we
476 * can continue
477 */
484 if (flags & SMIC_RX_DATA_READY) { 478 if (flags & SMIC_RX_DATA_READY) {
485 write_smic_control(smic, SMIC_CC_SMS_RD_START); 479 write_smic_control(smic, SMIC_CC_SMS_RD_START);
486 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 480 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
487 smic->state = SMIC_READ_START; 481 smic->state = SMIC_READ_START;
488 } else { 482 } else
489 return SI_SM_CALL_WITH_DELAY; 483 return SI_SM_CALL_WITH_DELAY;
490 }
491 break; 484 break;
492 485
493 case SMIC_READ_START: 486 case SMIC_READ_START:
@@ -502,15 +495,16 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
502 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); 495 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
503 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 496 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
504 smic->state = SMIC_READ_NEXT; 497 smic->state = SMIC_READ_NEXT;
505 } else { 498 } else
506 return SI_SM_CALL_WITH_DELAY; 499 return SI_SM_CALL_WITH_DELAY;
507 }
508 break; 500 break;
509 501
510 case SMIC_READ_NEXT: 502 case SMIC_READ_NEXT:
511 switch (status) { 503 switch (status) {
512 /* smic tells us that this is the last byte to be read 504 /*
513 --> clean up */ 505 * smic tells us that this is the last byte to be read
506 * --> clean up
507 */
514 case SMIC_SC_SMS_RD_END: 508 case SMIC_SC_SMS_RD_END:
515 read_next_byte(smic); 509 read_next_byte(smic);
516 write_smic_control(smic, SMIC_CC_SMS_RD_END); 510 write_smic_control(smic, SMIC_CC_SMS_RD_END);
@@ -523,9 +517,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
523 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); 517 write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
524 write_smic_flags(smic, flags | SMIC_FLAG_BSY); 518 write_smic_flags(smic, flags | SMIC_FLAG_BSY);
525 smic->state = SMIC_READ_NEXT; 519 smic->state = SMIC_READ_NEXT;
526 } else { 520 } else
527 return SI_SM_CALL_WITH_DELAY; 521 return SI_SM_CALL_WITH_DELAY;
528 }
529 break; 522 break;
530 default: 523 default:
531 start_error_recovery( 524 start_error_recovery(
@@ -546,10 +539,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
546 data = read_smic_data(smic); 539 data = read_smic_data(smic);
547 /* data register holds an error code */ 540 /* data register holds an error code */
548 if (data != 0) { 541 if (data != 0) {
549 if (smic_debug & SMIC_DEBUG_ENABLE) { 542 if (smic_debug & SMIC_DEBUG_ENABLE)
550 printk(KERN_INFO 543 printk(KERN_DEBUG
551 "SMIC_READ_END: data = %02x\n", data); 544 "SMIC_READ_END: data = %02x\n", data);
552 }
553 start_error_recovery(smic, 545 start_error_recovery(smic,
554 "state = SMIC_READ_END, " 546 "state = SMIC_READ_END, "
555 "data != SUCCESS"); 547 "data != SUCCESS");
@@ -565,7 +557,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
565 557
566 default: 558 default:
567 if (smic_debug & SMIC_DEBUG_ENABLE) { 559 if (smic_debug & SMIC_DEBUG_ENABLE) {
568 printk(KERN_WARNING "smic->state = %d\n", smic->state); 560 printk(KERN_DEBUG "smic->state = %d\n", smic->state);
569 start_error_recovery(smic, "state = UNKNOWN"); 561 start_error_recovery(smic, "state = UNKNOWN");
570 return SI_SM_CALL_WITH_DELAY; 562 return SI_SM_CALL_WITH_DELAY;
571 } 563 }
@@ -576,10 +568,12 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
576 568
577static int smic_detect(struct si_sm_data *smic) 569static int smic_detect(struct si_sm_data *smic)
578{ 570{
579 /* It's impossible for the SMIC fnags register to be all 1's, 571 /*
580 (assuming a properly functioning, self-initialized BMC) 572 * It's impossible for the SMIC fnags register to be all 1's,
581 but that's what you get from reading a bogus address, so we 573 * (assuming a properly functioning, self-initialized BMC)
582 test that first. */ 574 * but that's what you get from reading a bogus address, so we
575 * test that first.
576 */
583 if (read_smic_flags(smic) == 0xff) 577 if (read_smic_flags(smic) == 0xff)
584 return 1; 578 return 1;
585 579
@@ -595,8 +589,7 @@ static int smic_size(void)
595 return sizeof(struct si_sm_data); 589 return sizeof(struct si_sm_data);
596} 590}
597 591
598struct si_sm_handlers smic_smi_handlers = 592struct si_sm_handlers smic_smi_handlers = {
599{
600 .init_data = init_smic_data, 593 .init_data = init_smic_data,
601 .start_transaction = start_smic_transaction, 594 .start_transaction = start_smic_transaction,
602 .get_result = smic_get_result, 595 .get_result = smic_get_result,