Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c      |  38
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c     |  48
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 952
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c   |   4
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c    | 183
-rw-r--r--  drivers/char/ipmi/ipmi_si_sm.h      |   1
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c    |  15
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c   | 259
8 files changed, 918 insertions(+), 582 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 33862670e285..58dcdee1cd71 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -28,6 +28,8 @@
 
 #include <linux/kernel.h> /* For printk. */
 #include <linux/string.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/ipmi_msgdefs.h>	/* for completion codes */
 #include "ipmi_si_sm.h"
 
@@ -36,6 +38,8 @@ static int bt_debug = 0x00; /* Production value 0, see following flags */
 #define BT_DEBUG_ENABLE	1
 #define BT_DEBUG_MSG	2
 #define BT_DEBUG_STATES	4
+module_param(bt_debug, int, 0644);
+MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
 /* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
    and 64 byte buffers.  However, one HP implementation wants 255 bytes of
@@ -43,7 +47,7 @@ static int bt_debug = 0x00; /* Production value 0, see following flags */
    Since the Open IPMI architecture is single-message oriented at this
    stage, the queue depth of BT is of no concern. */
 
-#define BT_NORMAL_TIMEOUT	2000000	/* seconds in microseconds */
+#define BT_NORMAL_TIMEOUT	5000000	/* seconds in microseconds */
 #define BT_RETRY_LIMIT	2
 #define BT_RESET_DELAY	6000000	/* 6 seconds after warm reset */
 
@@ -202,7 +206,7 @@ static int bt_get_result(struct si_sm_data *bt,
 	msg_len = bt->read_count - 2;	/* account for length & seq */
 	/* Always NetFn, Cmd, cCode */
 	if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) {
-		printk(KERN_WARNING "BT results: bad msg_len = %d\n", msg_len);
+		printk(KERN_DEBUG "BT results: bad msg_len = %d\n", msg_len);
 		data[0] = bt->write_data[1] | 0x4;	/* Kludge a response */
 		data[1] = bt->write_data[3];
 		data[2] = IPMI_ERR_UNSPECIFIED;
@@ -240,7 +244,7 @@ static void reset_flags(struct si_sm_data *bt)
 	BT_CONTROL(BT_B_BUSY);
 	BT_CONTROL(BT_CLR_WR_PTR);
 	BT_CONTROL(BT_SMS_ATN);
-#ifdef DEVELOPMENT_ONLY_NOT_FOR_PRODUCTION
+
 	if (BT_STATUS & BT_B2H_ATN) {
 		int i;
 		BT_CONTROL(BT_H_BUSY);
@@ -250,7 +254,6 @@ static void reset_flags(struct si_sm_data *bt)
 			BMC2HOST;
 		BT_CONTROL(BT_H_BUSY);
 	}
-#endif
 }
 
 static inline void write_all_bytes(struct si_sm_data *bt)
@@ -295,7 +298,7 @@ static inline int read_all_bytes(struct si_sm_data *bt)
 		printk ("\n");
 	}
 	if (bt->seq != bt->write_data[2])	/* idiot check */
-		printk(KERN_WARNING "BT: internal error: sequence mismatch\n");
+		printk(KERN_DEBUG "BT: internal error: sequence mismatch\n");
 
 	/* per the spec, the (NetFn, Seq, Cmd) tuples should match */
 	if ((bt->read_data[3] == bt->write_data[3]) &&	/* Cmd */
@@ -321,18 +324,17 @@ static void error_recovery(struct si_sm_data *bt, char *reason)
 	bt->timeout = BT_NORMAL_TIMEOUT; /* various places want to retry */
 
 	status = BT_STATUS;
-	printk(KERN_WARNING "BT: %s in %s %s ", reason, STATE2TXT,
+	printk(KERN_DEBUG "BT: %s in %s %s\n", reason, STATE2TXT,
 	       STATUS2TXT(buf));
 
 	(bt->error_retries)++;
 	if (bt->error_retries > BT_RETRY_LIMIT) {
-		printk("retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
+		printk(KERN_DEBUG "retry limit (%d) exceeded\n", BT_RETRY_LIMIT);
 		bt->state = BT_STATE_HOSED;
 		if (!bt->nonzero_status)
 			printk(KERN_ERR "IPMI: BT stuck, try power cycle\n");
-		else if (bt->seq == FIRST_SEQ + BT_RETRY_LIMIT) {
-			/* most likely during insmod */
-			printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n");
+		else if (bt->error_retries <= BT_RETRY_LIMIT + 1) {
+			printk(KERN_DEBUG "IPMI: BT reset (takes 5 secs)\n");
 			bt->state = BT_STATE_RESET1;
 		}
 	return;
@@ -340,11 +342,11 @@ static void error_recovery(struct si_sm_data *bt, char *reason)
 
 	/* Sometimes the BMC queues get in an "off-by-one" state...*/
 	if ((bt->state == BT_STATE_B2H_WAIT) && (status & BT_B2H_ATN)) {
-		printk("retry B2H_WAIT\n");
+		printk(KERN_DEBUG "retry B2H_WAIT\n");
 		return;
 	}
 
-	printk("restart command\n");
+	printk(KERN_DEBUG "restart command\n");
 	bt->state = BT_STATE_RESTART;
 }
@@ -372,17 +374,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 		return SI_SM_HOSED;
 
 	if (bt->state != BT_STATE_IDLE) {	/* do timeout test */
-
-		/* Certain states, on error conditions, can lock up a CPU
-		   because they are effectively in an infinite loop with
-		   CALL_WITHOUT_DELAY (right back here with time == 0).
-		   Prevent infinite lockup by ALWAYS decrementing timeout. */
-
-	/* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT
-		  (noticed in ipmi_smic_sm.c January 2004) */
-
-		if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT))
-			time = 100;
 		bt->timeout -= time;
 		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
 			error_recovery(bt, "timed out");
@@ -483,6 +474,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 		break;
 
 	case BT_STATE_RESTART:	/* don't reset retries! */
+		reset_flags(bt);
 		bt->write_data[2] = ++bt->seq;
 		bt->read_count = 0;
 		bt->nonzero_status = 0;
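
Both state machines in this patch replace compile-time debug #ifdefs with a runtime debug bitmask exposed through module_param(). For reference, here is a minimal, self-contained sketch of that idiom; every ex_-prefixed name is hypothetical and not part of the patch, only module_param() and MODULE_PARM_DESC() are the actual kernel interfaces being illustrated.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define EX_DEBUG_ENABLE	1	/* bit 0: basic tracing */
#define EX_DEBUG_MSG	2	/* bit 1: dump messages */
#define EX_DEBUG_STATES	4	/* bit 2: log state changes */

static int ex_debug;		/* 0644: root may rewrite it via sysfs */
module_param(ex_debug, int, 0644);
MODULE_PARM_DESC(ex_debug, "debug bitmask, 1=enable, 2=messages, 4=states");

static int __init ex_init(void)
{
	if (ex_debug & EX_DEBUG_ENABLE)
		printk(KERN_DEBUG "ex: loaded, debug mask 0x%x\n", ex_debug);
	return 0;
}

static void __exit ex_exit(void)
{
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");

Because the parameter is registered with mode 0644, the mask can be set at load time (for the real driver, something like "modprobe ipmi_si bt_debug=2", assuming the BT state machine is built into the ipmi_si module) or flipped at runtime through /sys/module/<module>/parameters/.
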
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index d21853a594a3..da1554194d3d 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -38,16 +38,25 @@
  */
 
 #include <linux/kernel.h> /* For printk. */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/string.h>
+#include <linux/jiffies.h>
 #include <linux/ipmi_msgdefs.h>	/* for completion codes */
 #include "ipmi_si_sm.h"
 
-/* Set this if you want a printout of why the state machine was hosed
-   when it gets hosed. */
-#define DEBUG_HOSED_REASON
+/* kcs_debug is a bit-field
+ *	KCS_DEBUG_ENABLE -	turned on for now
+ *	KCS_DEBUG_MSG -		commands and their responses
+ *	KCS_DEBUG_STATES -	state machine
+ */
+#define KCS_DEBUG_STATES	4
+#define KCS_DEBUG_MSG		2
+#define KCS_DEBUG_ENABLE	1
 
-/* Print the state machine state on entry every time. */
-#undef DEBUG_STATE
+static int kcs_debug;
+module_param(kcs_debug, int, 0644);
+MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
 /* The states the KCS driver may be in. */
 enum kcs_states {
@@ -91,6 +100,7 @@ enum kcs_states {
 #define IBF_RETRY_TIMEOUT 1000000
 #define OBF_RETRY_TIMEOUT 1000000
 #define MAX_ERROR_RETRIES 10
+#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
 
 struct si_sm_data
 {
@@ -107,6 +117,7 @@ struct si_sm_data
 	unsigned int  error_retries;
 	long          ibf_timeout;
 	long          obf_timeout;
+	unsigned long error0_timeout;
 };
 
 static unsigned int init_kcs_data(struct si_sm_data *kcs,
@@ -175,11 +186,11 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
 {
 	(kcs->error_retries)++;
 	if (kcs->error_retries > MAX_ERROR_RETRIES) {
-#ifdef DEBUG_HOSED_REASON
-		printk("ipmi_kcs_sm: kcs hosed: %s\n", reason);
-#endif
+		if (kcs_debug & KCS_DEBUG_ENABLE)
+			printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason);
 		kcs->state = KCS_HOSED;
 	} else {
+		kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
 		kcs->state = KCS_ERROR0;
 	}
 }
@@ -248,14 +259,21 @@ static void restart_kcs_transaction(struct si_sm_data *kcs)
 static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
 				 unsigned int size)
 {
+	unsigned int i;
+
 	if ((size < 2) || (size > MAX_KCS_WRITE_SIZE)) {
 		return -1;
 	}
-
 	if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
 		return -2;
 	}
-
+	if (kcs_debug & KCS_DEBUG_MSG) {
+		printk(KERN_DEBUG "start_kcs_transaction -");
+		for (i = 0; i < size; i ++) {
+			printk(" %02x", (unsigned char) (data [i]));
+		}
+		printk ("\n");
+	}
 	kcs->error_retries = 0;
 	memcpy(kcs->write_data, data, size);
 	kcs->write_count = size;
@@ -305,9 +323,9 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 
 	status = read_status(kcs);
 
-#ifdef DEBUG_STATE
-	printk("  State = %d, %x\n", kcs->state, status);
-#endif
+	if (kcs_debug & KCS_DEBUG_STATES)
+		printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status);
+
 	/* All states wait for ibf, so just do it here. */
 	if (!check_ibf(kcs, status, time))
 		return SI_SM_CALL_WITH_DELAY;
@@ -409,6 +427,10 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 
 	case KCS_ERROR0:
 		clear_obf(kcs, status);
+		status = read_status(kcs);
+		if (GET_STATUS_OBF(status)) /* controller isn't responding */
+			if (time_before(jiffies, kcs->error0_timeout))
+				return SI_SM_CALL_WITH_TICK_DELAY;
 		write_cmd(kcs, KCS_GET_STATUS_ABORT);
 		kcs->state = KCS_ERROR1;
 		break;
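
The new KCS_ERROR0 handling above waits up to ERROR0_OBF_WAIT_JIFFIES for the controller to clear OBF before issuing the abort command. The mechanism is the standard jiffies deadline pattern; a minimal sketch under hypothetical ex_ names:

#include <linux/jiffies.h>

#define EX_OBF_WAIT_JIFFIES (2*HZ)	/* two seconds regardless of HZ */

struct ex_sm {
	unsigned long error0_deadline;
};

/* Called once when entering the error state: stamp the deadline. */
static void ex_enter_error0(struct ex_sm *sm)
{
	sm->error0_deadline = jiffies + EX_OBF_WAIT_JIFFIES;
}

/* Called on every poll: nonzero means "keep waiting, retry later". */
static int ex_error0_keep_waiting(struct ex_sm *sm)
{
	return time_before(jiffies, sm->error0_deadline);
}

time_before() performs the comparison with signed arithmetic, so it stays correct when the jiffies counter wraps around, which a plain "jiffies < deadline" test would get wrong.
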
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 32fa82c78c73..c1d06ba449b6 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,13 +38,13 @@
 #include <linux/sched.h>
 #include <linux/poll.h>
 #include <linux/spinlock.h>
-#include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/ipmi.h>
 #include <linux/ipmi_smi.h>
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
 
 #define PFX "IPMI message handler: "
 
@@ -65,10 +65,19 @@ struct proc_dir_entry *proc_ipmi_root = NULL;
    the max message timer.  This is in milliseconds. */
 #define MAX_MSG_TIMEOUT		60000
 
+
+/*
+ * The main "user" data structure.
+ */
 struct ipmi_user
 {
 	struct list_head link;
 
+	/* Set to "0" when the user is destroyed. */
+	int valid;
+
+	struct kref refcount;
+
 	/* The upper layer that handles receive messages. */
 	struct ipmi_user_hndl *handler;
 	void             *handler_data;
@@ -87,6 +96,15 @@ struct cmd_rcvr
 	ipmi_user_t   user;
 	unsigned char netfn;
 	unsigned char cmd;
+
+	/*
+	 * This is used to form a linked list during mass deletion.
+	 * Since this is in an RCU list, we cannot use the link above
+	 * or change any data until the RCU period completes.  So we
+	 * use this next variable during mass deletion so we can have
+	 * a list and don't have to wait and restart the search on
+	 * every individual deletion of a command. */
+	struct cmd_rcvr *next;
 };
 
 struct seq_table
@@ -150,13 +168,11 @@ struct ipmi_smi
 	/* What interface number are we? */
 	int intf_num;
 
-	/* The list of upper layers that are using me.  We read-lock
-	   this when delivering messages to the upper layer to keep
-	   the user from going away while we are processing the
-	   message.  This means that you cannot add or delete a user
-	   from the receive callback. */
-	rwlock_t users_lock;
+	struct kref refcount;
+
+	/* The list of upper layers that are using me.  seq_lock
+	 * protects this. */
 	struct list_head users;
 
 	/* Used for wake ups at startup. */
 	wait_queue_head_t waitq;
@@ -193,7 +209,7 @@ struct ipmi_smi
 
 	/* The list of command receivers that are registered for commands
 	   on this interface. */
-	rwlock_t cmd_rcvr_lock;
+	struct semaphore cmd_rcvrs_lock;
 	struct list_head cmd_rcvrs;
 
 	/* Events that were queued because no one was there to receive
@@ -296,16 +312,17 @@ struct ipmi_smi
 	unsigned int events;
 };
 
+/* Used to mark an interface entry that cannot be used but is not a
+ * free entry, either, primarily used at creation and deletion time so
+ * a slot doesn't get reused too quickly. */
+#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
+#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
+				   || (i == IPMI_INVALID_INTERFACE_ENTRY))
+
 #define MAX_IPMI_INTERFACES 4
 static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
 
-/* Used to keep interfaces from going away while operations are
-   operating on interfaces.  Grab read if you are not modifying the
-   interfaces, write if you are. */
-static DECLARE_RWSEM(interfaces_sem);
-
-/* Directly protects the ipmi_interfaces data structure.  This is
-   claimed in the timer interrupt. */
+/* Directly protects the ipmi_interfaces data structure. */
 static DEFINE_SPINLOCK(interfaces_lock);
 
 /* List of watchers that want to know when smi's are added and
@@ -313,20 +330,72 @@ static DEFINE_SPINLOCK(interfaces_lock);
 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
 static DECLARE_RWSEM(smi_watchers_sem);
 
+
+static void free_recv_msg_list(struct list_head *q)
+{
+	struct ipmi_recv_msg *msg, *msg2;
+
+	list_for_each_entry_safe(msg, msg2, q, link) {
+		list_del(&msg->link);
+		ipmi_free_recv_msg(msg);
+	}
+}
+
+static void clean_up_interface_data(ipmi_smi_t intf)
+{
+	int              i;
+	struct cmd_rcvr  *rcvr, *rcvr2;
+	struct list_head list;
+
+	free_recv_msg_list(&intf->waiting_msgs);
+	free_recv_msg_list(&intf->waiting_events);
+
+	/* Wholesale remove all the entries from the list in the
+	 * interface and wait for RCU to know that none are in use. */
+	down(&intf->cmd_rcvrs_lock);
+	list_add_rcu(&list, &intf->cmd_rcvrs);
+	list_del_rcu(&intf->cmd_rcvrs);
+	up(&intf->cmd_rcvrs_lock);
+	synchronize_rcu();
+
+	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
+		kfree(rcvr);
+
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		if ((intf->seq_table[i].inuse)
+		    && (intf->seq_table[i].recv_msg))
+		{
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
+		}
+	}
+}
+
+static void intf_free(struct kref *ref)
+{
+	ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
+
+	clean_up_interface_data(intf);
+	kfree(intf);
+}
+
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 {
 	int           i;
+	unsigned long flags;
 
-	down_read(&interfaces_sem);
 	down_write(&smi_watchers_sem);
 	list_add(&(watcher->link), &smi_watchers);
+	up_write(&smi_watchers_sem);
+	spin_lock_irqsave(&interfaces_lock, flags);
 	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-		if (ipmi_interfaces[i] != NULL) {
-			watcher->new_smi(i);
-		}
+		ipmi_smi_t intf = ipmi_interfaces[i];
+		if (IPMI_INVALID_INTERFACE(intf))
+			continue;
+		spin_unlock_irqrestore(&interfaces_lock, flags);
+		watcher->new_smi(i);
+		spin_lock_irqsave(&interfaces_lock, flags);
 	}
-	up_write(&smi_watchers_sem);
-	up_read(&interfaces_sem);
+	spin_unlock_irqrestore(&interfaces_lock, flags);
 	return 0;
 }
 
@@ -471,8 +540,8 @@ static void deliver_response(struct ipmi_recv_msg *msg)
 		}
 		ipmi_free_recv_msg(msg);
 	} else {
-		msg->user->handler->ipmi_recv_hndl(msg,
-						   msg->user->handler_data);
+		ipmi_user_t user = msg->user;
+		user->handler->ipmi_recv_hndl(msg, user->handler_data);
 	}
 }
 
@@ -662,15 +731,18 @@ int ipmi_create_user(unsigned int if_num,
 	if (! new_user)
 		return -ENOMEM;
 
-	down_read(&interfaces_sem);
-	if ((if_num >= MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL)
-	{
-		rv = -EINVAL;
-		goto out_unlock;
+	spin_lock_irqsave(&interfaces_lock, flags);
+	intf = ipmi_interfaces[if_num];
+	if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
+		spin_unlock_irqrestore(&interfaces_lock, flags);
+		return -EINVAL;
 	}
 
-	intf = ipmi_interfaces[if_num];
+	/* Note that each existing user holds a refcount to the interface. */
+	kref_get(&intf->refcount);
+	spin_unlock_irqrestore(&interfaces_lock, flags);
 
+	kref_init(&new_user->refcount);
 	new_user->handler = handler;
 	new_user->handler_data = handler_data;
 	new_user->intf = intf;
@@ -678,98 +750,92 @@ int ipmi_create_user(unsigned int if_num,
 
 	if (!try_module_get(intf->handlers->owner)) {
 		rv = -ENODEV;
-		goto out_unlock;
+		goto out_err;
 	}
 
 	if (intf->handlers->inc_usecount) {
 		rv = intf->handlers->inc_usecount(intf->send_info);
 		if (rv) {
 			module_put(intf->handlers->owner);
-			goto out_unlock;
+			goto out_err;
 		}
 	}
 
-	write_lock_irqsave(&intf->users_lock, flags);
-	list_add_tail(&new_user->link, &intf->users);
-	write_unlock_irqrestore(&intf->users_lock, flags);
-
- out_unlock:
-	if (rv) {
-		kfree(new_user);
-	} else {
-		*user = new_user;
-	}
+	new_user->valid = 1;
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	list_add_rcu(&new_user->link, &intf->users);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
+	*user = new_user;
+	return 0;
 
-	up_read(&interfaces_sem);
+ out_err:
+	kfree(new_user);
+	kref_put(&intf->refcount, intf_free);
 	return rv;
 }
 
-static int ipmi_destroy_user_nolock(ipmi_user_t user)
+static void free_user(struct kref *ref)
+{
+	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
+	kfree(user);
+}
+
+int ipmi_destroy_user(ipmi_user_t user)
 {
 	int              rv = -ENODEV;
-	ipmi_user_t      t_user;
-	struct cmd_rcvr  *rcvr, *rcvr2;
+	ipmi_smi_t       intf = user->intf;
 	int              i;
 	unsigned long    flags;
+	struct cmd_rcvr  *rcvr;
+	struct list_head *entry1, *entry2;
+	struct cmd_rcvr  *rcvrs = NULL;
 
-	/* Find the user and delete them from the list. */
-	list_for_each_entry(t_user, &(user->intf->users), link) {
-		if (t_user == user) {
-			list_del(&t_user->link);
-			rv = 0;
-			break;
-		}
-	}
+	user->valid = 1;
 
-	if (rv) {
-		goto out_unlock;
-	}
+	/* Remove the user from the interface's sequence table. */
+	spin_lock_irqsave(&intf->seq_lock, flags);
+	list_del_rcu(&user->link);
 
-	/* Remove the user from the interfaces sequence table. */
-	spin_lock_irqsave(&(user->intf->seq_lock), flags);
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
-		if (user->intf->seq_table[i].inuse
-		    && (user->intf->seq_table[i].recv_msg->user == user))
+		if (intf->seq_table[i].inuse
+		    && (intf->seq_table[i].recv_msg->user == user))
 		{
-			user->intf->seq_table[i].inuse = 0;
+			intf->seq_table[i].inuse = 0;
 		}
 	}
-	spin_unlock_irqrestore(&(user->intf->seq_lock), flags);
+	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
-	/* Remove the user from the command receiver's table. */
-	write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
-	list_for_each_entry_safe(rcvr, rcvr2, &(user->intf->cmd_rcvrs), link) {
+	/*
+	 * Remove the user from the command receiver's table.  First
+	 * we build a list of everything (not using the standard link,
+	 * since other things may be using it till we do
+	 * synchronize_rcu()) then free everything in that list.
+	 */
+	down(&intf->cmd_rcvrs_lock);
+	list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) {
+		rcvr = list_entry(entry1, struct cmd_rcvr, link);
 		if (rcvr->user == user) {
-			list_del(&rcvr->link);
-			kfree(rcvr);
+			list_del_rcu(&rcvr->link);
+			rcvr->next = rcvrs;
+			rcvrs = rcvr;
 		}
 	}
-	write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
+	up(&intf->cmd_rcvrs_lock);
+	synchronize_rcu();
+	while (rcvrs) {
+		rcvr = rcvrs;
+		rcvrs = rcvr->next;
+		kfree(rcvr);
+	}
 
-	kfree(user);
+	module_put(intf->handlers->owner);
+	if (intf->handlers->dec_usecount)
+		intf->handlers->dec_usecount(intf->send_info);
 
- out_unlock:
+	kref_put(&intf->refcount, intf_free);
 
-	return rv;
-}
-
-int ipmi_destroy_user(ipmi_user_t user)
-{
-	int           rv;
-	ipmi_smi_t    intf = user->intf;
-	unsigned long flags;
+	kref_put(&user->refcount, free_user);
 
-	down_read(&interfaces_sem);
-	write_lock_irqsave(&intf->users_lock, flags);
-	rv = ipmi_destroy_user_nolock(user);
-	if (!rv) {
-		module_put(intf->handlers->owner);
-		if (intf->handlers->dec_usecount)
-			intf->handlers->dec_usecount(intf->send_info);
-	}
-
-	write_unlock_irqrestore(&intf->users_lock, flags);
-	up_read(&interfaces_sem);
 	return rv;
 }
 
@@ -823,62 +889,78 @@ int ipmi_get_my_LUN(ipmi_user_t user,
 
 int ipmi_set_gets_events(ipmi_user_t user, int val)
 {
 	unsigned long        flags;
-	struct ipmi_recv_msg *msg, *msg2;
+	ipmi_smi_t           intf = user->intf;
+	struct ipmi_recv_msg *msg, *msg2;
+	struct list_head     msgs;
 
-	read_lock(&(user->intf->users_lock));
-	spin_lock_irqsave(&(user->intf->events_lock), flags);
+	INIT_LIST_HEAD(&msgs);
+
+	spin_lock_irqsave(&intf->events_lock, flags);
 	user->gets_events = val;
 
 	if (val) {
 		/* Deliver any queued events. */
-		list_for_each_entry_safe(msg, msg2, &(user->intf->waiting_events), link) {
+		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
 			list_del(&msg->link);
-			msg->user = user;
-			deliver_response(msg);
+			list_add_tail(&msg->link, &msgs);
 		}
 	}
 
-	spin_unlock_irqrestore(&(user->intf->events_lock), flags);
-	read_unlock(&(user->intf->users_lock));
+	/* Hold the events lock while doing this to preserve order. */
+	list_for_each_entry_safe(msg, msg2, &msgs, link) {
+		msg->user = user;
+		kref_get(&user->refcount);
+		deliver_response(msg);
+	}
+
+	spin_unlock_irqrestore(&intf->events_lock, flags);
 
 	return 0;
 }
 
+static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
+				      unsigned char netfn,
+				      unsigned char cmd)
+{
+	struct cmd_rcvr *rcvr;
+
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
+			return rcvr;
+	}
+	return NULL;
+}
+
 int ipmi_register_for_cmd(ipmi_user_t   user,
 			  unsigned char netfn,
 			  unsigned char cmd)
 {
-	struct cmd_rcvr *cmp;
-	unsigned long   flags;
-	struct cmd_rcvr *rcvr;
+	ipmi_smi_t      intf = user->intf;
+	struct cmd_rcvr *rcvr;
+	struct cmd_rcvr *entry;
 	int             rv = 0;
 
 
 	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
 	if (! rcvr)
 		return -ENOMEM;
+	rcvr->cmd = cmd;
+	rcvr->netfn = netfn;
+	rcvr->user = user;
 
-	read_lock(&(user->intf->users_lock));
-	write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
+	down(&intf->cmd_rcvrs_lock);
 	/* Make sure the command/netfn is not already registered. */
-	list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) {
-		if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) {
-			rv = -EBUSY;
-			break;
-		}
-	}
-
-	if (! rv) {
-		rcvr->cmd = cmd;
-		rcvr->netfn = netfn;
-		rcvr->user = user;
-		list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
+	entry = find_cmd_rcvr(intf, netfn, cmd);
+	if (entry) {
+		rv = -EBUSY;
+		goto out_unlock;
 	}
 
-	write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
-	read_unlock(&(user->intf->users_lock));
+	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
+ out_unlock:
+	up(&intf->cmd_rcvrs_lock);
 	if (rv)
 		kfree(rcvr);
 
@@ -889,31 +971,28 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
 			    unsigned char netfn,
 			    unsigned char cmd)
 {
-	unsigned long   flags;
+	ipmi_smi_t      intf = user->intf;
 	struct cmd_rcvr *rcvr;
-	int             rv = -ENOENT;
 
-	read_lock(&(user->intf->users_lock));
-	write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
+	down(&intf->cmd_rcvrs_lock);
 	/* Make sure the command/netfn is not already registered. */
-	list_for_each_entry(rcvr, &(user->intf->cmd_rcvrs), link) {
-		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
-			rv = 0;
-			list_del(&rcvr->link);
-			kfree(rcvr);
-			break;
-		}
+	rcvr = find_cmd_rcvr(intf, netfn, cmd);
+	if ((rcvr) && (rcvr->user == user)) {
+		list_del_rcu(&rcvr->link);
+		up(&intf->cmd_rcvrs_lock);
+		synchronize_rcu();
+		kfree(rcvr);
+		return 0;
+	} else {
+		up(&intf->cmd_rcvrs_lock);
+		return -ENOENT;
 	}
-	write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
-	read_unlock(&(user->intf->users_lock));
-
-	return rv;
 }
 
 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
 {
-	user->intf->handlers->set_run_to_completion(user->intf->send_info,
-						    val);
+	ipmi_smi_t intf = user->intf;
+	intf->handlers->set_run_to_completion(intf->send_info, val);
 }
 
 static unsigned char
@@ -1010,19 +1089,19 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
    supplied in certain circumstances (mainly at panic time).  If
    messages are supplied, they will be freed, even if an error
    occurs. */
-static inline int i_ipmi_request(ipmi_user_t          user,
-				 ipmi_smi_t           intf,
-				 struct ipmi_addr     *addr,
-				 long                 msgid,
-				 struct kernel_ipmi_msg *msg,
-				 void                 *user_msg_data,
-				 void                 *supplied_smi,
-				 struct ipmi_recv_msg *supplied_recv,
-				 int                  priority,
-				 unsigned char        source_address,
-				 unsigned char        source_lun,
-				 int                  retries,
-				 unsigned int         retry_time_ms)
+static int i_ipmi_request(ipmi_user_t          user,
+			  ipmi_smi_t           intf,
+			  struct ipmi_addr     *addr,
+			  long                 msgid,
+			  struct kernel_ipmi_msg *msg,
+			  void                 *user_msg_data,
+			  void                 *supplied_smi,
+			  struct ipmi_recv_msg *supplied_recv,
+			  int                  priority,
+			  unsigned char        source_address,
+			  unsigned char        source_lun,
+			  int                  retries,
+			  unsigned int         retry_time_ms)
 {
 	int rv = 0;
 	struct ipmi_smi_msg *smi_msg;
@@ -1051,6 +1130,8 @@ static inline int i_ipmi_request(ipmi_user_t user,
 	}
 
 	recv_msg->user = user;
+	if (user)
+		kref_get(&user->refcount);
 	recv_msg->msgid = msgid;
 	/* Store the message to send in the receive message so timeout
 	   responses can get the proper response data. */
@@ -1725,11 +1806,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		      unsigned char            version_major,
 		      unsigned char            version_minor,
 		      unsigned char            slave_addr,
-		      ipmi_smi_t               *intf)
+		      ipmi_smi_t               *new_intf)
 {
 	int              i, j;
 	int              rv;
-	ipmi_smi_t       new_intf;
+	ipmi_smi_t       intf;
 	unsigned long    flags;
 
 
@@ -1745,189 +1826,142 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		return -ENODEV;
 	}
 
-	new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL);
-	if (!new_intf)
+	intf = kmalloc(sizeof(*intf), GFP_KERNEL);
+	if (!intf)
 		return -ENOMEM;
-	memset(new_intf, 0, sizeof(*new_intf));
-
-	new_intf->proc_dir = NULL;
+	memset(intf, 0, sizeof(*intf));
+	intf->intf_num = -1;
+	kref_init(&intf->refcount);
+	intf->version_major = version_major;
+	intf->version_minor = version_minor;
+	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
+		intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
+		intf->channels[j].lun = 2;
+	}
+	if (slave_addr != 0)
+		intf->channels[0].address = slave_addr;
+	INIT_LIST_HEAD(&intf->users);
+	intf->handlers = handlers;
+	intf->send_info = send_info;
+	spin_lock_init(&intf->seq_lock);
+	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
+		intf->seq_table[j].inuse = 0;
+		intf->seq_table[j].seqid = 0;
+	}
+	intf->curr_seq = 0;
+#ifdef CONFIG_PROC_FS
+	spin_lock_init(&intf->proc_entry_lock);
+#endif
+	spin_lock_init(&intf->waiting_msgs_lock);
+	INIT_LIST_HEAD(&intf->waiting_msgs);
+	spin_lock_init(&intf->events_lock);
+	INIT_LIST_HEAD(&intf->waiting_events);
+	intf->waiting_events_count = 0;
+	init_MUTEX(&intf->cmd_rcvrs_lock);
+	INIT_LIST_HEAD(&intf->cmd_rcvrs);
+	init_waitqueue_head(&intf->waitq);
+
+	spin_lock_init(&intf->counter_lock);
+	intf->proc_dir = NULL;
 
 	rv = -ENOMEM;
-
-	down_write(&interfaces_sem);
+	spin_lock_irqsave(&interfaces_lock, flags);
 	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
 		if (ipmi_interfaces[i] == NULL) {
-			new_intf->intf_num = i;
-			new_intf->version_major = version_major;
-			new_intf->version_minor = version_minor;
-			for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
-				new_intf->channels[j].address
-					= IPMI_BMC_SLAVE_ADDR;
-				new_intf->channels[j].lun = 2;
-			}
-			if (slave_addr != 0)
-				new_intf->channels[0].address = slave_addr;
-			rwlock_init(&(new_intf->users_lock));
-			INIT_LIST_HEAD(&(new_intf->users));
-			new_intf->handlers = handlers;
-			new_intf->send_info = send_info;
-			spin_lock_init(&(new_intf->seq_lock));
-			for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
-				new_intf->seq_table[j].inuse = 0;
-				new_intf->seq_table[j].seqid = 0;
-			}
-			new_intf->curr_seq = 0;
-#ifdef CONFIG_PROC_FS
-			spin_lock_init(&(new_intf->proc_entry_lock));
-#endif
-			spin_lock_init(&(new_intf->waiting_msgs_lock));
-			INIT_LIST_HEAD(&(new_intf->waiting_msgs));
-			spin_lock_init(&(new_intf->events_lock));
-			INIT_LIST_HEAD(&(new_intf->waiting_events));
-			new_intf->waiting_events_count = 0;
-			rwlock_init(&(new_intf->cmd_rcvr_lock));
-			init_waitqueue_head(&new_intf->waitq);
-			INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
-
-			spin_lock_init(&(new_intf->counter_lock));
-
-			spin_lock_irqsave(&interfaces_lock, flags);
-			ipmi_interfaces[i] = new_intf;
-			spin_unlock_irqrestore(&interfaces_lock, flags);
-
+			intf->intf_num = i;
+			/* Reserve the entry till we are done. */
+			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
 			rv = 0;
-			*intf = new_intf;
 			break;
 		}
 	}
+	spin_unlock_irqrestore(&interfaces_lock, flags);
+	if (rv)
+		goto out;
 
-	downgrade_write(&interfaces_sem);
-
-	if (rv == 0)
-		rv = add_proc_entries(*intf, i);
-
-	if (rv == 0) {
-		if ((version_major > 1)
-		    || ((version_major == 1) && (version_minor >= 5)))
-		{
-			/* Start scanning the channels to see what is
-			   available. */
-			(*intf)->null_user_handler = channel_handler;
-			(*intf)->curr_channel = 0;
-			rv = send_channel_info_cmd(*intf, 0);
-			if (rv)
-				goto out;
+	/* FIXME - this is an ugly kludge, this sets the intf for the
+	   caller before sending any messages with it. */
+	*new_intf = intf;
 
-			/* Wait for the channel info to be read. */
-			up_read(&interfaces_sem);
-			wait_event((*intf)->waitq,
-				   ((*intf)->curr_channel>=IPMI_MAX_CHANNELS));
-			down_read(&interfaces_sem);
+	if ((version_major > 1)
+	    || ((version_major == 1) && (version_minor >= 5)))
+	{
+		/* Start scanning the channels to see what is
+		   available. */
+		intf->null_user_handler = channel_handler;
+		intf->curr_channel = 0;
+		rv = send_channel_info_cmd(intf, 0);
+		if (rv)
+			goto out;
 
-			if (ipmi_interfaces[i] != new_intf)
-				/* Well, it went away.  Just return. */
-				goto out;
-		} else {
-			/* Assume a single IPMB channel at zero. */
-			(*intf)->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
-			(*intf)->channels[0].protocol
-				= IPMI_CHANNEL_PROTOCOL_IPMB;
-		}
-
-		/* Call all the watcher interfaces to tell
-		   them that a new interface is available. */
-		call_smi_watchers(i);
+		/* Wait for the channel info to be read. */
+		wait_event(intf->waitq,
+			   intf->curr_channel >= IPMI_MAX_CHANNELS);
+	} else {
+		/* Assume a single IPMB channel at zero. */
+		intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
+		intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
 	}
 
- out:
-	up_read(&interfaces_sem);
+	if (rv == 0)
+		rv = add_proc_entries(intf, i);
 
+ out:
 	if (rv) {
-		if (new_intf->proc_dir)
-			remove_proc_entries(new_intf);
-		kfree(new_intf);
+		if (intf->proc_dir)
+			remove_proc_entries(intf);
+		kref_put(&intf->refcount, intf_free);
+		if (i < MAX_IPMI_INTERFACES) {
+			spin_lock_irqsave(&interfaces_lock, flags);
+			ipmi_interfaces[i] = NULL;
+			spin_unlock_irqrestore(&interfaces_lock, flags);
+		}
+	} else {
+		spin_lock_irqsave(&interfaces_lock, flags);
+		ipmi_interfaces[i] = intf;
+		spin_unlock_irqrestore(&interfaces_lock, flags);
+		call_smi_watchers(i);
 	}
 
 	return rv;
 }
 
-static void free_recv_msg_list(struct list_head *q)
-{
-	struct ipmi_recv_msg *msg, *msg2;
-
-	list_for_each_entry_safe(msg, msg2, q, link) {
-		list_del(&msg->link);
-		ipmi_free_recv_msg(msg);
-	}
-}
-
-static void free_cmd_rcvr_list(struct list_head *q)
-{
-	struct cmd_rcvr  *rcvr, *rcvr2;
-
-	list_for_each_entry_safe(rcvr, rcvr2, q, link) {
-		list_del(&rcvr->link);
-		kfree(rcvr);
-	}
-}
-
-static void clean_up_interface_data(ipmi_smi_t intf)
-{
-	int              i;
-
-	free_recv_msg_list(&(intf->waiting_msgs));
-	free_recv_msg_list(&(intf->waiting_events));
-	free_cmd_rcvr_list(&(intf->cmd_rcvrs));
-
-	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
-		if ((intf->seq_table[i].inuse)
-		    && (intf->seq_table[i].recv_msg))
-		{
-			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
-		}
-	}
-}
-
 int ipmi_unregister_smi(ipmi_smi_t intf)
 {
-	int                     rv = -ENODEV;
 	int                     i;
 	struct ipmi_smi_watcher *w;
 	unsigned long           flags;
 
-	down_write(&interfaces_sem);
-	if (list_empty(&(intf->users)))
-	{
-		for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
-			if (ipmi_interfaces[i] == intf) {
-				remove_proc_entries(intf);
-				spin_lock_irqsave(&interfaces_lock, flags);
-				ipmi_interfaces[i] = NULL;
-				clean_up_interface_data(intf);
-				spin_unlock_irqrestore(&interfaces_lock,flags);
-				kfree(intf);
-				rv = 0;
-				goto out_call_watcher;
-			}
+	spin_lock_irqsave(&interfaces_lock, flags);
+	for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
+		if (ipmi_interfaces[i] == intf) {
+			/* Set the interface number reserved until we
+			 * are done. */
+			ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
+			intf->intf_num = -1;
+			break;
 		}
-	} else {
-		rv = -EBUSY;
 	}
-	up_write(&interfaces_sem);
+	spin_unlock_irqrestore(&interfaces_lock,flags);
 
-	return rv;
+	if (i == MAX_IPMI_INTERFACES)
+		return -ENODEV;
 
- out_call_watcher:
-	downgrade_write(&interfaces_sem);
+	remove_proc_entries(intf);
 
 	/* Call all the watcher interfaces to tell them that
 	   an interface is gone. */
 	down_read(&smi_watchers_sem);
-	list_for_each_entry(w, &smi_watchers, link) {
+	list_for_each_entry(w, &smi_watchers, link)
 		w->smi_gone(i);
-	}
 	up_read(&smi_watchers_sem);
-	up_read(&interfaces_sem);
+
+	/* Allow the entry to be reused now. */
+	spin_lock_irqsave(&interfaces_lock, flags);
+	ipmi_interfaces[i] = NULL;
+	spin_unlock_irqrestore(&interfaces_lock,flags);
+
+	kref_put(&intf->refcount, intf_free);
 	return 0;
 }
 
@@ -1998,14 +2032,14 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
 static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
 				   struct ipmi_smi_msg *msg)
 {
-	struct cmd_rcvr      *rcvr;
-	int                  rv = 0;
-	unsigned char        netfn;
-	unsigned char        cmd;
-	ipmi_user_t          user = NULL;
-	struct ipmi_ipmb_addr *ipmb_addr;
-	struct ipmi_recv_msg *recv_msg;
-	unsigned long        flags;
+	struct cmd_rcvr       *rcvr;
+	int                   rv = 0;
+	unsigned char         netfn;
+	unsigned char         cmd;
+	ipmi_user_t           user = NULL;
+	struct ipmi_ipmb_addr *ipmb_addr;
+	struct ipmi_recv_msg  *recv_msg;
+	unsigned long         flags;
 
 	if (msg->rsp_size < 10) {
 		/* Message not big enough, just ignore it. */
@@ -2023,16 +2057,14 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
 	netfn = msg->rsp[4] >> 2;
 	cmd = msg->rsp[8];
 
-	read_lock(&(intf->cmd_rcvr_lock));
-
-	/* Find the command/netfn. */
-	list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
-		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
-			user = rcvr->user;
-			break;
-		}
-	}
-	read_unlock(&(intf->cmd_rcvr_lock));
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
 
 	if (user == NULL) {
 		/* We didn't find a user, deliver an error response. */
@@ -2079,6 +2111,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
 		   message, so requeue it for handling
 		   later. */
 		rv = 1;
+		kref_put(&user->refcount, free_user);
 	} else {
 		/* Extract the source address from the data. */
 		ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
@@ -2179,14 +2212,14 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
 static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
 				  struct ipmi_smi_msg *msg)
 {
-	struct cmd_rcvr      *rcvr;
-	int                  rv = 0;
-	unsigned char        netfn;
-	unsigned char        cmd;
-	ipmi_user_t          user = NULL;
-	struct ipmi_lan_addr *lan_addr;
-	struct ipmi_recv_msg *recv_msg;
-	unsigned long        flags;
+	struct cmd_rcvr       *rcvr;
+	int                   rv = 0;
+	unsigned char         netfn;
+	unsigned char         cmd;
+	ipmi_user_t           user = NULL;
+	struct ipmi_lan_addr  *lan_addr;
+	struct ipmi_recv_msg  *recv_msg;
+	unsigned long         flags;
 
 	if (msg->rsp_size < 12) {
 		/* Message not big enough, just ignore it. */
@@ -2204,19 +2237,17 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
 	netfn = msg->rsp[6] >> 2;
 	cmd = msg->rsp[10];
 
-	read_lock(&(intf->cmd_rcvr_lock));
-
-	/* Find the command/netfn. */
-	list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) {
-		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) {
-			user = rcvr->user;
-			break;
-		}
-	}
-	read_unlock(&(intf->cmd_rcvr_lock));
+	rcu_read_lock();
+	rcvr = find_cmd_rcvr(intf, netfn, cmd);
+	if (rcvr) {
+		user = rcvr->user;
+		kref_get(&user->refcount);
+	} else
+		user = NULL;
+	rcu_read_unlock();
 
 	if (user == NULL) {
-		/* We didn't find a user, deliver an error response. */
+		/* We didn't find a user, just give up. */
 		spin_lock_irqsave(&intf->counter_lock, flags);
 		intf->unhandled_commands++;
 		spin_unlock_irqrestore(&intf->counter_lock, flags);
@@ -2235,6 +2266,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
 		   message, so requeue it for handling
 		   later. */
 		rv = 1;
+		kref_put(&user->refcount, free_user);
 	} else {
 		/* Extract the source address from the data. */
 		lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
@@ -2286,8 +2318,6 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
 	recv_msg->msg.data_len = msg->rsp_size - 3;
 }
 
-/* This will be called with the intf->users_lock read-locked, so no need
-   to do that here. */
 static int handle_read_event_rsp(ipmi_smi_t          intf,
 				 struct ipmi_smi_msg *msg)
 {
@@ -2313,7 +2343,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
 
 	INIT_LIST_HEAD(&msgs);
 
-	spin_lock_irqsave(&(intf->events_lock), flags);
+	spin_lock_irqsave(&intf->events_lock, flags);
 
 	spin_lock(&intf->counter_lock);
 	intf->events++;
@@ -2321,12 +2351,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
 
 	/* Allocate and fill in one message for every user that is getting
 	   events. */
-	list_for_each_entry(user, &(intf->users), link) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(user, &intf->users, link) {
 		if (! user->gets_events)
 			continue;
 
 		recv_msg = ipmi_alloc_recv_msg();
 		if (! recv_msg) {
+			rcu_read_unlock();
 			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
 				list_del(&recv_msg->link);
 				ipmi_free_recv_msg(recv_msg);
@@ -2342,8 +2374,10 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
 
 		copy_event_into_recv_msg(recv_msg, msg);
 		recv_msg->user = user;
+		kref_get(&user->refcount);
 		list_add_tail(&(recv_msg->link), &msgs);
 	}
+	rcu_read_unlock();
 
 	if (deliver_count) {
 		/* Now deliver all the messages. */
@@ -2382,9 +2416,8 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
 			  struct ipmi_smi_msg *msg)
 {
 	struct ipmi_recv_msg *recv_msg;
-	int                  found = 0;
-	struct ipmi_user     *user;
 	unsigned long        flags;
+	struct ipmi_user     *user;
 
 	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
 	if (recv_msg == NULL)
@@ -2396,16 +2429,9 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
 		return 0;
 	}
 
+	user = recv_msg->user;
 	/* Make sure the user still exists. */
-	list_for_each_entry(user, &(intf->users), link) {
-		if (user == recv_msg->user) {
-			/* Found it, so we can deliver it */
-			found = 1;
-			break;
-		}
-	}
-
-	if ((! found) && recv_msg->user) {
+	if (user && !user->valid) {
 		/* The user for the message went away, so give up. */
 		spin_lock_irqsave(&intf->counter_lock, flags);
 		intf->unhandled_local_responses++;
@@ -2486,7 +2512,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
 	{
 		/* It's a response to a response we sent.  For this we
 		   deliver a send message response to the user. */
-		struct ipmi_recv_msg *recv_msg = msg->user_data;
+		struct ipmi_recv_msg     *recv_msg = msg->user_data;
 
 		requeue = 0;
 		if (msg->rsp_size < 2)
@@ -2498,13 +2524,18 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
 			/* Invalid channel number */
 			goto out;
 
-		if (recv_msg) {
-			recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
-			recv_msg->msg.data = recv_msg->msg_data;
-			recv_msg->msg.data_len = 1;
-			recv_msg->msg_data[0] = msg->rsp[2];
-			deliver_response(recv_msg);
-		}
+		if (!recv_msg)
+			goto out;
+
+		/* Make sure the user still exists. */
+		if (!recv_msg->user || !recv_msg->user->valid)
+			goto out;
+
+		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
+		recv_msg->msg.data = recv_msg->msg_data;
+		recv_msg->msg.data_len = 1;
+		recv_msg->msg_data[0] = msg->rsp[2];
+		deliver_response(recv_msg);
 	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
 		   && (msg->rsp[1] == IPMI_GET_MSG_CMD))
 	{
@@ -2570,14 +2601,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 	int rv;
 
 
-	/* Lock the user lock so the user can't go away while we are
-	   working on it. */
-	read_lock(&(intf->users_lock));
-
 	if ((msg->data_size >= 2)
 	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
 	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
-	    && (msg->user_data == NULL)) {
+	    && (msg->user_data == NULL))
+	{
 		/* This is the local response to a command send, start
 		   the timer for these.  The user_data will not be
 		   NULL if this is a response send, and we will let
@@ -2612,46 +2640,46 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 	}
 
 	ipmi_free_smi_msg(msg);
-	goto out_unlock;
+	goto out;
 	}
 
 	/* To preserve message order, if the list is not empty, we
 	   tack this message onto the end of the list. */
-	spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
-	if (!list_empty(&(intf->waiting_msgs))) {
-		list_add_tail(&(msg->link), &(intf->waiting_msgs));
-		spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
-		goto out_unlock;
+	spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+	if (!list_empty(&intf->waiting_msgs)) {
+		list_add_tail(&msg->link, &intf->waiting_msgs);
+		spin_unlock(&intf->waiting_msgs_lock);
+		goto out;
 	}
-	spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
+	spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
 
 	rv = handle_new_recv_msg(intf, msg);
 	if (rv > 0) {
 		/* Could not handle the message now, just add it to a
 		   list to handle later. */
-		spin_lock_irqsave(&(intf->waiting_msgs_lock), flags);
-		list_add_tail(&(msg->link), &(intf->waiting_msgs));
-		spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags);
+		spin_lock(&intf->waiting_msgs_lock);
+		list_add_tail(&msg->link, &intf->waiting_msgs);
+		spin_unlock(&intf->waiting_msgs_lock);
 	} else if (rv == 0) {
 		ipmi_free_smi_msg(msg);
 	}
 
- out_unlock:
-	read_unlock(&(intf->users_lock));
+ out:
+	return;
 }
 
 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
 {
 	ipmi_user_t user;
 
-	read_lock(&(intf->users_lock));
-	list_for_each_entry(user, &(intf->users), link) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(user, &intf->users, link) {
 		if (! user->handler->ipmi_watchdog_pretimeout)
 			continue;
 
 		user->handler->ipmi_watchdog_pretimeout(user->handler_data);
 	}
-	read_unlock(&(intf->users_lock));
+	rcu_read_unlock();
 }
 
 static void
@@ -2691,8 +2719,65 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
 	return smi_msg;
 }
 
-static void
-ipmi_timeout_handler(long timeout_period)
+static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+			      struct list_head *timeouts, long timeout_period,
+			      int slot, unsigned long *flags)
+{
+	struct ipmi_recv_msg *msg;
+
+	if (!ent->inuse)
+		return;
+
+	ent->timeout -= timeout_period;
+	if (ent->timeout > 0)
+		return;
+
+	if (ent->retries_left == 0) {
+		/* The message has used all its retries. */
+		ent->inuse = 0;
+		msg = ent->recv_msg;
+		list_add_tail(&msg->link, timeouts);
+		spin_lock(&intf->counter_lock);
+		if (ent->broadcast)
+			intf->timed_out_ipmb_broadcasts++;
+		else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
+			intf->timed_out_lan_commands++;
+		else
+			intf->timed_out_ipmb_commands++;
+		spin_unlock(&intf->counter_lock);
+	} else {
+		struct ipmi_smi_msg *smi_msg;
+		/* More retries, send again. */
+
+		/* Start with the max timer, set to normal
+		   timer after the message is sent. */
+		ent->timeout = MAX_MSG_TIMEOUT;
+		ent->retries_left--;
+		spin_lock(&intf->counter_lock);
+		if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
+			intf->retransmitted_lan_commands++;
+		else
+			intf->retransmitted_ipmb_commands++;
+		spin_unlock(&intf->counter_lock);
+
+		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
+					    ent->seqid);
+		if (! smi_msg)
+			return;
+
+		spin_unlock_irqrestore(&intf->seq_lock, *flags);
+		/* Send the new message.  We send with a zero
+		 * priority.  It timed out, I doubt time is
+		 * that critical now, and high priority
+		 * messages are really only for messages to the
+		 * local MC, which don't get resent. */
+		intf->handlers->sender(intf->send_info,
+				       smi_msg, 0);
+		spin_lock_irqsave(&intf->seq_lock, *flags);
+	}
+}
+
+static void ipmi_timeout_handler(long timeout_period)
 {
 	ipmi_smi_t           intf;
 	struct list_head     timeouts;
@@ -2706,14 +2791,14 @@ ipmi_timeout_handler(long timeout_period)
2706 spin_lock(&interfaces_lock); 2791 spin_lock(&interfaces_lock);
2707 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2792 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2708 intf = ipmi_interfaces[i]; 2793 intf = ipmi_interfaces[i];
2709 if (intf == NULL) 2794 if (IPMI_INVALID_INTERFACE(intf))
2710 continue; 2795 continue;
2711 2796 kref_get(&intf->refcount);
2712 read_lock(&(intf->users_lock)); 2797 spin_unlock(&interfaces_lock);
2713 2798
2714 /* See if any waiting messages need to be processed. */ 2799 /* See if any waiting messages need to be processed. */
2715 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); 2800 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2716 list_for_each_entry_safe(smi_msg, smi_msg2, &(intf->waiting_msgs), link) { 2801 list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
2717 if (! handle_new_recv_msg(intf, smi_msg)) { 2802 if (! handle_new_recv_msg(intf, smi_msg)) {
2718 list_del(&smi_msg->link); 2803 list_del(&smi_msg->link);
2719 ipmi_free_smi_msg(smi_msg); 2804 ipmi_free_smi_msg(smi_msg);
@@ -2723,73 +2808,23 @@ ipmi_timeout_handler(long timeout_period)
2723 break; 2808 break;
2724 } 2809 }
2725 } 2810 }
2726 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2811 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2727 2812
2728 /* Go through the seq table and find any messages that 2813 /* Go through the seq table and find any messages that
2729 have timed out, putting them in the timeouts 2814 have timed out, putting them in the timeouts
2730 list. */ 2815 list. */
2731 spin_lock_irqsave(&(intf->seq_lock), flags); 2816 spin_lock_irqsave(&intf->seq_lock, flags);
2732 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 2817 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
2733 struct seq_table *ent = &(intf->seq_table[j]); 2818 check_msg_timeout(intf, &(intf->seq_table[j]),
2734 if (!ent->inuse) 2819 &timeouts, timeout_period, j,
2735 continue; 2820 &flags);
2736 2821 spin_unlock_irqrestore(&intf->seq_lock, flags);
2737 ent->timeout -= timeout_period; 2822
2738 if (ent->timeout > 0) 2823 list_for_each_entry_safe(msg, msg2, &timeouts, link)
2739 continue;
2740
2741 if (ent->retries_left == 0) {
2742 /* The message has used all its retries. */
2743 ent->inuse = 0;
2744 msg = ent->recv_msg;
2745 list_add_tail(&(msg->link), &timeouts);
2746 spin_lock(&intf->counter_lock);
2747 if (ent->broadcast)
2748 intf->timed_out_ipmb_broadcasts++;
2749 else if (ent->recv_msg->addr.addr_type
2750 == IPMI_LAN_ADDR_TYPE)
2751 intf->timed_out_lan_commands++;
2752 else
2753 intf->timed_out_ipmb_commands++;
2754 spin_unlock(&intf->counter_lock);
2755 } else {
2756 struct ipmi_smi_msg *smi_msg;
2757 /* More retries, send again. */
2758
2759 /* Start with the max timer, set to normal
2760 timer after the message is sent. */
2761 ent->timeout = MAX_MSG_TIMEOUT;
2762 ent->retries_left--;
2763 spin_lock(&intf->counter_lock);
2764 if (ent->recv_msg->addr.addr_type
2765 == IPMI_LAN_ADDR_TYPE)
2766 intf->retransmitted_lan_commands++;
2767 else
2768 intf->retransmitted_ipmb_commands++;
2769 spin_unlock(&intf->counter_lock);
2770 smi_msg = smi_from_recv_msg(intf,
2771 ent->recv_msg, j, ent->seqid);
2772 if (! smi_msg)
2773 continue;
2774
2775 spin_unlock_irqrestore(&(intf->seq_lock),flags);
2776 /* Send the new message. We send with a zero
2777 * priority. It timed out, I doubt time is
2778 * that critical now, and high priority
2779 * messages are really only for messages to the
2780 * local MC, which don't get resent. */
2781 intf->handlers->sender(intf->send_info,
2782 smi_msg, 0);
2783 spin_lock_irqsave(&(intf->seq_lock), flags);
2784 }
2785 }
2786 spin_unlock_irqrestore(&(intf->seq_lock), flags);
2787
2788 list_for_each_entry_safe(msg, msg2, &timeouts, link) {
2789 handle_msg_timeout(msg); 2824 handle_msg_timeout(msg);
2790 }
2791 2825
2792 read_unlock(&(intf->users_lock)); 2826 kref_put(&intf->refcount, intf_free);
2827 spin_lock(&interfaces_lock);
2793 } 2828 }
2794 spin_unlock(&interfaces_lock); 2829 spin_unlock(&interfaces_lock);
2795} 2830}
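
ipmi_timeout_handler can no longer hold interfaces_lock while it processes an interface, so it pins each interface with kref_get() before dropping the lock and releases it with kref_put() afterward. A condensed sketch of that reference-counting idiom, assuming a hypothetical object type and release function:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	struct kref refcount;
	/* ... payload ... */
};

static DEFINE_SPINLOCK(table_lock);
static struct obj *table[16];

static void obj_release(struct kref *kref)
{
	kfree(container_of(kref, struct obj, refcount));
}

static void scan_table(void)
{
	int i;

	spin_lock(&table_lock);
	for (i = 0; i < 16; i++) {
		struct obj *o = table[i];
		if (!o)
			continue;
		kref_get(&o->refcount);	/* keep o alive across the unlock */
		spin_unlock(&table_lock);
		/* ... long-running work on o without the table lock ... */
		kref_put(&o->refcount, obj_release);
		spin_lock(&table_lock);
	}
	spin_unlock(&table_lock);
}
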
@@ -2802,7 +2837,7 @@ static void ipmi_request_event(void)
2802 spin_lock(&interfaces_lock); 2837 spin_lock(&interfaces_lock);
2803 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2838 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2804 intf = ipmi_interfaces[i]; 2839 intf = ipmi_interfaces[i];
2805 if (intf == NULL) 2840 if (IPMI_INVALID_INTERFACE(intf))
2806 continue; 2841 continue;
2807 2842
2808 intf->handlers->request_events(intf->send_info); 2843 intf->handlers->request_events(intf->send_info);
@@ -2884,6 +2919,13 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
2884 return rv; 2919 return rv;
2885} 2920}
2886 2921
2922void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
2923{
2924 if (msg->user)
2925 kref_put(&msg->user->refcount, free_user);
2926 msg->done(msg);
2927}
2928
2887#ifdef CONFIG_IPMI_PANIC_EVENT 2929#ifdef CONFIG_IPMI_PANIC_EVENT
2888 2930
2889static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 2931static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
@@ -2964,7 +3006,7 @@ static void send_panic_events(char *str)
2964 /* For every registered interface, send the event. */ 3006 /* For every registered interface, send the event. */
2965 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3007 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2966 intf = ipmi_interfaces[i]; 3008 intf = ipmi_interfaces[i];
2967 if (intf == NULL) 3009 if (IPMI_INVALID_INTERFACE(intf))
2968 continue; 3010 continue;
2969 3011
2970 /* Send the event announcing the panic. */ 3012 /* Send the event announcing the panic. */
@@ -2995,7 +3037,7 @@ static void send_panic_events(char *str)
2995 int j; 3037 int j;
2996 3038
2997 intf = ipmi_interfaces[i]; 3039 intf = ipmi_interfaces[i];
2998 if (intf == NULL) 3040 if (IPMI_INVALID_INTERFACE(intf))
2999 continue; 3041 continue;
3000 3042
3001 /* First job here is to figure out where to send the 3043 /* First job here is to figure out where to send the
@@ -3131,7 +3173,7 @@ static int panic_event(struct notifier_block *this,
3131 /* For every registered interface, set it to run to completion. */ 3173 /* For every registered interface, set it to run to completion. */
3132 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3174 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3133 intf = ipmi_interfaces[i]; 3175 intf = ipmi_interfaces[i];
3134 if (intf == NULL) 3176 if (IPMI_INVALID_INTERFACE(intf))
3135 continue; 3177 continue;
3136 3178
3137 intf->handlers->set_run_to_completion(intf->send_info, 1); 3179 intf->handlers->set_run_to_completion(intf->send_info, 1);
@@ -3160,9 +3202,8 @@ static int ipmi_init_msghandler(void)
3160 printk(KERN_INFO "ipmi message handler version " 3202 printk(KERN_INFO "ipmi message handler version "
3161 IPMI_DRIVER_VERSION "\n"); 3203 IPMI_DRIVER_VERSION "\n");
3162 3204
3163 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3205 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3164 ipmi_interfaces[i] = NULL; 3206 ipmi_interfaces[i] = NULL;
3165 }
3166 3207
3167#ifdef CONFIG_PROC_FS 3208#ifdef CONFIG_PROC_FS
3168 proc_ipmi_root = proc_mkdir("ipmi", NULL); 3209 proc_ipmi_root = proc_mkdir("ipmi", NULL);
@@ -3258,3 +3299,4 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
3258EXPORT_SYMBOL(ipmi_smi_add_proc_entry); 3299EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3259EXPORT_SYMBOL(proc_ipmi_root); 3300EXPORT_SYMBOL(proc_ipmi_root);
3260EXPORT_SYMBOL(ipmi_user_set_run_to_completion); 3301EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3302EXPORT_SYMBOL(ipmi_free_recv_msg);
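
The newly exported ipmi_free_recv_msg() drops the reference a delivered message holds on its user and then calls the message's done() routine, so in-kernel consumers release received messages through it. A sketch of a receive handler doing so; the user registration that installs the handler is elided, and the names are illustrative:

#include <linux/kernel.h>
#include <linux/ipmi.h>

static void my_recv_handler(struct ipmi_recv_msg *msg, void *handler_data)
{
	if (msg->msg.data_len >= 1)
		printk(KERN_DEBUG "my_ipmi: completion code 0x%2.2x\n",
		       msg->msg.data[0]);
	ipmi_free_recv_msg(msg);	/* puts the user ref, frees the msg */
}
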
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index f66947722e12..e053eade0366 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -56,7 +56,7 @@ static int poweroff_powercycle;
56 56
57/* parameter definition to allow user to flag power cycle */ 57/* parameter definition to allow user to flag power cycle */
58module_param(poweroff_powercycle, int, 0644); 58module_param(poweroff_powercycle, int, 0644);
59MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); 59MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
60 60
61/* Stuff from the get device id command. */ 61/* Stuff from the get device id command. */
62static unsigned int mfg_id; 62static unsigned int mfg_id;
@@ -611,9 +611,7 @@ static int ipmi_poweroff_init (void)
611 } 611 }
612#endif 612#endif
613 613
614#ifdef CONFIG_PROC_FS
615 rv = ipmi_smi_watcher_register(&smi_watcher); 614 rv = ipmi_smi_watcher_register(&smi_watcher);
616#endif
617 if (rv) { 615 if (rv) {
618 unregister_sysctl_table(ipmi_table_header); 616 unregister_sysctl_table(ipmi_table_header);
619 printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv); 617 printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
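
For context, poweroff_powercycle only selects which action a standard Chassis Control request (NetFn 0x00, command 0x02, one data byte: 0x00 = power down, 0x02 = power cycle) asks for. A simplified sketch of filling in such a request; the helper and the macro names are illustrative, not the driver's actual code:

#include <linux/ipmi.h>

#define CHASSIS_NETFN		0x00
#define CHASSIS_CONTROL_CMD	0x02
#define CHASSIS_POWER_DOWN	0x00
#define CHASSIS_POWER_CYCLE	0x02

static void fill_chassis_control(struct kernel_ipmi_msg *msg,
				 unsigned char *data, int powercycle)
{
	data[0] = powercycle ? CHASSIS_POWER_CYCLE : CHASSIS_POWER_DOWN;
	msg->netfn = CHASSIS_NETFN;
	msg->cmd = CHASSIS_CONTROL_CMD;
	msg->data = data;
	msg->data_len = 1;
}
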
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b6e5cbfb09f8..ea89dca3dbb5 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -51,6 +51,8 @@
51#include <linux/list.h> 51#include <linux/list.h>
52#include <linux/pci.h> 52#include <linux/pci.h>
53#include <linux/ioport.h> 53#include <linux/ioport.h>
54#include <linux/notifier.h>
55#include <linux/kthread.h>
54#include <asm/irq.h> 56#include <asm/irq.h>
55#ifdef CONFIG_HIGH_RES_TIMERS 57#ifdef CONFIG_HIGH_RES_TIMERS
56#include <linux/hrtime.h> 58#include <linux/hrtime.h>
@@ -125,6 +127,7 @@ struct ipmi_device_id {
125 127
126struct smi_info 128struct smi_info
127{ 129{
130 int intf_num;
128 ipmi_smi_t intf; 131 ipmi_smi_t intf;
129 struct si_sm_data *si_sm; 132 struct si_sm_data *si_sm;
130 struct si_sm_handlers *handlers; 133 struct si_sm_handlers *handlers;
@@ -192,8 +195,7 @@ struct smi_info
192 unsigned long last_timeout_jiffies; 195 unsigned long last_timeout_jiffies;
193 196
194 /* Used to gracefully stop the timer without race conditions. */ 197 /* Used to gracefully stop the timer without race conditions. */
195 volatile int stop_operation; 198 atomic_t stop_operation;
196 volatile int timer_stopped;
197 199
198 /* The driver will disable interrupts when it gets into a 200 /* The driver will disable interrupts when it gets into a
199 situation where it cannot handle messages due to lack of 201 situation where it cannot handle messages due to lack of
@@ -220,8 +222,16 @@ struct smi_info
220 unsigned long events; 222 unsigned long events;
221 unsigned long watchdog_pretimeouts; 223 unsigned long watchdog_pretimeouts;
222 unsigned long incoming_messages; 224 unsigned long incoming_messages;
225
226 struct task_struct *thread;
223}; 227};
224 228
229static struct notifier_block *xaction_notifier_list;
230static int register_xaction_notifier(struct notifier_block * nb)
231{
232 return notifier_chain_register(&xaction_notifier_list, nb);
233}
234
225static void si_restart_short_timer(struct smi_info *smi_info); 235static void si_restart_short_timer(struct smi_info *smi_info);
226 236
227static void deliver_recv_msg(struct smi_info *smi_info, 237static void deliver_recv_msg(struct smi_info *smi_info,
@@ -281,6 +291,11 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
281 do_gettimeofday(&t); 291 do_gettimeofday(&t);
282 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); 292 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
283#endif 293#endif
294 err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
295 if (err & NOTIFY_STOP_MASK) {
296 rv = SI_SM_CALL_WITHOUT_DELAY;
297 goto out;
298 }
284 err = smi_info->handlers->start_transaction( 299 err = smi_info->handlers->start_transaction(
285 smi_info->si_sm, 300 smi_info->si_sm,
286 smi_info->curr_msg->data, 301 smi_info->curr_msg->data,
@@ -291,6 +306,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
291 306
292 rv = SI_SM_CALL_WITHOUT_DELAY; 307 rv = SI_SM_CALL_WITHOUT_DELAY;
293 } 308 }
309 out:
294 spin_unlock(&(smi_info->msg_lock)); 310 spin_unlock(&(smi_info->msg_lock));
295 311
296 return rv; 312 return rv;
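
The pre-transaction hook above uses the original notifier API, where the chain head is a bare struct notifier_block pointer: a callback returns NOTIFY_STOP to consume the event (here, to complete the message locally) or NOTIFY_DONE to let the transaction proceed. A minimal sketch of registering and vetoing with that API; the vetoing condition is a placeholder:

#include <linux/init.h>
#include <linux/notifier.h>

static struct notifier_block *my_chain;		/* chain head */

static int my_hook(struct notifier_block *self, unsigned long event,
		   void *data)
{
	if (event != 0)
		return NOTIFY_STOP;	/* consume: stop the chain */
	return NOTIFY_DONE;		/* pass: let processing continue */
}

static struct notifier_block my_nb = {
	.notifier_call = my_hook,
};

static int __init my_setup(void)
{
	return notifier_chain_register(&my_chain, &my_nb);
}
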
@@ -766,6 +782,29 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
766 spin_unlock_irqrestore(&(smi_info->si_lock), flags); 782 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
767} 783}
768 784
785static int ipmi_thread(void *data)
786{
787 struct smi_info *smi_info = data;
788 unsigned long flags;
789 enum si_sm_result smi_result;
790
791 set_user_nice(current, 19);
792 while (!kthread_should_stop()) {
793 spin_lock_irqsave(&(smi_info->si_lock), flags);
794 smi_result = smi_event_handler(smi_info, 0);
795 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
796 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
797 /* do nothing */
798 }
799 else if (smi_result == SI_SM_CALL_WITH_DELAY)
800 udelay(1);
801 else
802 schedule_timeout_interruptible(1);
803 }
804 return 0;
805}
806
807
769static void poll(void *send_info) 808static void poll(void *send_info)
770{ 809{
771 struct smi_info *smi_info = send_info; 810 struct smi_info *smi_info = send_info;
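
The kipmid thread added above follows the stock kthread pattern: the loop polls kthread_should_stop(), and the owner ends it with kthread_stop(), which wakes the thread and waits for it to exit. A minimal sketch of that pattern:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... one unit of polling work ... */
		schedule_timeout_interruptible(1);	/* yield a tick */
	}
	return 0;
}

static int start_worker(void)
{
	worker = kthread_run(worker_fn, NULL, "myworker");
	return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void stop_worker(void)
{
	kthread_stop(worker);	/* wakes the thread, waits for exit */
}
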
@@ -819,15 +858,13 @@ static void smi_timeout(unsigned long data)
819 enum si_sm_result smi_result; 858 enum si_sm_result smi_result;
820 unsigned long flags; 859 unsigned long flags;
821 unsigned long jiffies_now; 860 unsigned long jiffies_now;
822 unsigned long time_diff; 861 long time_diff;
823#ifdef DEBUG_TIMING 862#ifdef DEBUG_TIMING
824 struct timeval t; 863 struct timeval t;
825#endif 864#endif
826 865
827 if (smi_info->stop_operation) { 866 if (atomic_read(&smi_info->stop_operation))
828 smi_info->timer_stopped = 1;
829 return; 867 return;
830 }
831 868
832 spin_lock_irqsave(&(smi_info->si_lock), flags); 869 spin_lock_irqsave(&(smi_info->si_lock), flags);
833#ifdef DEBUG_TIMING 870#ifdef DEBUG_TIMING
@@ -835,7 +872,7 @@ static void smi_timeout(unsigned long data)
835 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); 872 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
836#endif 873#endif
837 jiffies_now = jiffies; 874 jiffies_now = jiffies;
838 time_diff = ((jiffies_now - smi_info->last_timeout_jiffies) 875 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
839 * SI_USEC_PER_JIFFY); 876 * SI_USEC_PER_JIFFY);
840 smi_result = smi_event_handler(smi_info, time_diff); 877 smi_result = smi_event_handler(smi_info, time_diff);
841 878
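
The time_diff change matters because jiffies wraps: an unsigned subtraction involving a slightly-in-the-future timestamp yields a huge positive value instead of a small negative one. Doing the arithmetic in a signed type, or using the time_after() helpers for ordering tests, keeps the delta sane. In isolation:

#include <linux/jiffies.h>

/* Wrap-safe for deltas smaller than LONG_MAX jiffies. */
static long usecs_since(unsigned long then, long usec_per_jiffy)
{
	return ((long)jiffies - (long)then) * usec_per_jiffy;
}

/* For simple ordering tests, prefer the helpers: */
static int expired(unsigned long deadline)
{
	return time_after(jiffies, deadline);
}
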
@@ -900,7 +937,7 @@ static irqreturn_t si_irq_handler(int irq, void *data, struct pt_regs *regs)
900 smi_info->interrupts++; 937 smi_info->interrupts++;
901 spin_unlock(&smi_info->count_lock); 938 spin_unlock(&smi_info->count_lock);
902 939
903 if (smi_info->stop_operation) 940 if (atomic_read(&smi_info->stop_operation))
904 goto out; 941 goto out;
905 942
906#ifdef DEBUG_TIMING 943#ifdef DEBUG_TIMING
@@ -1419,7 +1456,7 @@ static u32 ipmi_acpi_gpe(void *context)
1419 smi_info->interrupts++; 1456 smi_info->interrupts++;
1420 spin_unlock(&smi_info->count_lock); 1457 spin_unlock(&smi_info->count_lock);
1421 1458
1422 if (smi_info->stop_operation) 1459 if (atomic_read(&smi_info->stop_operation))
1423 goto out; 1460 goto out;
1424 1461
1425#ifdef DEBUG_TIMING 1462#ifdef DEBUG_TIMING
@@ -1919,7 +1956,8 @@ static int try_get_dev_id(struct smi_info *smi_info)
1919 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); 1956 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
1920 for (;;) 1957 for (;;)
1921 { 1958 {
1922 if (smi_result == SI_SM_CALL_WITH_DELAY) { 1959 if (smi_result == SI_SM_CALL_WITH_DELAY ||
1960 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
1923 schedule_timeout_uninterruptible(1); 1961 schedule_timeout_uninterruptible(1);
1924 smi_result = smi_info->handlers->event( 1962 smi_result = smi_info->handlers->event(
1925 smi_info->si_sm, 100); 1963 smi_info->si_sm, 100);
@@ -2052,6 +2090,9 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2052 * IPMI Version = 0x51 IPMI 1.5 2090 * IPMI Version = 0x51 IPMI 1.5
2053 * Manufacturer ID = A2 02 00 Dell IANA 2091 * Manufacturer ID = A2 02 00 Dell IANA
2054 * 2092 *
2093 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2094 * OEM0_DATA_AVAIL, which needs to be treated as RECEIVE_MSG_AVAIL.
2095 *
2055 */ 2096 */
2056#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 2097#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2057#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 2098#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
@@ -2061,16 +2102,87 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2061{ 2102{
2062 struct ipmi_device_id *id = &smi_info->device_id; 2103 struct ipmi_device_id *id = &smi_info->device_id;
2063 const char mfr[3]=DELL_IANA_MFR_ID; 2104 const char mfr[3]=DELL_IANA_MFR_ID;
2064 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) 2105 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) {
2065 && (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID) 2106 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2066 && (id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV) 2107 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2067 && (id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION)) 2108 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2068 { 2109 smi_info->oem_data_avail_handler =
2069 smi_info->oem_data_avail_handler = 2110 oem_data_avail_to_receive_msg_avail;
2070 oem_data_avail_to_receive_msg_avail; 2111 }
2112 else if (ipmi_version_major(id) < 1 ||
2113 (ipmi_version_major(id) == 1 &&
2114 ipmi_version_minor(id) < 5)) {
2115 smi_info->oem_data_avail_handler =
2116 oem_data_avail_to_receive_msg_avail;
2117 }
2071 } 2118 }
2072} 2119}
2073 2120
2121#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2122static void return_hosed_msg_badsize(struct smi_info *smi_info)
2123{
2124 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2125
2126	/* Make it a response */
2127 msg->rsp[0] = msg->data[0] | 4;
2128 msg->rsp[1] = msg->data[1];
2129 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2130 msg->rsp_size = 3;
2131 smi_info->curr_msg = NULL;
2132 deliver_recv_msg(smi_info, msg);
2133}
2134
2135/*
2136 * dell_poweredge_bt_xaction_handler
2137 * @info - smi_info.device_id must be populated
2138 *
2139 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2140 * not respond to a Get SDR command if the length of the data
2141 * requested is exactly 0x3A, which leads to command timeouts and no
2142 * data returned. This intercepts such commands, and causes userspace
2143 * callers to try again with a different-sized buffer, which succeeds.
2144 */
2145
2146#define STORAGE_NETFN 0x0A
2147#define STORAGE_CMD_GET_SDR 0x23
2148static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2149 unsigned long unused,
2150 void *in)
2151{
2152 struct smi_info *smi_info = in;
2153 unsigned char *data = smi_info->curr_msg->data;
2154 unsigned int size = smi_info->curr_msg->data_size;
2155 if (size >= 8 &&
2156 (data[0]>>2) == STORAGE_NETFN &&
2157 data[1] == STORAGE_CMD_GET_SDR &&
2158 data[7] == 0x3A) {
2159 return_hosed_msg_badsize(smi_info);
2160 return NOTIFY_STOP;
2161 }
2162 return NOTIFY_DONE;
2163}
2164
2165static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2166 .notifier_call = dell_poweredge_bt_xaction_handler,
2167};
2168
2169/*
2170 * setup_dell_poweredge_bt_xaction_handler
2171 * @info - smi_info.device_id must be filled in already
2172 *
2173 * Registers the Dell PowerEdge BT transaction notifier
2174 * when the interface is known to need it.
2175 */
2176static void
2177setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2178{
2179 struct ipmi_device_id *id = &smi_info->device_id;
2180 const char mfr[3]=DELL_IANA_MFR_ID;
2181 if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) &&
2182 smi_info->si_type == SI_BT)
2183 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2184}
2185
2074/* 2186/*
2075 * setup_oem_data_handler 2187 * setup_oem_data_handler
2076 * @info - smi_info.device_id must be filled in already 2188 * @info - smi_info.device_id must be filled in already
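
return_hosed_msg_badsize() never touches the BMC; it fabricates the response in place. In the raw message format, data[0] is (NetFn << 2) | LUN and request NetFns are even, so OR-ing in 0x04 turns the request NetFn into the matching response NetFn; the third byte is the completion code. The same construction for an arbitrary completion code, as a sketch:

/* Build a local response to the raw request in req[0..1]; cc is an IPMI
 * completion code, e.g. 0xCA (cannot return requested length). */
static void fake_response(const unsigned char *req, unsigned char *rsp,
			  unsigned int *rsp_size, unsigned char cc)
{
	rsp[0] = req[0] | 0x04;	/* response netfn = request netfn + 1 */
	rsp[1] = req[1];	/* same command */
	rsp[2] = cc;		/* completion code */
	*rsp_size = 3;
}
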
@@ -2084,6 +2196,18 @@ static void setup_oem_data_handler(struct smi_info *smi_info)
2084 setup_dell_poweredge_oem_data_handler(smi_info); 2196 setup_dell_poweredge_oem_data_handler(smi_info);
2085} 2197}
2086 2198
2199static void setup_xaction_handlers(struct smi_info *smi_info)
2200{
2201 setup_dell_poweredge_bt_xaction_handler(smi_info);
2202}
2203
2204static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2205{
2206	if (smi_info->thread && !IS_ERR(smi_info->thread))
2207 kthread_stop(smi_info->thread);
2208 del_timer_sync(&smi_info->si_timer);
2209}
2210
2087/* Returns 0 if initialized, or negative on an error. */ 2211/* Returns 0 if initialized, or negative on an error. */
2088static int init_one_smi(int intf_num, struct smi_info **smi) 2212static int init_one_smi(int intf_num, struct smi_info **smi)
2089{ 2213{
@@ -2179,6 +2303,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2179 goto out_err; 2303 goto out_err;
2180 2304
2181 setup_oem_data_handler(new_smi); 2305 setup_oem_data_handler(new_smi);
2306 setup_xaction_handlers(new_smi);
2182 2307
2183 /* Try to claim any interrupts. */ 2308 /* Try to claim any interrupts. */
2184 new_smi->irq_setup(new_smi); 2309 new_smi->irq_setup(new_smi);
@@ -2190,8 +2315,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2190 new_smi->run_to_completion = 0; 2315 new_smi->run_to_completion = 0;
2191 2316
2192 new_smi->interrupt_disabled = 0; 2317 new_smi->interrupt_disabled = 0;
2193 new_smi->timer_stopped = 0; 2318 atomic_set(&new_smi->stop_operation, 0);
2194 new_smi->stop_operation = 0; 2319 new_smi->intf_num = intf_num;
2195 2320
2196 /* Start clearing the flags before we enable interrupts or the 2321 /* Start clearing the flags before we enable interrupts or the
2197 timer to avoid racing with the timer. */ 2322 timer to avoid racing with the timer. */
@@ -2209,7 +2334,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2209 new_smi->si_timer.function = smi_timeout; 2334 new_smi->si_timer.function = smi_timeout;
2210 new_smi->last_timeout_jiffies = jiffies; 2335 new_smi->last_timeout_jiffies = jiffies;
2211 new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; 2336 new_smi->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
2337
2212 add_timer(&(new_smi->si_timer)); 2338 add_timer(&(new_smi->si_timer));
2339 if (new_smi->si_type != SI_BT)
2340 new_smi->thread = kthread_run(ipmi_thread, new_smi,
2341 "kipmi%d", new_smi->intf_num);
2213 2342
2214 rv = ipmi_register_smi(&handlers, 2343 rv = ipmi_register_smi(&handlers,
2215 new_smi, 2344 new_smi,
@@ -2251,12 +2380,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
2251 return 0; 2380 return 0;
2252 2381
2253 out_err_stop_timer: 2382 out_err_stop_timer:
2254 new_smi->stop_operation = 1; 2383 atomic_inc(&new_smi->stop_operation);
2255 2384 wait_for_timer_and_thread(new_smi);
2256 /* Wait for the timer to stop. This avoids problems with race
2257 conditions removing the timer here. */
2258 while (!new_smi->timer_stopped)
2259 schedule_timeout_uninterruptible(1);
2260 2385
2261 out_err: 2386 out_err:
2262 if (new_smi->intf) 2387 if (new_smi->intf)
@@ -2362,8 +2487,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
2362 spin_lock_irqsave(&(to_clean->si_lock), flags); 2487 spin_lock_irqsave(&(to_clean->si_lock), flags);
2363 spin_lock(&(to_clean->msg_lock)); 2488 spin_lock(&(to_clean->msg_lock));
2364 2489
2365 to_clean->stop_operation = 1; 2490 atomic_inc(&to_clean->stop_operation);
2366
2367 to_clean->irq_cleanup(to_clean); 2491 to_clean->irq_cleanup(to_clean);
2368 2492
2369 spin_unlock(&(to_clean->msg_lock)); 2493 spin_unlock(&(to_clean->msg_lock));
@@ -2374,10 +2498,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
2374 interrupt. */ 2498 interrupt. */
2375 synchronize_sched(); 2499 synchronize_sched();
2376 2500
2377 /* Wait for the timer to stop. This avoids problems with race 2501 wait_for_timer_and_thread(to_clean);
2378 conditions removing the timer here. */
2379 while (!to_clean->timer_stopped)
2380 schedule_timeout_uninterruptible(1);
2381 2502
2382 /* Interrupts and timeouts are stopped, now make sure the 2503 /* Interrupts and timeouts are stopped, now make sure the
2383 interface is in a clean state. */ 2504 interface is in a clean state. */
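
The teardown now relies on ordering instead of the old timer_stopped busy-wait: raise the atomic stop flag so handlers already queued exit early, let in-flight handlers drain, then reap the timer and thread synchronously. A condensed sketch of that ordering, assuming a hypothetical driver struct:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <asm/atomic.h>

struct drv {
	atomic_t stopping;
	struct timer_list timer;
	struct task_struct *thread;
};

static void drv_shutdown(struct drv *d)
{
	atomic_inc(&d->stopping);	/* handlers check this and bail */
	synchronize_sched();		/* wait out running handlers */
	del_timer_sync(&d->timer);	/* timer fn saw stopping: no re-arm */
	if (d->thread && !IS_ERR(d->thread))
		kthread_stop(d->thread);
}
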
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h
index 62791dd42985..bf3d4962d6a5 100644
--- a/drivers/char/ipmi/ipmi_si_sm.h
+++ b/drivers/char/ipmi/ipmi_si_sm.h
@@ -62,6 +62,7 @@ enum si_sm_result
62{ 62{
63 SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ 63 SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
64 SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ 64 SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */
65 SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */
65 SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ 66 SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
66 SI_SM_IDLE, /* The SM is in idle state. */ 67 SI_SM_IDLE, /* The SM is in idle state. */
67 SI_SM_HOSED, /* The hardware violated the state machine. */ 68 SI_SM_HOSED, /* The hardware violated the state machine. */
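
Callers dispatch on these result codes to pick a wait: SI_SM_CALL_WITH_DELAY wants a short on-CPU delay, while the new SI_SM_CALL_WITH_TICK_DELAY asks for at least one scheduler tick. One plausible dispatch, sketched under that reading:

#include <linux/delay.h>
#include <linux/sched.h>
#include "ipmi_si_sm.h"

static void wait_for_result(enum si_sm_result smi_result)
{
	switch (smi_result) {
	case SI_SM_CALL_WITHOUT_DELAY:
		break;			/* poll again immediately */
	case SI_SM_CALL_WITH_DELAY:
		udelay(1);		/* short delay, stay on CPU */
		break;
	case SI_SM_CALL_WITH_TICK_DELAY:
		schedule_timeout_uninterruptible(1);	/* >= 1 tick */
		break;
	default:
		break;			/* complete/idle/hosed: no wait */
	}
}
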
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index add2aa2732f0..39d7e5ef1a2b 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -43,6 +43,8 @@
43 43
44#include <linux/kernel.h> /* For printk. */ 44#include <linux/kernel.h> /* For printk. */
45#include <linux/string.h> 45#include <linux/string.h>
46#include <linux/module.h>
47#include <linux/moduleparam.h>
46#include <linux/ipmi_msgdefs.h> /* for completion codes */ 48#include <linux/ipmi_msgdefs.h> /* for completion codes */
47#include "ipmi_si_sm.h" 49#include "ipmi_si_sm.h"
48 50
@@ -56,6 +58,8 @@
56#define SMIC_DEBUG_ENABLE 1 58#define SMIC_DEBUG_ENABLE 1
57 59
58static int smic_debug = 1; 60static int smic_debug = 1;
61module_param(smic_debug, int, 0644);
62MODULE_PARM_DESC(smic_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
59 63
60enum smic_states { 64enum smic_states {
61 SMIC_IDLE, 65 SMIC_IDLE,
@@ -76,11 +80,17 @@ enum smic_states {
76#define SMIC_MAX_ERROR_RETRIES 3 80#define SMIC_MAX_ERROR_RETRIES 3
77 81
78/* Timeouts in microseconds. */ 82/* Timeouts in microseconds. */
79#define SMIC_RETRY_TIMEOUT 100000 83#define SMIC_RETRY_TIMEOUT 2000000
80 84
81/* SMIC Flags Register Bits */ 85/* SMIC Flags Register Bits */
82#define SMIC_RX_DATA_READY 0x80 86#define SMIC_RX_DATA_READY 0x80
83#define SMIC_TX_DATA_READY 0x40 87#define SMIC_TX_DATA_READY 0x40
88/*
89 * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
90 * a few systems, and then only by Systems Management
91 * Interrupts, not by the OS. Always ignore these bits.
92 *
93 */
84#define SMIC_SMI 0x10 94#define SMIC_SMI 0x10
85#define SMIC_EVM_DATA_AVAIL 0x08 95#define SMIC_EVM_DATA_AVAIL 0x08
86#define SMIC_SMS_DATA_AVAIL 0x04 96#define SMIC_SMS_DATA_AVAIL 0x04
@@ -364,8 +374,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
364 switch (smic->state) { 374 switch (smic->state) {
365 case SMIC_IDLE: 375 case SMIC_IDLE:
366 /* in IDLE we check for available messages */ 376 /* in IDLE we check for available messages */
367 if (flags & (SMIC_SMI | 377 if (flags & SMIC_SMS_DATA_AVAIL)
368 SMIC_EVM_DATA_AVAIL | SMIC_SMS_DATA_AVAIL))
369 { 378 {
370 return SI_SM_ATTN; 379 return SI_SM_ATTN;
371 } 380 }
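
The bt_debug/smic_debug knobs are plain bitmask parameters: mode 0644 makes them writable at runtime through /sys/module/<module>/parameters/, and call sites test individual bits. The idiom in miniature:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define DBG_ENABLE	1
#define DBG_MSG		2
#define DBG_STATES	4

static int my_debug;	/* bitmask, runtime-writable via sysfs */
module_param(my_debug, int, 0644);
MODULE_PARM_DESC(my_debug, "debug bitmask, 1=enable, 2=messages, 4=states");

static void trace_msg(const char *what)
{
	if (my_debug & DBG_MSG)
		printk(KERN_DEBUG "mydrv: %s\n", what);
}
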
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 2da64bf7469c..1f3159eb1ede 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -47,6 +47,9 @@
47#include <linux/reboot.h> 47#include <linux/reboot.h>
48#include <linux/wait.h> 48#include <linux/wait.h>
49#include <linux/poll.h> 49#include <linux/poll.h>
50#include <linux/string.h>
51#include <linux/ctype.h>
52#include <asm/atomic.h>
50#ifdef CONFIG_X86_LOCAL_APIC 53#ifdef CONFIG_X86_LOCAL_APIC
51#include <asm/apic.h> 54#include <asm/apic.h>
52#endif 55#endif
@@ -158,27 +161,120 @@ static struct fasync_struct *fasync_q = NULL;
158static char pretimeout_since_last_heartbeat = 0; 161static char pretimeout_since_last_heartbeat = 0;
159static char expect_close; 162static char expect_close;
160 163
164static DECLARE_RWSEM(register_sem);
165
166/* Parameters to ipmi_set_timeout */
167#define IPMI_SET_TIMEOUT_NO_HB 0
168#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1
169#define IPMI_SET_TIMEOUT_FORCE_HB 2
170
171static int ipmi_set_timeout(int do_heartbeat);
172
161/* If true, the driver will start running as soon as it is configured 173/* If true, the driver will start running as soon as it is configured
162 and ready. */ 174 and ready. */
163static int start_now = 0; 175static int start_now = 0;
164 176
165module_param(timeout, int, 0); 177static int set_param_int(const char *val, struct kernel_param *kp)
178{
179 char *endp;
180 int l;
181 int rv = 0;
182
183 if (!val)
184 return -EINVAL;
185 l = simple_strtoul(val, &endp, 0);
186 if (endp == val)
187 return -EINVAL;
188
189 down_read(&register_sem);
190 *((int *)kp->arg) = l;
191 if (watchdog_user)
192 rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
193 up_read(&register_sem);
194
195 return rv;
196}
197
198static int get_param_int(char *buffer, struct kernel_param *kp)
199{
200 return sprintf(buffer, "%i", *((int *)kp->arg));
201}
202
203typedef int (*action_fn)(const char *inval, char *outval);
204
205static int action_op(const char *inval, char *outval);
206static int preaction_op(const char *inval, char *outval);
207static int preop_op(const char *inval, char *outval);
208static void check_parms(void);
209
210static int set_param_str(const char *val, struct kernel_param *kp)
211{
212 action_fn fn = (action_fn) kp->arg;
213 int rv = 0;
214 const char *end;
215 char valcp[16];
216 int len;
217
218	/* Trim leading and trailing spaces. */
219 while (isspace(*val))
220 val++;
221 end = val + strlen(val) - 1;
222 while ((end >= val) && isspace(*end))
223 end--;
224 len = end - val + 1;
225 if (len > sizeof(valcp) - 1)
226 return -EINVAL;
227 memcpy(valcp, val, len);
228 valcp[len] = '\0';
229
230 down_read(&register_sem);
231 rv = fn(valcp, NULL);
232 if (rv)
233 goto out_unlock;
234
235 check_parms();
236 if (watchdog_user)
237 rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
238
239 out_unlock:
240 up_read(&register_sem);
241 return rv;
242}
243
244static int get_param_str(char *buffer, struct kernel_param *kp)
245{
246 action_fn fn = (action_fn) kp->arg;
247 int rv;
248
249 rv = fn(NULL, buffer);
250 if (rv)
251 return rv;
252 return strlen(buffer);
253}
254
255module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
166MODULE_PARM_DESC(timeout, "Timeout value in seconds."); 256MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
167module_param(pretimeout, int, 0); 257
258module_param_call(pretimeout, set_param_int, get_param_int, &pretimeout, 0644);
168MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); 259MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
169module_param_string(action, action, sizeof(action), 0); 260
261module_param_call(action, set_param_str, get_param_str, action_op, 0644);
170MODULE_PARM_DESC(action, "Timeout action. One of: " 262MODULE_PARM_DESC(action, "Timeout action. One of: "
171 "reset, none, power_cycle, power_off."); 263 "reset, none, power_cycle, power_off.");
172module_param_string(preaction, preaction, sizeof(preaction), 0); 264
265module_param_call(preaction, set_param_str, get_param_str, preaction_op, 0644);
173MODULE_PARM_DESC(preaction, "Pretimeout action. One of: " 266MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
174 "pre_none, pre_smi, pre_nmi, pre_int."); 267 "pre_none, pre_smi, pre_nmi, pre_int.");
175module_param_string(preop, preop, sizeof(preop), 0); 268
269module_param_call(preop, set_param_str, get_param_str, preop_op, 0644);
176MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: " 270MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
177 "preop_none, preop_panic, preop_give_data."); 271 "preop_none, preop_panic, preop_give_data.");
272
178module_param(start_now, int, 0); 273module_param(start_now, int, 0);
179MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as" 274MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
180 "soon as the driver is loaded."); 275 "soon as the driver is loaded.");
181module_param(nowayout, int, 0); 276
277module_param(nowayout, int, 0644);
182MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)"); 278MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
183 279
184/* Default state of the timer. */ 280/* Default state of the timer. */
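
module_param_call() attaches custom set/get routines, so a write (including a runtime sysfs write, given the 0644 mode) can validate the string and push the new value to the hardware rather than just storing it. In the kernel_param interface of this era, ->arg is the address passed at registration. A reduced sketch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int my_timeout = 10;

static int my_set(const char *val, struct kernel_param *kp)
{
	char *endp;
	int v;

	if (!val)
		return -EINVAL;
	v = simple_strtoul(val, &endp, 0);
	if (endp == val)
		return -EINVAL;
	*(int *)kp->arg = v;
	/* ... push the new value to the hardware here ... */
	return 0;
}

static int my_get(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%i", *(int *)kp->arg);
}

module_param_call(my_timeout, my_set, my_get, &my_timeout, 0644);
MODULE_PARM_DESC(my_timeout, "Timeout value in seconds.");
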
@@ -200,6 +296,8 @@ static int ipmi_start_timer_on_heartbeat = 0;
200static unsigned char ipmi_version_major; 296static unsigned char ipmi_version_major;
201static unsigned char ipmi_version_minor; 297static unsigned char ipmi_version_minor;
202 298
299/* If a pretimeout occurs, this is used to allow only one panic to happen. */
300static atomic_t preop_panic_excl = ATOMIC_INIT(-1);
203 301
204static int ipmi_heartbeat(void); 302static int ipmi_heartbeat(void);
205static void panic_halt_ipmi_heartbeat(void); 303static void panic_halt_ipmi_heartbeat(void);
@@ -294,11 +392,6 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
294 return rv; 392 return rv;
295} 393}
296 394
297/* Parameters to ipmi_set_timeout */
298#define IPMI_SET_TIMEOUT_NO_HB 0
299#define IPMI_SET_TIMEOUT_HB_IF_NECESSARY 1
300#define IPMI_SET_TIMEOUT_FORCE_HB 2
301
302static int ipmi_set_timeout(int do_heartbeat) 395static int ipmi_set_timeout(int do_heartbeat)
303{ 396{
304 int send_heartbeat_now; 397 int send_heartbeat_now;
@@ -732,8 +825,6 @@ static struct miscdevice ipmi_wdog_miscdev = {
732 .fops = &ipmi_wdog_fops 825 .fops = &ipmi_wdog_fops
733}; 826};
734 827
735static DECLARE_RWSEM(register_sem);
736
737static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, 828static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
738 void *handler_data) 829 void *handler_data)
739{ 830{
@@ -749,9 +840,10 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
749static void ipmi_wdog_pretimeout_handler(void *handler_data) 840static void ipmi_wdog_pretimeout_handler(void *handler_data)
750{ 841{
751 if (preaction_val != WDOG_PRETIMEOUT_NONE) { 842 if (preaction_val != WDOG_PRETIMEOUT_NONE) {
752 if (preop_val == WDOG_PREOP_PANIC) 843 if (preop_val == WDOG_PREOP_PANIC) {
753 panic("Watchdog pre-timeout"); 844 if (atomic_inc_and_test(&preop_panic_excl))
754 else if (preop_val == WDOG_PREOP_GIVE_DATA) { 845 panic("Watchdog pre-timeout");
846 } else if (preop_val == WDOG_PREOP_GIVE_DATA) {
755 spin_lock(&ipmi_read_lock); 847 spin_lock(&ipmi_read_lock);
756 data_to_read = 1; 848 data_to_read = 1;
757 wake_up_interruptible(&read_q); 849 wake_up_interruptible(&read_q);
@@ -825,7 +917,8 @@ ipmi_nmi(void *dev_id, struct pt_regs *regs, int cpu, int handled)
825 an error and not work unless we re-enable 917 an error and not work unless we re-enable
826 the timer. So do so. */ 918 the timer. So do so. */
827 pretimeout_since_last_heartbeat = 1; 919 pretimeout_since_last_heartbeat = 1;
828 panic(PFX "pre-timeout"); 920 if (atomic_inc_and_test(&preop_panic_excl))
921 panic(PFX "pre-timeout");
829 } 922 }
830 923
831 return NOTIFY_DONE; 924 return NOTIFY_DONE;
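
preop_panic_excl starts at -1 so atomic_inc_and_test(), which reports whether the incremented value reached zero, succeeds for exactly one caller even if the NMI and message-interrupt paths race; later callers see a nonzero result and skip the panic. The one-shot idiom in isolation:

#include <linux/kernel.h>
#include <asm/atomic.h>

static atomic_t once = ATOMIC_INIT(-1);

static void maybe_panic(const char *why)
{
	/* First caller increments -1 -> 0 and wins; the rest see > 0. */
	if (atomic_inc_and_test(&once))
		panic("%s", why);
}
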
@@ -839,6 +932,7 @@ static struct nmi_handler ipmi_nmi_handler =
839 .handler = ipmi_nmi, 932 .handler = ipmi_nmi,
840 .priority = 0, /* Call us last. */ 933 .priority = 0, /* Call us last. */
841}; 934};
935static int nmi_handler_registered;
842#endif 936#endif
843 937
844static int wdog_reboot_handler(struct notifier_block *this, 938static int wdog_reboot_handler(struct notifier_block *this,
@@ -921,59 +1015,86 @@ static struct ipmi_smi_watcher smi_watcher =
921 .smi_gone = ipmi_smi_gone 1015 .smi_gone = ipmi_smi_gone
922}; 1016};
923 1017
924static int __init ipmi_wdog_init(void) 1018static int action_op(const char *inval, char *outval)
925{ 1019{
926 int rv; 1020 if (outval)
1021 strcpy(outval, action);
1022
1023 if (!inval)
1024 return 0;
927 1025
928 if (strcmp(action, "reset") == 0) { 1026 if (strcmp(inval, "reset") == 0)
929 action_val = WDOG_TIMEOUT_RESET; 1027 action_val = WDOG_TIMEOUT_RESET;
930 } else if (strcmp(action, "none") == 0) { 1028 else if (strcmp(inval, "none") == 0)
931 action_val = WDOG_TIMEOUT_NONE; 1029 action_val = WDOG_TIMEOUT_NONE;
932 } else if (strcmp(action, "power_cycle") == 0) { 1030 else if (strcmp(inval, "power_cycle") == 0)
933 action_val = WDOG_TIMEOUT_POWER_CYCLE; 1031 action_val = WDOG_TIMEOUT_POWER_CYCLE;
934 } else if (strcmp(action, "power_off") == 0) { 1032 else if (strcmp(inval, "power_off") == 0)
935 action_val = WDOG_TIMEOUT_POWER_DOWN; 1033 action_val = WDOG_TIMEOUT_POWER_DOWN;
936 } else { 1034 else
937 action_val = WDOG_TIMEOUT_RESET; 1035 return -EINVAL;
938 printk(KERN_INFO PFX "Unknown action '%s', defaulting to" 1036 strcpy(action, inval);
939 " reset\n", action); 1037 return 0;
940 } 1038}
1039
1040static int preaction_op(const char *inval, char *outval)
1041{
1042 if (outval)
1043 strcpy(outval, preaction);
941 1044
942 if (strcmp(preaction, "pre_none") == 0) { 1045 if (!inval)
1046 return 0;
1047
1048 if (strcmp(inval, "pre_none") == 0)
943 preaction_val = WDOG_PRETIMEOUT_NONE; 1049 preaction_val = WDOG_PRETIMEOUT_NONE;
944 } else if (strcmp(preaction, "pre_smi") == 0) { 1050 else if (strcmp(inval, "pre_smi") == 0)
945 preaction_val = WDOG_PRETIMEOUT_SMI; 1051 preaction_val = WDOG_PRETIMEOUT_SMI;
946#ifdef HAVE_NMI_HANDLER 1052#ifdef HAVE_NMI_HANDLER
947 } else if (strcmp(preaction, "pre_nmi") == 0) { 1053 else if (strcmp(inval, "pre_nmi") == 0)
948 preaction_val = WDOG_PRETIMEOUT_NMI; 1054 preaction_val = WDOG_PRETIMEOUT_NMI;
949#endif 1055#endif
950 } else if (strcmp(preaction, "pre_int") == 0) { 1056 else if (strcmp(inval, "pre_int") == 0)
951 preaction_val = WDOG_PRETIMEOUT_MSG_INT; 1057 preaction_val = WDOG_PRETIMEOUT_MSG_INT;
952 } else { 1058 else
953 preaction_val = WDOG_PRETIMEOUT_NONE; 1059 return -EINVAL;
954 printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to" 1060 strcpy(preaction, inval);
955 " none\n", preaction); 1061 return 0;
956 } 1062}
1063
1064static int preop_op(const char *inval, char *outval)
1065{
1066 if (outval)
1067 strcpy(outval, preop);
957 1068
958 if (strcmp(preop, "preop_none") == 0) { 1069 if (!inval)
1070 return 0;
1071
1072 if (strcmp(inval, "preop_none") == 0)
959 preop_val = WDOG_PREOP_NONE; 1073 preop_val = WDOG_PREOP_NONE;
960 } else if (strcmp(preop, "preop_panic") == 0) { 1074 else if (strcmp(inval, "preop_panic") == 0)
961 preop_val = WDOG_PREOP_PANIC; 1075 preop_val = WDOG_PREOP_PANIC;
962 } else if (strcmp(preop, "preop_give_data") == 0) { 1076 else if (strcmp(inval, "preop_give_data") == 0)
963 preop_val = WDOG_PREOP_GIVE_DATA; 1077 preop_val = WDOG_PREOP_GIVE_DATA;
964 } else { 1078 else
965 preop_val = WDOG_PREOP_NONE; 1079 return -EINVAL;
966 printk(KERN_INFO PFX "Unknown preop '%s', defaulting to" 1080 strcpy(preop, inval);
967 " none\n", preop); 1081 return 0;
968 } 1082}
969 1083
1084static void check_parms(void)
1085{
970#ifdef HAVE_NMI_HANDLER 1086#ifdef HAVE_NMI_HANDLER
1087 int do_nmi = 0;
1088 int rv;
1089
971 if (preaction_val == WDOG_PRETIMEOUT_NMI) { 1090 if (preaction_val == WDOG_PRETIMEOUT_NMI) {
1091 do_nmi = 1;
972 if (preop_val == WDOG_PREOP_GIVE_DATA) { 1092 if (preop_val == WDOG_PREOP_GIVE_DATA) {
973 printk(KERN_WARNING PFX "Pretimeout op is to give data" 1093 printk(KERN_WARNING PFX "Pretimeout op is to give data"
974 " but NMI pretimeout is enabled, setting" 1094 " but NMI pretimeout is enabled, setting"
975 " pretimeout op to none\n"); 1095 " pretimeout op to none\n");
976 preop_val = WDOG_PREOP_NONE; 1096 preop_op("preop_none", NULL);
1097 do_nmi = 0;
977 } 1098 }
978#ifdef CONFIG_X86_LOCAL_APIC 1099#ifdef CONFIG_X86_LOCAL_APIC
979 if (nmi_watchdog == NMI_IO_APIC) { 1100 if (nmi_watchdog == NMI_IO_APIC) {
@@ -983,18 +1104,48 @@ static int __init ipmi_wdog_init(void)
983 " Disabling IPMI nmi pretimeout.\n", 1104 " Disabling IPMI nmi pretimeout.\n",
984 nmi_watchdog); 1105 nmi_watchdog);
985 preaction_val = WDOG_PRETIMEOUT_NONE; 1106 preaction_val = WDOG_PRETIMEOUT_NONE;
986 } else { 1107 do_nmi = 0;
1108 }
987#endif 1109#endif
1110 }
1111 if (do_nmi && !nmi_handler_registered) {
988 rv = request_nmi(&ipmi_nmi_handler); 1112 rv = request_nmi(&ipmi_nmi_handler);
989 if (rv) { 1113 if (rv) {
990 printk(KERN_WARNING PFX "Can't register nmi handler\n"); 1114 printk(KERN_WARNING PFX
991 return rv; 1115 "Can't register nmi handler\n");
992 } 1116 return;
993#ifdef CONFIG_X86_LOCAL_APIC 1117 } else
994 } 1118 nmi_handler_registered = 1;
995#endif 1119 } else if (!do_nmi && nmi_handler_registered) {
1120 release_nmi(&ipmi_nmi_handler);
1121 nmi_handler_registered = 0;
996 } 1122 }
997#endif 1123#endif
1124}
1125
1126static int __init ipmi_wdog_init(void)
1127{
1128 int rv;
1129
1130 if (action_op(action, NULL)) {
1131 action_op("reset", NULL);
1132 printk(KERN_INFO PFX "Unknown action '%s', defaulting to"
1133 " reset\n", action);
1134 }
1135
1136 if (preaction_op(preaction, NULL)) {
1137 preaction_op("pre_none", NULL);
1138 printk(KERN_INFO PFX "Unknown preaction '%s', defaulting to"
1139 " none\n", preaction);
1140 }
1141
1142 if (preop_op(preop, NULL)) {
1143 preop_op("preop_none", NULL);
1144 printk(KERN_INFO PFX "Unknown preop '%s', defaulting to"
1145 " none\n", preop);
1146 }
1147
1148 check_parms();
998 1149
999 rv = ipmi_smi_watcher_register(&smi_watcher); 1150 rv = ipmi_smi_watcher_register(&smi_watcher);
1000 if (rv) { 1151 if (rv) {
@@ -1021,7 +1172,7 @@ static __exit void ipmi_unregister_watchdog(void)
1021 down_write(&register_sem); 1172 down_write(&register_sem);
1022 1173
1023#ifdef HAVE_NMI_HANDLER 1174#ifdef HAVE_NMI_HANDLER
1024 if (preaction_val == WDOG_PRETIMEOUT_NMI) 1175 if (nmi_handler_registered)
1025 release_nmi(&ipmi_nmi_handler); 1176 release_nmi(&ipmi_nmi_handler);
1026#endif 1177#endif
1027 1178