author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 20:19:27 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-28 20:19:28 -0400
commit    532bfc851a7475fb6a36c1e953aa395798a7cca7 (patch)
tree      a7892e5a31330dd59f31959efbe9fda1803784fd /drivers
parent    0195c00244dc2e9f522475868fa278c473ba7339 (diff)
parent    8da00edc1069f01c34510fa405dc15d96c090a3f (diff)
Merge branch 'akpm' (Andrew's patch-bomb)
Merge third batch of patches from Andrew Morton:

 - Some MM stragglers
 - core SMP library cleanups (on_each_cpu_mask)
 - Some IPI optimisations
 - kexec
 - kdump
 - IPMI
 - the radix-tree iterator work
 - various other misc bits.

"That'll do for -rc1.  I still have ~10 patches for 3.4, will send those
along when they've baked a little more."

* emailed from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
  backlight: fix typo in tosa_lcd.c
  crc32: add help text for the algorithm select option
  mm: move hugepage test examples to tools/testing/selftests/vm
  mm: move slabinfo.c to tools/vm
  mm: move page-types.c from Documentation to tools/vm
  selftests/Makefile: make `run_tests' depend on `all'
  selftests: launch individual selftests from the main Makefile
  radix-tree: use iterators in find_get_pages* functions
  radix-tree: rewrite gang lookup using iterator
  radix-tree: introduce bit-optimized iterator
  fs/proc/namespaces.c: prevent crash when ns_entries[] is empty
  nbd: rename the nbd_device variable from lo to nbd
  pidns: add reboot_pid_ns() to handle the reboot syscall
  sysctl: use bitmap library functions
  ipmi: use locks on watchdog timeout set on reboot
  ipmi: simplify locking
  ipmi: fix message handling during panics
  ipmi: use a tasklet for handling received messages
  ipmi: increase KCS timeouts
  ipmi: decrease the IPMI message transaction time in interrupt mode
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/nbd.c                  | 295
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c      |   4
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  | 242
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c     |  72
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c    |  21
-rw-r--r--  drivers/video/backlight/tosa_lcd.c   |   2
6 files changed, 320 insertions(+), 316 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c7ba11f9b203..061427a75d37 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -38,7 +38,7 @@
38 38
39#include <linux/nbd.h> 39#include <linux/nbd.h>
40 40
41#define LO_MAGIC 0x68797548 41#define NBD_MAGIC 0x68797548
42 42
43#ifdef NDEBUG 43#ifdef NDEBUG
44#define dprintk(flags, fmt...) 44#define dprintk(flags, fmt...)
@@ -115,7 +115,7 @@ static void nbd_end_request(struct request *req)
115 spin_unlock_irqrestore(q->queue_lock, flags); 115 spin_unlock_irqrestore(q->queue_lock, flags);
116} 116}
117 117
118static void sock_shutdown(struct nbd_device *lo, int lock) 118static void sock_shutdown(struct nbd_device *nbd, int lock)
119{ 119{
120 /* Forcibly shutdown the socket causing all listeners 120 /* Forcibly shutdown the socket causing all listeners
121 * to error 121 * to error
@@ -124,14 +124,14 @@ static void sock_shutdown(struct nbd_device *lo, int lock)
124 * there should be a more generic interface rather than 124 * there should be a more generic interface rather than
125 * calling socket ops directly here */ 125 * calling socket ops directly here */
126 if (lock) 126 if (lock)
127 mutex_lock(&lo->tx_lock); 127 mutex_lock(&nbd->tx_lock);
128 if (lo->sock) { 128 if (nbd->sock) {
129 dev_warn(disk_to_dev(lo->disk), "shutting down socket\n"); 129 dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
130 kernel_sock_shutdown(lo->sock, SHUT_RDWR); 130 kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
131 lo->sock = NULL; 131 nbd->sock = NULL;
132 } 132 }
133 if (lock) 133 if (lock)
134 mutex_unlock(&lo->tx_lock); 134 mutex_unlock(&nbd->tx_lock);
135} 135}
136 136
137static void nbd_xmit_timeout(unsigned long arg) 137static void nbd_xmit_timeout(unsigned long arg)
@@ -146,17 +146,17 @@ static void nbd_xmit_timeout(unsigned long arg)
146/* 146/*
147 * Send or receive packet. 147 * Send or receive packet.
148 */ 148 */
149static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size, 149static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
150 int msg_flags) 150 int msg_flags)
151{ 151{
152 struct socket *sock = lo->sock; 152 struct socket *sock = nbd->sock;
153 int result; 153 int result;
154 struct msghdr msg; 154 struct msghdr msg;
155 struct kvec iov; 155 struct kvec iov;
156 sigset_t blocked, oldset; 156 sigset_t blocked, oldset;
157 157
158 if (unlikely(!sock)) { 158 if (unlikely(!sock)) {
159 dev_err(disk_to_dev(lo->disk), 159 dev_err(disk_to_dev(nbd->disk),
160 "Attempted %s on closed socket in sock_xmit\n", 160 "Attempted %s on closed socket in sock_xmit\n",
161 (send ? "send" : "recv")); 161 (send ? "send" : "recv"));
162 return -EINVAL; 162 return -EINVAL;
@@ -180,15 +180,15 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
180 if (send) { 180 if (send) {
181 struct timer_list ti; 181 struct timer_list ti;
182 182
183 if (lo->xmit_timeout) { 183 if (nbd->xmit_timeout) {
184 init_timer(&ti); 184 init_timer(&ti);
185 ti.function = nbd_xmit_timeout; 185 ti.function = nbd_xmit_timeout;
186 ti.data = (unsigned long)current; 186 ti.data = (unsigned long)current;
187 ti.expires = jiffies + lo->xmit_timeout; 187 ti.expires = jiffies + nbd->xmit_timeout;
188 add_timer(&ti); 188 add_timer(&ti);
189 } 189 }
190 result = kernel_sendmsg(sock, &msg, &iov, 1, size); 190 result = kernel_sendmsg(sock, &msg, &iov, 1, size);
191 if (lo->xmit_timeout) 191 if (nbd->xmit_timeout)
192 del_timer_sync(&ti); 192 del_timer_sync(&ti);
193 } else 193 } else
194 result = kernel_recvmsg(sock, &msg, &iov, 1, size, 194 result = kernel_recvmsg(sock, &msg, &iov, 1, size,
@@ -200,7 +200,7 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
200 task_pid_nr(current), current->comm, 200 task_pid_nr(current), current->comm,
201 dequeue_signal_lock(current, &current->blocked, &info)); 201 dequeue_signal_lock(current, &current->blocked, &info));
202 result = -EINTR; 202 result = -EINTR;
203 sock_shutdown(lo, !send); 203 sock_shutdown(nbd, !send);
204 break; 204 break;
205 } 205 }
206 206
@@ -218,18 +218,19 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
218 return result; 218 return result;
219} 219}
220 220
221static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec, 221static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
222 int flags) 222 int flags)
223{ 223{
224 int result; 224 int result;
225 void *kaddr = kmap(bvec->bv_page); 225 void *kaddr = kmap(bvec->bv_page);
226 result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags); 226 result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
227 bvec->bv_len, flags);
227 kunmap(bvec->bv_page); 228 kunmap(bvec->bv_page);
228 return result; 229 return result;
229} 230}
230 231
231/* always call with the tx_lock held */ 232/* always call with the tx_lock held */
232static int nbd_send_req(struct nbd_device *lo, struct request *req) 233static int nbd_send_req(struct nbd_device *nbd, struct request *req)
233{ 234{
234 int result, flags; 235 int result, flags;
235 struct nbd_request request; 236 struct nbd_request request;
@@ -242,14 +243,14 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
242 memcpy(request.handle, &req, sizeof(req)); 243 memcpy(request.handle, &req, sizeof(req));
243 244
244 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", 245 dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
245 lo->disk->disk_name, req, 246 nbd->disk->disk_name, req,
246 nbdcmd_to_ascii(nbd_cmd(req)), 247 nbdcmd_to_ascii(nbd_cmd(req)),
247 (unsigned long long)blk_rq_pos(req) << 9, 248 (unsigned long long)blk_rq_pos(req) << 9,
248 blk_rq_bytes(req)); 249 blk_rq_bytes(req));
249 result = sock_xmit(lo, 1, &request, sizeof(request), 250 result = sock_xmit(nbd, 1, &request, sizeof(request),
250 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 251 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
251 if (result <= 0) { 252 if (result <= 0) {
252 dev_err(disk_to_dev(lo->disk), 253 dev_err(disk_to_dev(nbd->disk),
253 "Send control failed (result %d)\n", result); 254 "Send control failed (result %d)\n", result);
254 goto error_out; 255 goto error_out;
255 } 256 }
@@ -266,10 +267,10 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
266 if (!rq_iter_last(req, iter)) 267 if (!rq_iter_last(req, iter))
267 flags = MSG_MORE; 268 flags = MSG_MORE;
268 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", 269 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
269 lo->disk->disk_name, req, bvec->bv_len); 270 nbd->disk->disk_name, req, bvec->bv_len);
270 result = sock_send_bvec(lo, bvec, flags); 271 result = sock_send_bvec(nbd, bvec, flags);
271 if (result <= 0) { 272 if (result <= 0) {
272 dev_err(disk_to_dev(lo->disk), 273 dev_err(disk_to_dev(nbd->disk),
273 "Send data failed (result %d)\n", 274 "Send data failed (result %d)\n",
274 result); 275 result);
275 goto error_out; 276 goto error_out;
@@ -282,25 +283,25 @@ error_out:
282 return -EIO; 283 return -EIO;
283} 284}
284 285
285static struct request *nbd_find_request(struct nbd_device *lo, 286static struct request *nbd_find_request(struct nbd_device *nbd,
286 struct request *xreq) 287 struct request *xreq)
287{ 288{
288 struct request *req, *tmp; 289 struct request *req, *tmp;
289 int err; 290 int err;
290 291
291 err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq); 292 err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
292 if (unlikely(err)) 293 if (unlikely(err))
293 goto out; 294 goto out;
294 295
295 spin_lock(&lo->queue_lock); 296 spin_lock(&nbd->queue_lock);
296 list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) { 297 list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
297 if (req != xreq) 298 if (req != xreq)
298 continue; 299 continue;
299 list_del_init(&req->queuelist); 300 list_del_init(&req->queuelist);
300 spin_unlock(&lo->queue_lock); 301 spin_unlock(&nbd->queue_lock);
301 return req; 302 return req;
302 } 303 }
303 spin_unlock(&lo->queue_lock); 304 spin_unlock(&nbd->queue_lock);
304 305
305 err = -ENOENT; 306 err = -ENOENT;
306 307
@@ -308,78 +309,78 @@ out:
308 return ERR_PTR(err); 309 return ERR_PTR(err);
309} 310}
310 311
311static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec) 312static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
312{ 313{
313 int result; 314 int result;
314 void *kaddr = kmap(bvec->bv_page); 315 void *kaddr = kmap(bvec->bv_page);
315 result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len, 316 result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
316 MSG_WAITALL); 317 MSG_WAITALL);
317 kunmap(bvec->bv_page); 318 kunmap(bvec->bv_page);
318 return result; 319 return result;
319} 320}
320 321
321/* NULL returned = something went wrong, inform userspace */ 322/* NULL returned = something went wrong, inform userspace */
322static struct request *nbd_read_stat(struct nbd_device *lo) 323static struct request *nbd_read_stat(struct nbd_device *nbd)
323{ 324{
324 int result; 325 int result;
325 struct nbd_reply reply; 326 struct nbd_reply reply;
326 struct request *req; 327 struct request *req;
327 328
328 reply.magic = 0; 329 reply.magic = 0;
329 result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL); 330 result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
330 if (result <= 0) { 331 if (result <= 0) {
331 dev_err(disk_to_dev(lo->disk), 332 dev_err(disk_to_dev(nbd->disk),
332 "Receive control failed (result %d)\n", result); 333 "Receive control failed (result %d)\n", result);
333 goto harderror; 334 goto harderror;
334 } 335 }
335 336
336 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { 337 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
337 dev_err(disk_to_dev(lo->disk), "Wrong magic (0x%lx)\n", 338 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
338 (unsigned long)ntohl(reply.magic)); 339 (unsigned long)ntohl(reply.magic));
339 result = -EPROTO; 340 result = -EPROTO;
340 goto harderror; 341 goto harderror;
341 } 342 }
342 343
343 req = nbd_find_request(lo, *(struct request **)reply.handle); 344 req = nbd_find_request(nbd, *(struct request **)reply.handle);
344 if (IS_ERR(req)) { 345 if (IS_ERR(req)) {
345 result = PTR_ERR(req); 346 result = PTR_ERR(req);
346 if (result != -ENOENT) 347 if (result != -ENOENT)
347 goto harderror; 348 goto harderror;
348 349
349 dev_err(disk_to_dev(lo->disk), "Unexpected reply (%p)\n", 350 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
350 reply.handle); 351 reply.handle);
351 result = -EBADR; 352 result = -EBADR;
352 goto harderror; 353 goto harderror;
353 } 354 }
354 355
355 if (ntohl(reply.error)) { 356 if (ntohl(reply.error)) {
356 dev_err(disk_to_dev(lo->disk), "Other side returned error (%d)\n", 357 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
357 ntohl(reply.error)); 358 ntohl(reply.error));
358 req->errors++; 359 req->errors++;
359 return req; 360 return req;
360 } 361 }
361 362
362 dprintk(DBG_RX, "%s: request %p: got reply\n", 363 dprintk(DBG_RX, "%s: request %p: got reply\n",
363 lo->disk->disk_name, req); 364 nbd->disk->disk_name, req);
364 if (nbd_cmd(req) == NBD_CMD_READ) { 365 if (nbd_cmd(req) == NBD_CMD_READ) {
365 struct req_iterator iter; 366 struct req_iterator iter;
366 struct bio_vec *bvec; 367 struct bio_vec *bvec;
367 368
368 rq_for_each_segment(bvec, req, iter) { 369 rq_for_each_segment(bvec, req, iter) {
369 result = sock_recv_bvec(lo, bvec); 370 result = sock_recv_bvec(nbd, bvec);
370 if (result <= 0) { 371 if (result <= 0) {
371 dev_err(disk_to_dev(lo->disk), "Receive data failed (result %d)\n", 372 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
372 result); 373 result);
373 req->errors++; 374 req->errors++;
374 return req; 375 return req;
375 } 376 }
376 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", 377 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
377 lo->disk->disk_name, req, bvec->bv_len); 378 nbd->disk->disk_name, req, bvec->bv_len);
378 } 379 }
379 } 380 }
380 return req; 381 return req;
381harderror: 382harderror:
382 lo->harderror = result; 383 nbd->harderror = result;
383 return NULL; 384 return NULL;
384} 385}
385 386
@@ -397,48 +398,48 @@ static struct device_attribute pid_attr = {
397 .show = pid_show, 398 .show = pid_show,
398}; 399};
399 400
400static int nbd_do_it(struct nbd_device *lo) 401static int nbd_do_it(struct nbd_device *nbd)
401{ 402{
402 struct request *req; 403 struct request *req;
403 int ret; 404 int ret;
404 405
405 BUG_ON(lo->magic != LO_MAGIC); 406 BUG_ON(nbd->magic != NBD_MAGIC);
406 407
407 lo->pid = task_pid_nr(current); 408 nbd->pid = task_pid_nr(current);
408 ret = device_create_file(disk_to_dev(lo->disk), &pid_attr); 409 ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
409 if (ret) { 410 if (ret) {
410 dev_err(disk_to_dev(lo->disk), "device_create_file failed!\n"); 411 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
411 lo->pid = 0; 412 nbd->pid = 0;
412 return ret; 413 return ret;
413 } 414 }
414 415
415 while ((req = nbd_read_stat(lo)) != NULL) 416 while ((req = nbd_read_stat(nbd)) != NULL)
416 nbd_end_request(req); 417 nbd_end_request(req);
417 418
418 device_remove_file(disk_to_dev(lo->disk), &pid_attr); 419 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
419 lo->pid = 0; 420 nbd->pid = 0;
420 return 0; 421 return 0;
421} 422}
422 423
423static void nbd_clear_que(struct nbd_device *lo) 424static void nbd_clear_que(struct nbd_device *nbd)
424{ 425{
425 struct request *req; 426 struct request *req;
426 427
427 BUG_ON(lo->magic != LO_MAGIC); 428 BUG_ON(nbd->magic != NBD_MAGIC);
428 429
429 /* 430 /*
430 * Because we have set lo->sock to NULL under the tx_lock, all 431 * Because we have set nbd->sock to NULL under the tx_lock, all
431 * modifications to the list must have completed by now. For 432 * modifications to the list must have completed by now. For
432 * the same reason, the active_req must be NULL. 433 * the same reason, the active_req must be NULL.
433 * 434 *
434 * As a consequence, we don't need to take the spin lock while 435 * As a consequence, we don't need to take the spin lock while
435 * purging the list here. 436 * purging the list here.
436 */ 437 */
437 BUG_ON(lo->sock); 438 BUG_ON(nbd->sock);
438 BUG_ON(lo->active_req); 439 BUG_ON(nbd->active_req);
439 440
440 while (!list_empty(&lo->queue_head)) { 441 while (!list_empty(&nbd->queue_head)) {
441 req = list_entry(lo->queue_head.next, struct request, 442 req = list_entry(nbd->queue_head.next, struct request,
442 queuelist); 443 queuelist);
443 list_del_init(&req->queuelist); 444 list_del_init(&req->queuelist);
444 req->errors++; 445 req->errors++;
@@ -447,7 +448,7 @@ static void nbd_clear_que(struct nbd_device *lo)
447} 448}
448 449
449 450
450static void nbd_handle_req(struct nbd_device *lo, struct request *req) 451static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
451{ 452{
452 if (req->cmd_type != REQ_TYPE_FS) 453 if (req->cmd_type != REQ_TYPE_FS)
453 goto error_out; 454 goto error_out;
@@ -455,8 +456,8 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
455 nbd_cmd(req) = NBD_CMD_READ; 456 nbd_cmd(req) = NBD_CMD_READ;
456 if (rq_data_dir(req) == WRITE) { 457 if (rq_data_dir(req) == WRITE) {
457 nbd_cmd(req) = NBD_CMD_WRITE; 458 nbd_cmd(req) = NBD_CMD_WRITE;
458 if (lo->flags & NBD_READ_ONLY) { 459 if (nbd->flags & NBD_READ_ONLY) {
459 dev_err(disk_to_dev(lo->disk), 460 dev_err(disk_to_dev(nbd->disk),
460 "Write on read-only\n"); 461 "Write on read-only\n");
461 goto error_out; 462 goto error_out;
462 } 463 }
@@ -464,29 +465,29 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
464 465
465 req->errors = 0; 466 req->errors = 0;
466 467
467 mutex_lock(&lo->tx_lock); 468 mutex_lock(&nbd->tx_lock);
468 if (unlikely(!lo->sock)) { 469 if (unlikely(!nbd->sock)) {
469 mutex_unlock(&lo->tx_lock); 470 mutex_unlock(&nbd->tx_lock);
470 dev_err(disk_to_dev(lo->disk), 471 dev_err(disk_to_dev(nbd->disk),
471 "Attempted send on closed socket\n"); 472 "Attempted send on closed socket\n");
472 goto error_out; 473 goto error_out;
473 } 474 }
474 475
475 lo->active_req = req; 476 nbd->active_req = req;
476 477
477 if (nbd_send_req(lo, req) != 0) { 478 if (nbd_send_req(nbd, req) != 0) {
478 dev_err(disk_to_dev(lo->disk), "Request send failed\n"); 479 dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
479 req->errors++; 480 req->errors++;
480 nbd_end_request(req); 481 nbd_end_request(req);
481 } else { 482 } else {
482 spin_lock(&lo->queue_lock); 483 spin_lock(&nbd->queue_lock);
483 list_add(&req->queuelist, &lo->queue_head); 484 list_add(&req->queuelist, &nbd->queue_head);
484 spin_unlock(&lo->queue_lock); 485 spin_unlock(&nbd->queue_lock);
485 } 486 }
486 487
487 lo->active_req = NULL; 488 nbd->active_req = NULL;
488 mutex_unlock(&lo->tx_lock); 489 mutex_unlock(&nbd->tx_lock);
489 wake_up_all(&lo->active_wq); 490 wake_up_all(&nbd->active_wq);
490 491
491 return; 492 return;
492 493
@@ -497,28 +498,28 @@ error_out:
497 498
498static int nbd_thread(void *data) 499static int nbd_thread(void *data)
499{ 500{
500 struct nbd_device *lo = data; 501 struct nbd_device *nbd = data;
501 struct request *req; 502 struct request *req;
502 503
503 set_user_nice(current, -20); 504 set_user_nice(current, -20);
504 while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) { 505 while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
505 /* wait for something to do */ 506 /* wait for something to do */
506 wait_event_interruptible(lo->waiting_wq, 507 wait_event_interruptible(nbd->waiting_wq,
507 kthread_should_stop() || 508 kthread_should_stop() ||
508 !list_empty(&lo->waiting_queue)); 509 !list_empty(&nbd->waiting_queue));
509 510
510 /* extract request */ 511 /* extract request */
511 if (list_empty(&lo->waiting_queue)) 512 if (list_empty(&nbd->waiting_queue))
512 continue; 513 continue;
513 514
514 spin_lock_irq(&lo->queue_lock); 515 spin_lock_irq(&nbd->queue_lock);
515 req = list_entry(lo->waiting_queue.next, struct request, 516 req = list_entry(nbd->waiting_queue.next, struct request,
516 queuelist); 517 queuelist);
517 list_del_init(&req->queuelist); 518 list_del_init(&req->queuelist);
518 spin_unlock_irq(&lo->queue_lock); 519 spin_unlock_irq(&nbd->queue_lock);
519 520
520 /* handle request */ 521 /* handle request */
521 nbd_handle_req(lo, req); 522 nbd_handle_req(nbd, req);
522 } 523 }
523 return 0; 524 return 0;
524} 525}
@@ -526,7 +527,7 @@ static int nbd_thread(void *data)
526/* 527/*
527 * We always wait for result of write, for now. It would be nice to make it optional 528 * We always wait for result of write, for now. It would be nice to make it optional
528 * in future 529 * in future
529 * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK)) 530 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
530 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } 531 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
531 */ 532 */
532 533
@@ -535,19 +536,19 @@ static void do_nbd_request(struct request_queue *q)
535 struct request *req; 536 struct request *req;
536 537
537 while ((req = blk_fetch_request(q)) != NULL) { 538 while ((req = blk_fetch_request(q)) != NULL) {
538 struct nbd_device *lo; 539 struct nbd_device *nbd;
539 540
540 spin_unlock_irq(q->queue_lock); 541 spin_unlock_irq(q->queue_lock);
541 542
542 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", 543 dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
543 req->rq_disk->disk_name, req, req->cmd_type); 544 req->rq_disk->disk_name, req, req->cmd_type);
544 545
545 lo = req->rq_disk->private_data; 546 nbd = req->rq_disk->private_data;
546 547
547 BUG_ON(lo->magic != LO_MAGIC); 548 BUG_ON(nbd->magic != NBD_MAGIC);
548 549
549 if (unlikely(!lo->sock)) { 550 if (unlikely(!nbd->sock)) {
550 dev_err(disk_to_dev(lo->disk), 551 dev_err(disk_to_dev(nbd->disk),
551 "Attempted send on closed socket\n"); 552 "Attempted send on closed socket\n");
552 req->errors++; 553 req->errors++;
553 nbd_end_request(req); 554 nbd_end_request(req);
@@ -555,11 +556,11 @@ static void do_nbd_request(struct request_queue *q)
555 continue; 556 continue;
556 } 557 }
557 558
558 spin_lock_irq(&lo->queue_lock); 559 spin_lock_irq(&nbd->queue_lock);
559 list_add_tail(&req->queuelist, &lo->waiting_queue); 560 list_add_tail(&req->queuelist, &nbd->waiting_queue);
560 spin_unlock_irq(&lo->queue_lock); 561 spin_unlock_irq(&nbd->queue_lock);
561 562
562 wake_up(&lo->waiting_wq); 563 wake_up(&nbd->waiting_wq);
563 564
564 spin_lock_irq(q->queue_lock); 565 spin_lock_irq(q->queue_lock);
565 } 566 }
@@ -567,32 +568,32 @@ static void do_nbd_request(struct request_queue *q)
567 568
568/* Must be called with tx_lock held */ 569/* Must be called with tx_lock held */
569 570
570static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, 571static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
571 unsigned int cmd, unsigned long arg) 572 unsigned int cmd, unsigned long arg)
572{ 573{
573 switch (cmd) { 574 switch (cmd) {
574 case NBD_DISCONNECT: { 575 case NBD_DISCONNECT: {
575 struct request sreq; 576 struct request sreq;
576 577
577 dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n"); 578 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
578 579
579 blk_rq_init(NULL, &sreq); 580 blk_rq_init(NULL, &sreq);
580 sreq.cmd_type = REQ_TYPE_SPECIAL; 581 sreq.cmd_type = REQ_TYPE_SPECIAL;
581 nbd_cmd(&sreq) = NBD_CMD_DISC; 582 nbd_cmd(&sreq) = NBD_CMD_DISC;
582 if (!lo->sock) 583 if (!nbd->sock)
583 return -EINVAL; 584 return -EINVAL;
584 nbd_send_req(lo, &sreq); 585 nbd_send_req(nbd, &sreq);
585 return 0; 586 return 0;
586 } 587 }
587 588
588 case NBD_CLEAR_SOCK: { 589 case NBD_CLEAR_SOCK: {
589 struct file *file; 590 struct file *file;
590 591
591 lo->sock = NULL; 592 nbd->sock = NULL;
592 file = lo->file; 593 file = nbd->file;
593 lo->file = NULL; 594 nbd->file = NULL;
594 nbd_clear_que(lo); 595 nbd_clear_que(nbd);
595 BUG_ON(!list_empty(&lo->queue_head)); 596 BUG_ON(!list_empty(&nbd->queue_head));
596 if (file) 597 if (file)
597 fput(file); 598 fput(file);
598 return 0; 599 return 0;
@@ -600,14 +601,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
600 601
601 case NBD_SET_SOCK: { 602 case NBD_SET_SOCK: {
602 struct file *file; 603 struct file *file;
603 if (lo->file) 604 if (nbd->file)
604 return -EBUSY; 605 return -EBUSY;
605 file = fget(arg); 606 file = fget(arg);
606 if (file) { 607 if (file) {
607 struct inode *inode = file->f_path.dentry->d_inode; 608 struct inode *inode = file->f_path.dentry->d_inode;
608 if (S_ISSOCK(inode->i_mode)) { 609 if (S_ISSOCK(inode->i_mode)) {
609 lo->file = file; 610 nbd->file = file;
610 lo->sock = SOCKET_I(inode); 611 nbd->sock = SOCKET_I(inode);
611 if (max_part > 0) 612 if (max_part > 0)
612 bdev->bd_invalidated = 1; 613 bdev->bd_invalidated = 1;
613 return 0; 614 return 0;
@@ -619,29 +620,29 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
619 } 620 }
620 621
621 case NBD_SET_BLKSIZE: 622 case NBD_SET_BLKSIZE:
622 lo->blksize = arg; 623 nbd->blksize = arg;
623 lo->bytesize &= ~(lo->blksize-1); 624 nbd->bytesize &= ~(nbd->blksize-1);
624 bdev->bd_inode->i_size = lo->bytesize; 625 bdev->bd_inode->i_size = nbd->bytesize;
625 set_blocksize(bdev, lo->blksize); 626 set_blocksize(bdev, nbd->blksize);
626 set_capacity(lo->disk, lo->bytesize >> 9); 627 set_capacity(nbd->disk, nbd->bytesize >> 9);
627 return 0; 628 return 0;
628 629
629 case NBD_SET_SIZE: 630 case NBD_SET_SIZE:
630 lo->bytesize = arg & ~(lo->blksize-1); 631 nbd->bytesize = arg & ~(nbd->blksize-1);
631 bdev->bd_inode->i_size = lo->bytesize; 632 bdev->bd_inode->i_size = nbd->bytesize;
632 set_blocksize(bdev, lo->blksize); 633 set_blocksize(bdev, nbd->blksize);
633 set_capacity(lo->disk, lo->bytesize >> 9); 634 set_capacity(nbd->disk, nbd->bytesize >> 9);
634 return 0; 635 return 0;
635 636
636 case NBD_SET_TIMEOUT: 637 case NBD_SET_TIMEOUT:
637 lo->xmit_timeout = arg * HZ; 638 nbd->xmit_timeout = arg * HZ;
638 return 0; 639 return 0;
639 640
640 case NBD_SET_SIZE_BLOCKS: 641 case NBD_SET_SIZE_BLOCKS:
641 lo->bytesize = ((u64) arg) * lo->blksize; 642 nbd->bytesize = ((u64) arg) * nbd->blksize;
642 bdev->bd_inode->i_size = lo->bytesize; 643 bdev->bd_inode->i_size = nbd->bytesize;
643 set_blocksize(bdev, lo->blksize); 644 set_blocksize(bdev, nbd->blksize);
644 set_capacity(lo->disk, lo->bytesize >> 9); 645 set_capacity(nbd->disk, nbd->bytesize >> 9);
645 return 0; 646 return 0;
646 647
647 case NBD_DO_IT: { 648 case NBD_DO_IT: {
@@ -649,38 +650,38 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
649 struct file *file; 650 struct file *file;
650 int error; 651 int error;
651 652
652 if (lo->pid) 653 if (nbd->pid)
653 return -EBUSY; 654 return -EBUSY;
654 if (!lo->file) 655 if (!nbd->file)
655 return -EINVAL; 656 return -EINVAL;
656 657
657 mutex_unlock(&lo->tx_lock); 658 mutex_unlock(&nbd->tx_lock);
658 659
659 thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); 660 thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
660 if (IS_ERR(thread)) { 661 if (IS_ERR(thread)) {
661 mutex_lock(&lo->tx_lock); 662 mutex_lock(&nbd->tx_lock);
662 return PTR_ERR(thread); 663 return PTR_ERR(thread);
663 } 664 }
664 wake_up_process(thread); 665 wake_up_process(thread);
665 error = nbd_do_it(lo); 666 error = nbd_do_it(nbd);
666 kthread_stop(thread); 667 kthread_stop(thread);
667 668
668 mutex_lock(&lo->tx_lock); 669 mutex_lock(&nbd->tx_lock);
669 if (error) 670 if (error)
670 return error; 671 return error;
671 sock_shutdown(lo, 0); 672 sock_shutdown(nbd, 0);
672 file = lo->file; 673 file = nbd->file;
673 lo->file = NULL; 674 nbd->file = NULL;
674 nbd_clear_que(lo); 675 nbd_clear_que(nbd);
675 dev_warn(disk_to_dev(lo->disk), "queue cleared\n"); 676 dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
676 if (file) 677 if (file)
677 fput(file); 678 fput(file);
678 lo->bytesize = 0; 679 nbd->bytesize = 0;
679 bdev->bd_inode->i_size = 0; 680 bdev->bd_inode->i_size = 0;
680 set_capacity(lo->disk, 0); 681 set_capacity(nbd->disk, 0);
681 if (max_part > 0) 682 if (max_part > 0)
682 ioctl_by_bdev(bdev, BLKRRPART, 0); 683 ioctl_by_bdev(bdev, BLKRRPART, 0);
683 return lo->harderror; 684 return nbd->harderror;
684 } 685 }
685 686
686 case NBD_CLEAR_QUE: 687 case NBD_CLEAR_QUE:
@@ -688,14 +689,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
688 * This is for compatibility only. The queue is always cleared 689 * This is for compatibility only. The queue is always cleared
689 * by NBD_DO_IT or NBD_CLEAR_SOCK. 690 * by NBD_DO_IT or NBD_CLEAR_SOCK.
690 */ 691 */
691 BUG_ON(!lo->sock && !list_empty(&lo->queue_head)); 692 BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head));
692 return 0; 693 return 0;
693 694
694 case NBD_PRINT_DEBUG: 695 case NBD_PRINT_DEBUG:
695 dev_info(disk_to_dev(lo->disk), 696 dev_info(disk_to_dev(nbd->disk),
696 "next = %p, prev = %p, head = %p\n", 697 "next = %p, prev = %p, head = %p\n",
697 lo->queue_head.next, lo->queue_head.prev, 698 nbd->queue_head.next, nbd->queue_head.prev,
698 &lo->queue_head); 699 &nbd->queue_head);
699 return 0; 700 return 0;
700 } 701 }
701 return -ENOTTY; 702 return -ENOTTY;
@@ -704,21 +705,21 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
704static int nbd_ioctl(struct block_device *bdev, fmode_t mode, 705static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
705 unsigned int cmd, unsigned long arg) 706 unsigned int cmd, unsigned long arg)
706{ 707{
707 struct nbd_device *lo = bdev->bd_disk->private_data; 708 struct nbd_device *nbd = bdev->bd_disk->private_data;
708 int error; 709 int error;
709 710
710 if (!capable(CAP_SYS_ADMIN)) 711 if (!capable(CAP_SYS_ADMIN))
711 return -EPERM; 712 return -EPERM;
712 713
713 BUG_ON(lo->magic != LO_MAGIC); 714 BUG_ON(nbd->magic != NBD_MAGIC);
714 715
715 /* Anyone capable of this syscall can do *real bad* things */ 716 /* Anyone capable of this syscall can do *real bad* things */
716 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", 717 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
717 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); 718 nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
718 719
719 mutex_lock(&lo->tx_lock); 720 mutex_lock(&nbd->tx_lock);
720 error = __nbd_ioctl(bdev, lo, cmd, arg); 721 error = __nbd_ioctl(bdev, nbd, cmd, arg);
721 mutex_unlock(&lo->tx_lock); 722 mutex_unlock(&nbd->tx_lock);
722 723
723 return error; 724 return error;
724} 725}
@@ -804,7 +805,7 @@ static int __init nbd_init(void)
804 for (i = 0; i < nbds_max; i++) { 805 for (i = 0; i < nbds_max; i++) {
805 struct gendisk *disk = nbd_dev[i].disk; 806 struct gendisk *disk = nbd_dev[i].disk;
806 nbd_dev[i].file = NULL; 807 nbd_dev[i].file = NULL;
807 nbd_dev[i].magic = LO_MAGIC; 808 nbd_dev[i].magic = NBD_MAGIC;
808 nbd_dev[i].flags = 0; 809 nbd_dev[i].flags = 0;
809 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); 810 INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
810 spin_lock_init(&nbd_dev[i].queue_lock); 811 spin_lock_init(&nbd_dev[i].queue_lock);
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index cf82fedae099..e53fc24c6af3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
118#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH 118#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
119 119
120/* Timeouts in microseconds. */ 120/* Timeouts in microseconds. */
121#define IBF_RETRY_TIMEOUT 1000000 121#define IBF_RETRY_TIMEOUT 5000000
122#define OBF_RETRY_TIMEOUT 1000000 122#define OBF_RETRY_TIMEOUT 5000000
123#define MAX_ERROR_RETRIES 10 123#define MAX_ERROR_RETRIES 10
124#define ERROR0_OBF_WAIT_JIFFIES (2*HZ) 124#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
125 125
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c90e9390b78c..2c29942b1326 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/proc_fs.h> 46#include <linux/proc_fs.h>
47#include <linux/rcupdate.h> 47#include <linux/rcupdate.h>
48#include <linux/interrupt.h>
48 49
49#define PFX "IPMI message handler: " 50#define PFX "IPMI message handler: "
50 51
@@ -52,6 +53,8 @@
52 53
53static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); 54static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
54static int ipmi_init_msghandler(void); 55static int ipmi_init_msghandler(void);
56static void smi_recv_tasklet(unsigned long);
57static void handle_new_recv_msgs(ipmi_smi_t intf);
55 58
56static int initialized; 59static int initialized;
57 60
@@ -354,12 +357,15 @@ struct ipmi_smi {
354 int curr_seq; 357 int curr_seq;
355 358
356 /* 359 /*
357 * Messages that were delayed for some reason (out of memory, 360 * Messages queued for delivery. If delivery fails (out of memory
358 * for instance), will go in here to be processed later in a 361 * for instance), They will stay in here to be processed later in a
359 * periodic timer interrupt. 362 * periodic timer interrupt. The tasklet is for handling received
363 * messages directly from the handler.
360 */ 364 */
361 spinlock_t waiting_msgs_lock; 365 spinlock_t waiting_msgs_lock;
362 struct list_head waiting_msgs; 366 struct list_head waiting_msgs;
367 atomic_t watchdog_pretimeouts_to_deliver;
368 struct tasklet_struct recv_tasklet;
363 369
364 /* 370 /*
365 * The list of command receivers that are registered for commands 371 * The list of command receivers that are registered for commands
@@ -492,6 +498,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
492 struct cmd_rcvr *rcvr, *rcvr2; 498 struct cmd_rcvr *rcvr, *rcvr2;
493 struct list_head list; 499 struct list_head list;
494 500
501 tasklet_kill(&intf->recv_tasklet);
502
495 free_smi_msg_list(&intf->waiting_msgs); 503 free_smi_msg_list(&intf->waiting_msgs);
496 free_recv_msg_list(&intf->waiting_events); 504 free_recv_msg_list(&intf->waiting_events);
497 505
@@ -2785,12 +2793,17 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2785 return; 2793 return;
2786} 2794}
2787 2795
2788void ipmi_poll_interface(ipmi_user_t user) 2796static void ipmi_poll(ipmi_smi_t intf)
2789{ 2797{
2790 ipmi_smi_t intf = user->intf;
2791
2792 if (intf->handlers->poll) 2798 if (intf->handlers->poll)
2793 intf->handlers->poll(intf->send_info); 2799 intf->handlers->poll(intf->send_info);
2800 /* In case something came in */
2801 handle_new_recv_msgs(intf);
2802}
2803
2804void ipmi_poll_interface(ipmi_user_t user)
2805{
2806 ipmi_poll(user->intf);
2794} 2807}
2795EXPORT_SYMBOL(ipmi_poll_interface); 2808EXPORT_SYMBOL(ipmi_poll_interface);
2796 2809
@@ -2859,6 +2872,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2859#endif 2872#endif
2860 spin_lock_init(&intf->waiting_msgs_lock); 2873 spin_lock_init(&intf->waiting_msgs_lock);
2861 INIT_LIST_HEAD(&intf->waiting_msgs); 2874 INIT_LIST_HEAD(&intf->waiting_msgs);
2875 tasklet_init(&intf->recv_tasklet,
2876 smi_recv_tasklet,
2877 (unsigned long) intf);
2878 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
2862 spin_lock_init(&intf->events_lock); 2879 spin_lock_init(&intf->events_lock);
2863 INIT_LIST_HEAD(&intf->waiting_events); 2880 INIT_LIST_HEAD(&intf->waiting_events);
2864 intf->waiting_events_count = 0; 2881 intf->waiting_events_count = 0;
@@ -3621,11 +3638,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
3621} 3638}
3622 3639
3623/* 3640/*
3624 * Handle a new message. Return 1 if the message should be requeued, 3641 * Handle a received message. Return 1 if the message should be requeued,
3625 * 0 if the message should be freed, or -1 if the message should not 3642 * 0 if the message should be freed, or -1 if the message should not
3626 * be freed or requeued. 3643 * be freed or requeued.
3627 */ 3644 */
3628static int handle_new_recv_msg(ipmi_smi_t intf, 3645static int handle_one_recv_msg(ipmi_smi_t intf,
3629 struct ipmi_smi_msg *msg) 3646 struct ipmi_smi_msg *msg)
3630{ 3647{
3631 int requeue; 3648 int requeue;
@@ -3783,12 +3800,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
3783 return requeue; 3800 return requeue;
3784} 3801}
3785 3802
3803/*
3804 * If there are messages in the queue or pretimeouts, handle them.
3805 */
3806static void handle_new_recv_msgs(ipmi_smi_t intf)
3807{
3808 struct ipmi_smi_msg *smi_msg;
3809 unsigned long flags = 0;
3810 int rv;
3811 int run_to_completion = intf->run_to_completion;
3812
3813 /* See if any waiting messages need to be processed. */
3814 if (!run_to_completion)
3815 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3816 while (!list_empty(&intf->waiting_msgs)) {
3817 smi_msg = list_entry(intf->waiting_msgs.next,
3818 struct ipmi_smi_msg, link);
3819 list_del(&smi_msg->link);
3820 if (!run_to_completion)
3821 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3822 rv = handle_one_recv_msg(intf, smi_msg);
3823 if (!run_to_completion)
3824 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3825 if (rv == 0) {
3826 /* Message handled */
3827 ipmi_free_smi_msg(smi_msg);
3828 } else if (rv < 0) {
3829 /* Fatal error on the message, del but don't free. */
3830 } else {
3831 /*
3832 * To preserve message order, quit if we
3833 * can't handle a message.
3834 */
3835 list_add(&smi_msg->link, &intf->waiting_msgs);
3836 break;
3837 }
3838 }
3839 if (!run_to_completion)
3840 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3841
3842 /*
3843 * If the pretimout count is non-zero, decrement one from it and
3844 * deliver pretimeouts to all the users.
3845 */
3846 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
3847 ipmi_user_t user;
3848
3849 rcu_read_lock();
3850 list_for_each_entry_rcu(user, &intf->users, link) {
3851 if (user->handler->ipmi_watchdog_pretimeout)
3852 user->handler->ipmi_watchdog_pretimeout(
3853 user->handler_data);
3854 }
3855 rcu_read_unlock();
3856 }
3857}
3858
3859static void smi_recv_tasklet(unsigned long val)
3860{
3861 handle_new_recv_msgs((ipmi_smi_t) val);
3862}
3863
3786/* Handle a new message from the lower layer. */ 3864/* Handle a new message from the lower layer. */
3787void ipmi_smi_msg_received(ipmi_smi_t intf, 3865void ipmi_smi_msg_received(ipmi_smi_t intf,
3788 struct ipmi_smi_msg *msg) 3866 struct ipmi_smi_msg *msg)
3789{ 3867{
3790 unsigned long flags = 0; /* keep us warning-free. */ 3868 unsigned long flags = 0; /* keep us warning-free. */
3791 int rv;
3792 int run_to_completion; 3869 int run_to_completion;
3793 3870
3794 3871
@@ -3842,31 +3919,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
3842 run_to_completion = intf->run_to_completion; 3919 run_to_completion = intf->run_to_completion;
3843 if (!run_to_completion) 3920 if (!run_to_completion)
3844 spin_lock_irqsave(&intf->waiting_msgs_lock, flags); 3921 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3845 if (!list_empty(&intf->waiting_msgs)) { 3922 list_add_tail(&msg->link, &intf->waiting_msgs);
3846 list_add_tail(&msg->link, &intf->waiting_msgs);
3847 if (!run_to_completion)
3848 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3849 goto out;
3850 }
3851 if (!run_to_completion) 3923 if (!run_to_completion)
3852 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); 3924 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3853 3925
3854 rv = handle_new_recv_msg(intf, msg); 3926 tasklet_schedule(&intf->recv_tasklet);
3855 if (rv > 0) {
3856 /*
3857 * Could not handle the message now, just add it to a
3858 * list to handle later.
3859 */
3860 run_to_completion = intf->run_to_completion;
3861 if (!run_to_completion)
3862 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3863 list_add_tail(&msg->link, &intf->waiting_msgs);
3864 if (!run_to_completion)
3865 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3866 } else if (rv == 0) {
3867 ipmi_free_smi_msg(msg);
3868 }
3869
3870 out: 3927 out:
3871 return; 3928 return;
3872} 3929}
@@ -3874,16 +3931,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
3874 3931
3875void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3932void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3876{ 3933{
3877 ipmi_user_t user; 3934 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
3878 3935 tasklet_schedule(&intf->recv_tasklet);
3879 rcu_read_lock();
3880 list_for_each_entry_rcu(user, &intf->users, link) {
3881 if (!user->handler->ipmi_watchdog_pretimeout)
3882 continue;
3883
3884 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
3885 }
3886 rcu_read_unlock();
3887} 3936}
3888EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); 3937EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
3889 3938
@@ -3997,28 +4046,12 @@ static void ipmi_timeout_handler(long timeout_period)
3997 ipmi_smi_t intf; 4046 ipmi_smi_t intf;
3998 struct list_head timeouts; 4047 struct list_head timeouts;
3999 struct ipmi_recv_msg *msg, *msg2; 4048 struct ipmi_recv_msg *msg, *msg2;
4000 struct ipmi_smi_msg *smi_msg, *smi_msg2;
4001 unsigned long flags; 4049 unsigned long flags;
4002 int i; 4050 int i;
4003 4051
4004 rcu_read_lock(); 4052 rcu_read_lock();
4005 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4053 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4006 /* See if any waiting messages need to be processed. */ 4054 tasklet_schedule(&intf->recv_tasklet);
4007 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
4008 list_for_each_entry_safe(smi_msg, smi_msg2,
4009 &intf->waiting_msgs, link) {
4010 if (!handle_new_recv_msg(intf, smi_msg)) {
4011 list_del(&smi_msg->link);
4012 ipmi_free_smi_msg(smi_msg);
4013 } else {
4014 /*
4015 * To preserve message order, quit if we
4016 * can't handle a message.
4017 */
4018 break;
4019 }
4020 }
4021 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
4022 4055
4023 /* 4056 /*
4024 * Go through the seq table and find any messages that 4057 * Go through the seq table and find any messages that
@@ -4172,12 +4205,48 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
4172 4205
4173#ifdef CONFIG_IPMI_PANIC_EVENT 4206#ifdef CONFIG_IPMI_PANIC_EVENT
4174 4207
4208static atomic_t panic_done_count = ATOMIC_INIT(0);
4209
4175static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 4210static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
4176{ 4211{
4212 atomic_dec(&panic_done_count);
4177} 4213}
4178 4214
4179static void dummy_recv_done_handler(struct ipmi_recv_msg *msg) 4215static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
4180{ 4216{
4217 atomic_dec(&panic_done_count);
4218}
4219
4220/*
4221 * Inside a panic, send a message and wait for a response.
4222 */
4223static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
4224 struct ipmi_addr *addr,
4225 struct kernel_ipmi_msg *msg)
4226{
4227 struct ipmi_smi_msg smi_msg;
4228 struct ipmi_recv_msg recv_msg;
4229 int rv;
4230
4231 smi_msg.done = dummy_smi_done_handler;
4232 recv_msg.done = dummy_recv_done_handler;
4233 atomic_add(2, &panic_done_count);
4234 rv = i_ipmi_request(NULL,
4235 intf,
4236 addr,
4237 0,
4238 msg,
4239 intf,
4240 &smi_msg,
4241 &recv_msg,
4242 0,
4243 intf->channels[0].address,
4244 intf->channels[0].lun,
4245 0, 1); /* Don't retry, and don't wait. */
4246 if (rv)
4247 atomic_sub(2, &panic_done_count);
4248 while (atomic_read(&panic_done_count) != 0)
4249 ipmi_poll(intf);
4181} 4250}
4182 4251
4183#ifdef CONFIG_IPMI_PANIC_STRING 4252#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4216,8 +4285,6 @@ static void send_panic_events(char *str)
4216 unsigned char data[16]; 4285 unsigned char data[16];
4217 struct ipmi_system_interface_addr *si; 4286 struct ipmi_system_interface_addr *si;
4218 struct ipmi_addr addr; 4287 struct ipmi_addr addr;
4219 struct ipmi_smi_msg smi_msg;
4220 struct ipmi_recv_msg recv_msg;
4221 4288
4222 si = (struct ipmi_system_interface_addr *) &addr; 4289 si = (struct ipmi_system_interface_addr *) &addr;
4223 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 4290 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4245,9 +4312,6 @@ static void send_panic_events(char *str)
4245 data[7] = str[2]; 4312 data[7] = str[2];
4246 } 4313 }
4247 4314
4248 smi_msg.done = dummy_smi_done_handler;
4249 recv_msg.done = dummy_recv_done_handler;
4250
4251 /* For every registered interface, send the event. */ 4315 /* For every registered interface, send the event. */
4252 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 4316 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4253 if (!intf->handlers) 4317 if (!intf->handlers)
@@ -4257,18 +4321,7 @@ static void send_panic_events(char *str)
4257 intf->run_to_completion = 1; 4321 intf->run_to_completion = 1;
4258 /* Send the event announcing the panic. */ 4322 /* Send the event announcing the panic. */
4259 intf->handlers->set_run_to_completion(intf->send_info, 1); 4323 intf->handlers->set_run_to_completion(intf->send_info, 1);
4260 i_ipmi_request(NULL, 4324 ipmi_panic_request_and_wait(intf, &addr, &msg);
4261 intf,
4262 &addr,
4263 0,
4264 &msg,
4265 intf,
4266 &smi_msg,
4267 &recv_msg,
4268 0,
4269 intf->channels[0].address,
4270 intf->channels[0].lun,
4271 0, 1); /* Don't retry, and don't wait. */
4272 } 4325 }
4273 4326
4274#ifdef CONFIG_IPMI_PANIC_STRING 4327#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4316,18 +4369,7 @@ static void send_panic_events(char *str)
4316 msg.data = NULL; 4369 msg.data = NULL;
4317 msg.data_len = 0; 4370 msg.data_len = 0;
4318 intf->null_user_handler = device_id_fetcher; 4371 intf->null_user_handler = device_id_fetcher;
4319 i_ipmi_request(NULL, 4372 ipmi_panic_request_and_wait(intf, &addr, &msg);
4320 intf,
4321 &addr,
4322 0,
4323 &msg,
4324 intf,
4325 &smi_msg,
4326 &recv_msg,
4327 0,
4328 intf->channels[0].address,
4329 intf->channels[0].lun,
4330 0, 1); /* Don't retry, and don't wait. */
4331 4373
4332 if (intf->local_event_generator) { 4374 if (intf->local_event_generator) {
4333 /* Request the event receiver from the local MC. */ 4375 /* Request the event receiver from the local MC. */
@@ -4336,18 +4378,7 @@ static void send_panic_events(char *str)
4336 msg.data = NULL; 4378 msg.data = NULL;
4337 msg.data_len = 0; 4379 msg.data_len = 0;
4338 intf->null_user_handler = event_receiver_fetcher; 4380 intf->null_user_handler = event_receiver_fetcher;
4339 i_ipmi_request(NULL, 4381 ipmi_panic_request_and_wait(intf, &addr, &msg);
4340 intf,
4341 &addr,
4342 0,
4343 &msg,
4344 intf,
4345 &smi_msg,
4346 &recv_msg,
4347 0,
4348 intf->channels[0].address,
4349 intf->channels[0].lun,
4350 0, 1); /* no retry, and no wait. */
4351 } 4382 }
4352 intf->null_user_handler = NULL; 4383 intf->null_user_handler = NULL;
4353 4384
@@ -4404,18 +4435,7 @@ static void send_panic_events(char *str)
4404 strncpy(data+5, p, 11); 4435 strncpy(data+5, p, 11);
4405 p += size; 4436 p += size;
4406 4437
4407 i_ipmi_request(NULL, 4438 ipmi_panic_request_and_wait(intf, &addr, &msg);
4408 intf,
4409 &addr,
4410 0,
4411 &msg,
4412 intf,
4413 &smi_msg,
4414 &recv_msg,
4415 0,
4416 intf->channels[0].address,
4417 intf->channels[0].lun,
4418 0, 1); /* no retry, and no wait. */
4419 } 4439 }
4420 } 4440 }
4421#endif /* CONFIG_IPMI_PANIC_STRING */ 4441#endif /* CONFIG_IPMI_PANIC_STRING */
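The ipmi_msghandler.c hunks above replace the old inline handling in ipmi_smi_msg_received() (handle the message immediately when the backlog is empty, queue it otherwise) with a tasklet: the receive path now always appends the message to intf->waiting_msgs and schedules intf->recv_tasklet, and handle_new_recv_msgs() drains the list, plus any pending watchdog pretimeouts, from softirq context. As a rough sketch of that standard deferral pattern -- not the driver's actual code; the demo_* names and types are invented for illustration -- a producer/consumer pair built on tasklet_init()/tasklet_schedule() looks roughly like this:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical message and interface types, for illustration only. */
struct demo_msg {
        struct list_head link;
        int payload;
};

struct demo_intf {
        spinlock_t waiting_msgs_lock;
        struct list_head waiting_msgs;
        struct tasklet_struct recv_tasklet;
};

/* Tasklet body: drain the queue in softirq context, dropping the lock
 * while each message is processed. */
static void demo_recv_tasklet(unsigned long val)
{
        struct demo_intf *intf = (struct demo_intf *)val;
        struct demo_msg *msg;
        unsigned long flags;

        spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
        while (!list_empty(&intf->waiting_msgs)) {
                msg = list_first_entry(&intf->waiting_msgs,
                                       struct demo_msg, link);
                list_del(&msg->link);
                spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

                /* ... handle msg here, with no locks held ... */
                kfree(msg);

                spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
        }
        spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
}

/* Producer (e.g. the low-level receive path): queue and kick the tasklet. */
static void demo_msg_received(struct demo_intf *intf, struct demo_msg *msg)
{
        unsigned long flags;

        spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
        list_add_tail(&msg->link, &intf->waiting_msgs);
        spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);

        tasklet_schedule(&intf->recv_tasklet);
}

static void demo_intf_init(struct demo_intf *intf)
{
        spin_lock_init(&intf->waiting_msgs_lock);
        INIT_LIST_HEAD(&intf->waiting_msgs);
        tasklet_init(&intf->recv_tasklet, demo_recv_tasklet,
                     (unsigned long)intf);
}

static void demo_intf_teardown(struct demo_intf *intf)
{
        /* Mirrors the tasklet_kill() added to clean_up_interface_data()
         * above: ensure no tasklet can run against freed interface data. */
        tasklet_kill(&intf->recv_tasklet);
}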
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f9fdc114b31d..1e638fff40ea 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -170,7 +170,6 @@ struct smi_info {
170 struct si_sm_handlers *handlers; 170 struct si_sm_handlers *handlers;
171 enum si_type si_type; 171 enum si_type si_type;
172 spinlock_t si_lock; 172 spinlock_t si_lock;
173 spinlock_t msg_lock;
174 struct list_head xmit_msgs; 173 struct list_head xmit_msgs;
175 struct list_head hp_xmit_msgs; 174 struct list_head hp_xmit_msgs;
176 struct ipmi_smi_msg *curr_msg; 175 struct ipmi_smi_msg *curr_msg;
@@ -319,16 +318,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
319static void deliver_recv_msg(struct smi_info *smi_info, 318static void deliver_recv_msg(struct smi_info *smi_info,
320 struct ipmi_smi_msg *msg) 319 struct ipmi_smi_msg *msg)
321{ 320{
322 /* Deliver the message to the upper layer with the lock 321 /* Deliver the message to the upper layer. */
323 released. */ 322 ipmi_smi_msg_received(smi_info->intf, msg);
324
325 if (smi_info->run_to_completion) {
326 ipmi_smi_msg_received(smi_info->intf, msg);
327 } else {
328 spin_unlock(&(smi_info->si_lock));
329 ipmi_smi_msg_received(smi_info->intf, msg);
330 spin_lock(&(smi_info->si_lock));
331 }
332} 323}
333 324
334static void return_hosed_msg(struct smi_info *smi_info, int cCode) 325static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -357,13 +348,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
357 struct timeval t; 348 struct timeval t;
358#endif 349#endif
359 350
360 /*
361 * No need to save flags, we aleady have interrupts off and we
362 * already hold the SMI lock.
363 */
364 if (!smi_info->run_to_completion)
365 spin_lock(&(smi_info->msg_lock));
366
367 /* Pick the high priority queue first. */ 351 /* Pick the high priority queue first. */
368 if (!list_empty(&(smi_info->hp_xmit_msgs))) { 352 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
369 entry = smi_info->hp_xmit_msgs.next; 353 entry = smi_info->hp_xmit_msgs.next;
@@ -401,9 +385,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
401 rv = SI_SM_CALL_WITHOUT_DELAY; 385 rv = SI_SM_CALL_WITHOUT_DELAY;
402 } 386 }
403 out: 387 out:
404 if (!smi_info->run_to_completion)
405 spin_unlock(&(smi_info->msg_lock));
406
407 return rv; 388 return rv;
408} 389}
409 390
@@ -480,9 +461,7 @@ static void handle_flags(struct smi_info *smi_info)
480 461
481 start_clear_flags(smi_info); 462 start_clear_flags(smi_info);
482 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; 463 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
483 spin_unlock(&(smi_info->si_lock));
484 ipmi_smi_watchdog_pretimeout(smi_info->intf); 464 ipmi_smi_watchdog_pretimeout(smi_info->intf);
485 spin_lock(&(smi_info->si_lock));
486 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { 465 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
487 /* Messages available. */ 466 /* Messages available. */
488 smi_info->curr_msg = ipmi_alloc_smi_msg(); 467 smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -888,19 +867,6 @@ static void sender(void *send_info,
888 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); 867 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
889#endif 868#endif
890 869
891 /*
892 * last_timeout_jiffies is updated here to avoid
893 * smi_timeout() handler passing very large time_diff
894 * value to smi_event_handler() that causes
895 * the send command to abort.
896 */
897 smi_info->last_timeout_jiffies = jiffies;
898
899 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
900
901 if (smi_info->thread)
902 wake_up_process(smi_info->thread);
903
904 if (smi_info->run_to_completion) { 870 if (smi_info->run_to_completion) {
905 /* 871 /*
906 * If we are running to completion, then throw it in 872 * If we are running to completion, then throw it in
@@ -923,16 +889,29 @@ static void sender(void *send_info,
923 return; 889 return;
924 } 890 }
925 891
926 spin_lock_irqsave(&smi_info->msg_lock, flags); 892 spin_lock_irqsave(&smi_info->si_lock, flags);
927 if (priority > 0) 893 if (priority > 0)
928 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); 894 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
929 else 895 else
930 list_add_tail(&msg->link, &smi_info->xmit_msgs); 896 list_add_tail(&msg->link, &smi_info->xmit_msgs);
931 spin_unlock_irqrestore(&smi_info->msg_lock, flags);
932 897
933 spin_lock_irqsave(&smi_info->si_lock, flags); 898 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
934 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) 899 /*
900 * last_timeout_jiffies is updated here to avoid
901 * smi_timeout() handler passing very large time_diff
902 * value to smi_event_handler() that causes
903 * the send command to abort.
904 */
905 smi_info->last_timeout_jiffies = jiffies;
906
907 mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
908
909 if (smi_info->thread)
910 wake_up_process(smi_info->thread);
911
935 start_next_msg(smi_info); 912 start_next_msg(smi_info);
913 smi_event_handler(smi_info, 0);
914 }
936 spin_unlock_irqrestore(&smi_info->si_lock, flags); 915 spin_unlock_irqrestore(&smi_info->si_lock, flags);
937} 916}
938 917
@@ -1033,16 +1012,19 @@ static int ipmi_thread(void *data)
1033static void poll(void *send_info) 1012static void poll(void *send_info)
1034{ 1013{
1035 struct smi_info *smi_info = send_info; 1014 struct smi_info *smi_info = send_info;
1036 unsigned long flags; 1015 unsigned long flags = 0;
1016 int run_to_completion = smi_info->run_to_completion;
1037 1017
1038 /* 1018 /*
1039 * Make sure there is some delay in the poll loop so we can 1019 * Make sure there is some delay in the poll loop so we can
1040 * drive time forward and timeout things. 1020 * drive time forward and timeout things.
1041 */ 1021 */
1042 udelay(10); 1022 udelay(10);
1043 spin_lock_irqsave(&smi_info->si_lock, flags); 1023 if (!run_to_completion)
1024 spin_lock_irqsave(&smi_info->si_lock, flags);
1044 smi_event_handler(smi_info, 10); 1025 smi_event_handler(smi_info, 10);
1045 spin_unlock_irqrestore(&smi_info->si_lock, flags); 1026 if (!run_to_completion)
1027 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1046} 1028}
1047 1029
1048static void request_events(void *send_info) 1030static void request_events(void *send_info)
@@ -1679,10 +1661,8 @@ static struct smi_info *smi_info_alloc(void)
1679{ 1661{
1680 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); 1662 struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1681 1663
1682 if (info) { 1664 if (info)
1683 spin_lock_init(&info->si_lock); 1665 spin_lock_init(&info->si_lock);
1684 spin_lock_init(&info->msg_lock);
1685 }
1686 return info; 1666 return info;
1687} 1667}
1688 1668
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 020a6aec2d86..7ed356e52035 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -520,6 +520,7 @@ static void panic_halt_ipmi_heartbeat(void)
520 msg.cmd = IPMI_WDOG_RESET_TIMER; 520 msg.cmd = IPMI_WDOG_RESET_TIMER;
521 msg.data = NULL; 521 msg.data = NULL;
522 msg.data_len = 0; 522 msg.data_len = 0;
523 atomic_add(2, &panic_done_count);
523 rv = ipmi_request_supply_msgs(watchdog_user, 524 rv = ipmi_request_supply_msgs(watchdog_user,
524 (struct ipmi_addr *) &addr, 525 (struct ipmi_addr *) &addr,
525 0, 526 0,
@@ -528,8 +529,8 @@ static void panic_halt_ipmi_heartbeat(void)
528 &panic_halt_heartbeat_smi_msg, 529 &panic_halt_heartbeat_smi_msg,
529 &panic_halt_heartbeat_recv_msg, 530 &panic_halt_heartbeat_recv_msg,
530 1); 531 1);
531 if (!rv) 532 if (rv)
532 atomic_add(2, &panic_done_count); 533 atomic_sub(2, &panic_done_count);
533} 534}
534 535
535static struct ipmi_smi_msg panic_halt_smi_msg = { 536static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -553,16 +554,18 @@ static void panic_halt_ipmi_set_timeout(void)
553 /* Wait for the messages to be free. */ 554 /* Wait for the messages to be free. */
554 while (atomic_read(&panic_done_count) != 0) 555 while (atomic_read(&panic_done_count) != 0)
555 ipmi_poll_interface(watchdog_user); 556 ipmi_poll_interface(watchdog_user);
557 atomic_add(2, &panic_done_count);
556 rv = i_ipmi_set_timeout(&panic_halt_smi_msg, 558 rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
557 &panic_halt_recv_msg, 559 &panic_halt_recv_msg,
558 &send_heartbeat_now); 560 &send_heartbeat_now);
559 if (!rv) { 561 if (rv) {
560 atomic_add(2, &panic_done_count); 562 atomic_sub(2, &panic_done_count);
561 if (send_heartbeat_now)
562 panic_halt_ipmi_heartbeat();
563 } else
564 printk(KERN_WARNING PFX 563 printk(KERN_WARNING PFX
565 "Unable to extend the watchdog timeout."); 564 "Unable to extend the watchdog timeout.");
565 } else {
566 if (send_heartbeat_now)
567 panic_halt_ipmi_heartbeat();
568 }
566 while (atomic_read(&panic_done_count) != 0) 569 while (atomic_read(&panic_done_count) != 0)
567 ipmi_poll_interface(watchdog_user); 570 ipmi_poll_interface(watchdog_user);
568} 571}
@@ -1164,7 +1167,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
1164 if (code == SYS_POWER_OFF || code == SYS_HALT) { 1167 if (code == SYS_POWER_OFF || code == SYS_HALT) {
1165 /* Disable the WDT if we are shutting down. */ 1168 /* Disable the WDT if we are shutting down. */
1166 ipmi_watchdog_state = WDOG_TIMEOUT_NONE; 1169 ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
1167 panic_halt_ipmi_set_timeout(); 1170 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
1168 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { 1171 } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
1169 /* Set a long timer to let the reboot happens, but 1172 /* Set a long timer to let the reboot happens, but
1170 reboot if it hangs, but only if the watchdog 1173 reboot if it hangs, but only if the watchdog
@@ -1172,7 +1175,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
1172 timeout = 120; 1175 timeout = 120;
1173 pretimeout = 0; 1176 pretimeout = 0;
1174 ipmi_watchdog_state = WDOG_TIMEOUT_RESET; 1177 ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
1175 panic_halt_ipmi_set_timeout(); 1178 ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
1176 } 1179 }
1177 } 1180 }
1178 return NOTIFY_OK; 1181 return NOTIFY_OK;
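Both the panic-event path in ipmi_msghandler.c (ipmi_panic_request_and_wait()) and the watchdog's panic_halt_* helpers above now follow the same completion-counting discipline: add 2 to panic_done_count before issuing the request (one for the SMI message, one for the receive message), subtract 2 again only if the submit fails, then busy-poll the interface until the done handlers bring the count back to zero. A minimal sketch of that idea, assuming hypothetical demo_submit()/demo_poll() stand-ins for i_ipmi_request() and ipmi_poll() (none of the demo_* names are real kernel APIs):

#include <linux/atomic.h>

/* One outstanding completion per done handler, mirroring panic_done_count. */
static atomic_t demo_done_count = ATOMIC_INIT(0);

/* Done handlers just drop the count; they run from the polling loop. */
static void demo_smi_done(void)
{
        atomic_dec(&demo_done_count);
}

static void demo_recv_done(void)
{
        atomic_dec(&demo_done_count);
}

/* Hypothetical stand-ins for i_ipmi_request() and ipmi_poll(). */
int demo_submit(void (*smi_done)(void), void (*recv_done)(void));
void demo_poll(void);

static void demo_request_and_wait(void)
{
        int rv;

        /* Count up *before* submitting so a completion that fires
         * immediately cannot be lost. */
        atomic_add(2, &demo_done_count);

        rv = demo_submit(demo_smi_done, demo_recv_done);
        if (rv)
                /* Nothing was queued, so no completions will arrive. */
                atomic_sub(2, &demo_done_count);

        /* Interrupts may be unusable during a panic; poll until both
         * done handlers have run (or the failed submit has been undone). */
        while (atomic_read(&demo_done_count) != 0)
                demo_poll();
}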
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index a2161f631a83..2231aec23918 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -271,7 +271,7 @@ static int tosa_lcd_resume(struct spi_device *spi)
271} 271}
272#else 272#else
273#define tosa_lcd_suspend NULL 273#define tosa_lcd_suspend NULL
274#define tosa_lcd_reume NULL 274#define tosa_lcd_resume NULL
275#endif 275#endif
276 276
277static struct spi_driver tosa_lcd_driver = { 277static struct spi_driver tosa_lcd_driver = {