 Documentation/IPMI.txt              |   9
 drivers/char/ipmi/ipmi_msghandler.c | 246
 drivers/char/ipmi/ipmi_poweroff.c   |  60
 drivers/char/ipmi/ipmi_watchdog.c   | 121
 4 files changed, 304 insertions(+), 132 deletions(-)
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 0e3924ecd76b..9101cbf2d883 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -502,7 +502,10 @@ used to control it:
 
 	modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
 	   preaction=<preaction type> preop=<preop type> start_now=x
-	   nowayout=x
+	   nowayout=x ifnum_to_use=n
+
+ifnum_to_use specifies which interface the watchdog timer should use.
+The default is -1, which means to pick the first one registered.
 
 The timeout is the number of seconds to the action, and the pretimeout
 is the amount of seconds before the reset that the pre-timeout panic will
@@ -624,5 +627,9 @@ command line. The parameter is also available via the proc filesystem
 in /proc/sys/dev/ipmi/poweroff_powercycle.  Note that if the system
 does not support power cycling, it will always do the power off.
 
+The "ifnum_to_use" parameter specifies which interface the poweroff
+code should use.  The default is -1, which means to pick the first one
+registered.
+
 Note that if you have ACPI enabled, the system will prefer using ACPI to
 power off.
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 6a77b264eb2c..03f32611831d 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -200,6 +200,10 @@ struct ipmi_smi
 	 * protects this. */
 	struct list_head users;
 
+	/* Information to supply to users. */
+	unsigned char ipmi_version_major;
+	unsigned char ipmi_version_minor;
+
 	/* Used for wake ups at startup. */
 	wait_queue_head_t waitq;
 
@@ -207,7 +211,10 @@ struct ipmi_smi
 	char *my_dev_name;
 	char *sysfs_name;
 
-	/* This is the lower-layer's sender routine. */
+	/* This is the lower-layer's sender routine.  Note that you
+	 * must either be holding the ipmi_interfaces_mutex or be in
+	 * an unpreemptible region to use this.  You must fetch the
+	 * value into a local variable and make sure it is not NULL. */
 	struct ipmi_smi_handlers *handlers;
 	void *send_info;
 
@@ -246,6 +253,7 @@ struct ipmi_smi
 	spinlock_t       events_lock; /* For dealing with event stuff. */
 	struct list_head waiting_events;
 	unsigned int     waiting_events_count; /* How many events in queue? */
+	int              delivering_events;
 
 	/* The event receiver for my BMC, only really used at panic
 	   shutdown as a place to store this. */
@@ -357,7 +365,7 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
 /* List of watchers that want to know when smi's are added and
    deleted. */
 static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
-static DECLARE_RWSEM(smi_watchers_sem);
+static DEFINE_MUTEX(smi_watchers_mutex);
 
 
 static void free_recv_msg_list(struct list_head *q)
@@ -418,8 +426,9 @@ static void intf_free(struct kref *ref)
 }
 
 struct watcher_entry {
+	int              intf_num;
+	ipmi_smi_t       intf;
 	struct list_head link;
-	int              intf_num;
 };
 
 int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
@@ -428,36 +437,45 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 	struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
 	struct watcher_entry *e, *e2;
 
+	mutex_lock(&smi_watchers_mutex);
+
 	mutex_lock(&ipmi_interfaces_mutex);
 
+	/* Build a list of things to deliver. */
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		if (intf->intf_num == -1)
 			continue;
 		e = kmalloc(sizeof(*e), GFP_KERNEL);
 		if (!e)
 			goto out_err;
+		kref_get(&intf->refcount);
+		e->intf = intf;
 		e->intf_num = intf->intf_num;
 		list_add_tail(&e->link, &to_deliver);
 	}
 
-	down_write(&smi_watchers_sem);
-	list_add(&(watcher->link), &smi_watchers);
-	up_write(&smi_watchers_sem);
+	/* We will succeed, so add it to the list. */
+	list_add(&watcher->link, &smi_watchers);
 
 	mutex_unlock(&ipmi_interfaces_mutex);
 
 	list_for_each_entry_safe(e, e2, &to_deliver, link) {
 		list_del(&e->link);
-		watcher->new_smi(e->intf_num, intf->si_dev);
+		watcher->new_smi(e->intf_num, e->intf->si_dev);
+		kref_put(&e->intf->refcount, intf_free);
 		kfree(e);
 	}
 
+	mutex_unlock(&smi_watchers_mutex);
 
 	return 0;
 
 out_err:
+	mutex_unlock(&ipmi_interfaces_mutex);
+	mutex_unlock(&smi_watchers_mutex);
 	list_for_each_entry_safe(e, e2, &to_deliver, link) {
 		list_del(&e->link);
+		kref_put(&e->intf->refcount, intf_free);
 		kfree(e);
 	}
 	return -ENOMEM;
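
The registration path above exists so that a watcher that arrives late still hears about interfaces that are already up: new_smi() is replayed for every registered interface, which is why the function builds the to_deliver list and takes a refcount on each entry. For orientation, a consumer of this interface looks roughly like the sketch below (hypothetical callback names; the real consumers are the ipmi_watchdog and ipmi_poweroff changes later in this patch):

	#include <linux/ipmi.h>
	#include <linux/module.h>

	/* Hypothetical watcher client, not part of this patch. */
	static void example_new_smi(int if_num, struct device *dev)
	{
		printk(KERN_INFO "IPMI interface %d appeared\n", if_num);
	}

	static void example_smi_gone(int if_num)
	{
		printk(KERN_INFO "IPMI interface %d went away\n", if_num);
	}

	static struct ipmi_smi_watcher example_watcher = {
		.owner    = THIS_MODULE,
		.new_smi  = example_new_smi,
		.smi_gone = example_smi_gone,
	};

	/* Registering replays new_smi() for all existing interfaces. */
	static int __init example_init(void)
	{
		return ipmi_smi_watcher_register(&example_watcher);
	}
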
@@ -465,25 +483,26 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
 
 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 {
-	down_write(&smi_watchers_sem);
+	mutex_lock(&smi_watchers_mutex);
 	list_del(&(watcher->link));
-	up_write(&smi_watchers_sem);
+	mutex_unlock(&smi_watchers_mutex);
 	return 0;
 }
 
+/*
+ * Must be called with smi_watchers_mutex held.
+ */
 static void
 call_smi_watchers(int i, struct device *dev)
 {
 	struct ipmi_smi_watcher *w;
 
-	down_read(&smi_watchers_sem);
 	list_for_each_entry(w, &smi_watchers, link) {
 		if (try_module_get(w->owner)) {
 			w->new_smi(i, dev);
 			module_put(w->owner);
 		}
 	}
-	up_read(&smi_watchers_sem);
 }
 
 static int
@@ -609,6 +628,17 @@ static void deliver_response(struct ipmi_recv_msg *msg)
 	}
 }
 
+static void
+deliver_err_response(struct ipmi_recv_msg *msg, int err)
+{
+	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+	msg->msg_data[0] = err;
+	msg->msg.netfn |= 1; /* Convert to a response. */
+	msg->msg.data_len = 1;
+	msg->msg.data = msg->msg_data;
+	deliver_response(msg);
+}
+
 /* Find the next sequence number not being used and add the given
    message with the given timeout to the sequence table.  This must be
    called with the interface's seq_lock held. */
@@ -746,14 +776,8 @@ static int intf_err_seq(ipmi_smi_t intf,
 	}
 	spin_unlock_irqrestore(&(intf->seq_lock), flags);
 
-	if (msg) {
-		msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-		msg->msg_data[0] = err;
-		msg->msg.netfn |= 1; /* Convert to a response. */
-		msg->msg.data_len = 1;
-		msg->msg.data = msg->msg_data;
-		deliver_response(msg);
-	}
+	if (msg)
+		deliver_err_response(msg, err);
 
 	return rv;
 }
@@ -795,19 +819,18 @@ int ipmi_create_user(unsigned int if_num,
 	if (!new_user)
 		return -ENOMEM;
 
-	rcu_read_lock();
+	mutex_lock(&ipmi_interfaces_mutex);
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		if (intf->intf_num == if_num)
 			goto found;
 	}
-	rcu_read_unlock();
+	/* Not found, return an error */
 	rv = -EINVAL;
 	goto out_kfree;
 
  found:
 	/* Note that each existing user holds a refcount to the interface. */
 	kref_get(&intf->refcount);
-	rcu_read_unlock();
 
 	kref_init(&new_user->refcount);
 	new_user->handler = handler;
@@ -828,6 +851,10 @@ int ipmi_create_user(unsigned int if_num,
 		}
 	}
 
+	/* Hold the lock so intf->handlers is guaranteed to be good
+	 * until now */
+	mutex_unlock(&ipmi_interfaces_mutex);
+
 	new_user->valid = 1;
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
@@ -838,6 +865,7 @@ int ipmi_create_user(unsigned int if_num,
 out_kref:
 	kref_put(&intf->refcount, intf_free);
 out_kfree:
+	mutex_unlock(&ipmi_interfaces_mutex);
 	kfree(new_user);
 	return rv;
 }
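
ipmi_create_user() is the entry point every in-kernel client goes through, so serializing it against interface teardown with ipmi_interfaces_mutex is what keeps intf->handlers valid for the whole call. A minimal caller looks roughly like this sketch (hypothetical names; the same API the watchdog and poweroff drivers use):

	#include <linux/ipmi.h>

	static ipmi_user_t example_user;

	/* Responses and events arrive through this callback; the receiver
	   owns the message and must free it. */
	static void example_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
	{
		ipmi_free_recv_msg(msg);
	}

	static struct ipmi_user_hndl example_hndl = {
		.ipmi_recv_hndl = example_recv,
	};

	static int example_attach(int if_num)
	{
		/* Holds a refcount on the interface until ipmi_destroy_user(). */
		return ipmi_create_user(if_num, &example_hndl, NULL, &example_user);
	}
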
@@ -867,6 +895,7 @@ int ipmi_destroy_user(ipmi_user_t user)
 		    && (intf->seq_table[i].recv_msg->user == user))
 		{
 			intf->seq_table[i].inuse = 0;
+			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
 		}
 	}
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
@@ -893,9 +922,13 @@ int ipmi_destroy_user(ipmi_user_t user)
 		kfree(rcvr);
 	}
 
-	module_put(intf->handlers->owner);
-	if (intf->handlers->dec_usecount)
-		intf->handlers->dec_usecount(intf->send_info);
+	mutex_lock(&ipmi_interfaces_mutex);
+	if (intf->handlers) {
+		module_put(intf->handlers->owner);
+		if (intf->handlers->dec_usecount)
+			intf->handlers->dec_usecount(intf->send_info);
+	}
+	mutex_unlock(&ipmi_interfaces_mutex);
 
 	kref_put(&intf->refcount, intf_free);
 
@@ -908,8 +941,8 @@ void ipmi_get_version(ipmi_user_t user,
 		      unsigned char *major,
 		      unsigned char *minor)
 {
-	*major = ipmi_version_major(&user->intf->bmc->id);
-	*minor = ipmi_version_minor(&user->intf->bmc->id);
+	*major = user->intf->ipmi_version_major;
+	*minor = user->intf->ipmi_version_minor;
 }
 
 int ipmi_set_my_address(ipmi_user_t user,
@@ -964,20 +997,33 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
 	spin_lock_irqsave(&intf->events_lock, flags);
 	user->gets_events = val;
 
-	if (val) {
-		/* Deliver any queued events. */
+	if (intf->delivering_events)
+		/*
+		 * Another thread is delivering events for this, so
+		 * let it handle any new events.
+		 */
+		goto out;
+
+	/* Deliver any queued events. */
+	while (user->gets_events && !list_empty(&intf->waiting_events)) {
 		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
 			list_move_tail(&msg->link, &msgs);
 		intf->waiting_events_count = 0;
-	}
 
-	/* Hold the events lock while doing this to preserve order. */
-	list_for_each_entry_safe(msg, msg2, &msgs, link) {
-		msg->user = user;
-		kref_get(&user->refcount);
-		deliver_response(msg);
+		intf->delivering_events = 1;
+		spin_unlock_irqrestore(&intf->events_lock, flags);
+
+		list_for_each_entry_safe(msg, msg2, &msgs, link) {
+			msg->user = user;
+			kref_get(&user->refcount);
+			deliver_response(msg);
+		}
+
+		spin_lock_irqsave(&intf->events_lock, flags);
+		intf->delivering_events = 0;
 	}
 
+ out:
 	spin_unlock_irqrestore(&intf->events_lock, flags);
 
 	return 0;
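
The new delivering_events flag closes a window where two callers of ipmi_set_gets_events() could drain waiting_events concurrently and, once the lock is dropped around deliver_response(), hand queued events to the user out of order. The pattern, reduced to its essentials (hypothetical names, not part of the IPMI API):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct ev { struct list_head link; };

	struct ev_queue {
		spinlock_t       lock;
		struct list_head waiting;
		int              delivering; /* non-zero while one thread delivers */
	};

	static void deliver(struct ev *e) { /* hand the event to its consumer */ }

	static void drain(struct ev_queue *q)
	{
		struct ev *e, *e2;
		struct list_head batch = LIST_HEAD_INIT(batch);
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		if (q->delivering)
			goto out; /* the thread already draining will see new events */

		while (!list_empty(&q->waiting)) {
			list_for_each_entry_safe(e, e2, &q->waiting, link)
				list_move_tail(&e->link, &batch);

			q->delivering = 1;
			spin_unlock_irqrestore(&q->lock, flags);

			/* Callbacks run unlocked; the flag keeps a second
			   drainer from overtaking us and reordering events. */
			list_for_each_entry_safe(e, e2, &batch, link) {
				list_del(&e->link);
				deliver(e);
			}

			spin_lock_irqsave(&q->lock, flags);
			q->delivering = 0;
		}
	 out:
		spin_unlock_irqrestore(&q->lock, flags);
	}
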
@@ -1088,7 +1134,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
 void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
 {
 	ipmi_smi_t intf = user->intf;
-	intf->handlers->set_run_to_completion(intf->send_info, val);
+	if (intf->handlers)
+		intf->handlers->set_run_to_completion(intf->send_info, val);
 }
 
 static unsigned char
@@ -1199,10 +1246,11 @@ static int i_ipmi_request(ipmi_user_t user,
 			  int                  retries,
 			  unsigned int         retry_time_ms)
 {
 	int                      rv = 0;
 	struct ipmi_smi_msg      *smi_msg;
 	struct ipmi_recv_msg     *recv_msg;
 	unsigned long            flags;
+	struct ipmi_smi_handlers *handlers;
 
 
 	if (supplied_recv) {
@@ -1225,6 +1273,13 @@ static int i_ipmi_request(ipmi_user_t user,
 		}
 	}
 
+	rcu_read_lock();
+	handlers = intf->handlers;
+	if (!handlers) {
+		rv = -ENODEV;
+		goto out_err;
+	}
+
 	recv_msg->user = user;
 	if (user)
 		kref_get(&user->refcount);
@@ -1541,11 +1596,14 @@ static int i_ipmi_request(ipmi_user_t user,
 			printk("\n");
 		}
 #endif
-	intf->handlers->sender(intf->send_info, smi_msg, priority);
+
+	handlers->sender(intf->send_info, smi_msg, priority);
+	rcu_read_unlock();
 
 	return 0;
 
  out_err:
+	rcu_read_unlock();
 	ipmi_free_smi_msg(smi_msg);
 	ipmi_free_recv_msg(recv_msg);
 	return rv;
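
The shape of the fix in i_ipmi_request() is worth calling out because the rest of the patch repeats it: never chase intf->handlers more than once, snapshot it under rcu_read_lock() and test the local copy before use. A stripped-down sketch of the reader side (hypothetical names):

	#include <linux/errno.h>
	#include <linux/rcupdate.h>

	struct ops { void (*send)(void *send_info); };

	struct iface {
		struct ops *handlers; /* set to NULL when the lower layer unregisters */
		void       *send_info;
	};

	static int send_one(struct iface *intf)
	{
		struct ops *handlers;
		int rv = 0;

		rcu_read_lock();
		handlers = intf->handlers;	/* fetch once into a local */
		if (!handlers)
			rv = -ENODEV;		/* interface is being torn down */
		else
			handlers->send(intf->send_info);
		rcu_read_unlock();
		return rv;
	}
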
@@ -2492,13 +2550,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 	int              rv;
 	ipmi_smi_t       intf;
 	ipmi_smi_t       tintf;
-	int              version_major;
-	int              version_minor;
 	struct list_head *link;
 
-	version_major = ipmi_version_major(device_id);
-	version_minor = ipmi_version_minor(device_id);
-
 	/* Make sure the driver is actually initialized, this handles
 	   problems with initialization order. */
 	if (!initialized) {
@@ -2515,6 +2568,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 	if (!intf)
 		return -ENOMEM;
 	memset(intf, 0, sizeof(*intf));
+
+	intf->ipmi_version_major = ipmi_version_major(device_id);
+	intf->ipmi_version_minor = ipmi_version_minor(device_id);
+
 	intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
 	if (!intf->bmc) {
 		kfree(intf);
@@ -2554,6 +2611,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 	spin_lock_init(&intf->counter_lock);
 	intf->proc_dir = NULL;
 
+	mutex_lock(&smi_watchers_mutex);
 	mutex_lock(&ipmi_interfaces_mutex);
 	/* Look for a hole in the numbers. */
 	i = 0;
@@ -2577,8 +2635,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 
 	get_guid(intf);
 
-	if ((version_major > 1)
-	    || ((version_major == 1) && (version_minor >= 5)))
+	if ((intf->ipmi_version_major > 1)
+	    || ((intf->ipmi_version_major == 1)
+		&& (intf->ipmi_version_minor >= 5)))
 	{
 		/* Start scanning the channels to see what is
 		   available. */
@@ -2607,8 +2666,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 	if (rv) {
 		if (intf->proc_dir)
 			remove_proc_entries(intf);
+		intf->handlers = NULL;
 		list_del_rcu(&intf->link);
 		mutex_unlock(&ipmi_interfaces_mutex);
+		mutex_unlock(&smi_watchers_mutex);
 		synchronize_rcu();
 		kref_put(&intf->refcount, intf_free);
 	} else {
@@ -2616,30 +2677,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		intf->intf_num = i;
 		mutex_unlock(&ipmi_interfaces_mutex);
 		call_smi_watchers(i, intf->si_dev);
+		mutex_unlock(&smi_watchers_mutex);
 	}
 
 	return rv;
 }
 
+static void cleanup_smi_msgs(ipmi_smi_t intf)
+{
+	int              i;
+	struct seq_table *ent;
+
+	/* No need for locks, the interface is down. */
+	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
+		ent = &(intf->seq_table[i]);
+		if (!ent->inuse)
+			continue;
+		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
+	}
+}
+
 int ipmi_unregister_smi(ipmi_smi_t intf)
 {
 	struct ipmi_smi_watcher *w;
+	int intf_num = intf->intf_num;
 
 	ipmi_bmc_unregister(intf);
 
+	mutex_lock(&smi_watchers_mutex);
 	mutex_lock(&ipmi_interfaces_mutex);
+	intf->intf_num = -1;
+	intf->handlers = NULL;
 	list_del_rcu(&intf->link);
 	mutex_unlock(&ipmi_interfaces_mutex);
 	synchronize_rcu();
 
+	cleanup_smi_msgs(intf);
+
 	remove_proc_entries(intf);
 
 	/* Call all the watcher interfaces to tell them that
 	   an interface is gone. */
-	down_read(&smi_watchers_sem);
 	list_for_each_entry(w, &smi_watchers, link)
-		w->smi_gone(intf->intf_num);
-	up_read(&smi_watchers_sem);
+		w->smi_gone(intf_num);
+	mutex_unlock(&smi_watchers_mutex);
 
 	kref_put(&intf->refcount, intf_free);
 	return 0;
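
ipmi_unregister_smi() is the writer side of that snapshot-and-check pattern: it unpublishes the handlers pointer and the interface number first, then waits for every reader that might still be inside an rcu_read_lock() section before failing outstanding requests. Reduced to a sketch (continuing the hypothetical names from the reader example above):

	#include <linux/rcupdate.h>

	static void iface_shutdown(struct iface *intf)
	{
		intf->handlers = NULL;	/* new senders now get -ENODEV */
		synchronize_rcu();	/* wait out senders already inside rcu_read_lock() */

		/* Now it is safe to error out queued messages and free the
		   lower layer's state, as cleanup_smi_msgs() does above. */
	}
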
@@ -2721,6 +2802,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
 	struct ipmi_ipmb_addr    *ipmb_addr;
 	struct ipmi_recv_msg     *recv_msg;
 	unsigned long            flags;
+	struct ipmi_smi_handlers *handlers;
 
 	if (msg->rsp_size < 10) {
 		/* Message not big enough, just ignore it. */
@@ -2777,10 +2859,16 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
 			printk("\n");
 		}
 #endif
-		intf->handlers->sender(intf->send_info, msg, 0);
-
-		rv = -1; /* We used the message, so return the value that
-			    causes it to not be freed or queued. */
+		rcu_read_lock();
+		handlers = intf->handlers;
+		if (handlers) {
+			handlers->sender(intf->send_info, msg, 0);
+			/* We used the message, so return the value
+			   that causes it to not be freed or
+			   queued. */
+			rv = -1;
+		}
+		rcu_read_unlock();
 	} else {
 		/* Deliver the message to the user. */
 		spin_lock_irqsave(&intf->counter_lock, flags);
@@ -3370,16 +3458,6 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
 	rcu_read_unlock();
 }
 
-static void
-handle_msg_timeout(struct ipmi_recv_msg *msg)
-{
-	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-	msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE;
-	msg->msg.netfn |= 1; /* Convert to a response. */
-	msg->msg.data_len = 1;
-	msg->msg.data = msg->msg_data;
-	deliver_response(msg);
-}
 
 static struct ipmi_smi_msg *
 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3411,7 +3489,11 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 			      struct list_head *timeouts, long timeout_period,
 			      int slot, unsigned long *flags)
 {
 	struct ipmi_recv_msg *msg;
+	struct ipmi_smi_handlers *handlers;
+
+	if (intf->intf_num == -1)
+		return;
 
 	if (!ent->inuse)
 		return;
@@ -3454,13 +3536,19 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 			return;
 
 		spin_unlock_irqrestore(&intf->seq_lock, *flags);
+
 		/* Send the new message.  We send with a zero
 		 * priority.  It timed out, I doubt time is
 		 * that critical now, and high priority
 		 * messages are really only for messages to the
 		 * local MC, which don't get resent. */
-		intf->handlers->sender(intf->send_info,
-				       smi_msg, 0);
+		handlers = intf->handlers;
+		if (handlers)
+			intf->handlers->sender(intf->send_info,
+					       smi_msg, 0);
+		else
+			ipmi_free_smi_msg(smi_msg);
+
 		spin_lock_irqsave(&intf->seq_lock, *flags);
 	}
 }
@@ -3504,18 +3592,24 @@ static void ipmi_timeout_handler(long timeout_period)
 		spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 		list_for_each_entry_safe(msg, msg2, &timeouts, link)
-			handle_msg_timeout(msg);
+			deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
 	}
 	rcu_read_unlock();
 }
 
 static void ipmi_request_event(void)
 {
 	ipmi_smi_t intf;
+	struct ipmi_smi_handlers *handlers;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(intf, &ipmi_interfaces, link)
-		intf->handlers->request_events(intf->send_info);
+	/* Called from the timer, no need to check if handlers is
+	 * valid. */
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		handlers = intf->handlers;
+		if (handlers)
+			handlers->request_events(intf->send_info);
+	}
 	rcu_read_unlock();
 }
 
@@ -3679,8 +3773,8 @@ static void send_panic_events(char *str)
 
 	/* For every registered interface, send the event. */
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		if (intf->intf_num == -1)
-			/* Interface was not ready yet. */
+		if (!intf->handlers)
+			/* Interface is not ready. */
 			continue;
 
 		/* Send the event announcing the panic. */
@@ -3846,8 +3940,8 @@ static int panic_event(struct notifier_block *this,
 
 	/* For every registered interface, set it to run to completion. */
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		if (intf->intf_num == -1)
-			/* Interface was not ready yet. */
+		if (!intf->handlers)
+			/* Interface is not ready. */
 			continue;
 
 		intf->handlers->set_run_to_completion(intf->send_info, 1);
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index 8d941db83457..85f8071be1b5 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -43,6 +43,9 @@
 
 #define PFX "IPMI poweroff: "
 
+static void ipmi_po_smi_gone(int if_num);
+static void ipmi_po_new_smi(int if_num, struct device *device);
+
 /* Definitions for controlling power off (if the system supports it).  It
  * conveniently matches the IPMI chassis control values. */
 #define IPMI_CHASSIS_POWER_DOWN		0 /* power down, the default. */
@@ -51,6 +54,37 @@
 /* the IPMI data command */
 static int poweroff_powercycle;
 
+/* Which interface to use, -1 means the first we see. */
+static int ifnum_to_use = -1;
+
+/* Our local state. */
+static int ready = 0;
+static ipmi_user_t ipmi_user;
+static int ipmi_ifnum;
+static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
+
+/* Holds the old poweroff function so we can restore it on removal. */
+static void (*old_poweroff_func)(void);
+
+static int set_param_ifnum(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
+		return 0;
+
+	ipmi_po_smi_gone(ipmi_ifnum);
+	ipmi_po_new_smi(ifnum_to_use, NULL);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the IPMI "
+		 "poweroff.  Setting to -1 defaults to the first registered "
+		 "interface");
+
 /* parameter definition to allow user to flag power cycle */
 module_param(poweroff_powercycle, int, 0644);
 MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
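
Both drivers use the same module_param_call() idiom for ifnum_to_use: let param_set_int() parse and store the value, then react to the change, so the parameter can be rewritten at runtime through sysfs. A generic sketch of the idiom (hypothetical parameter, not part of either driver):

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int example_level;

	static int set_example_level(const char *val, struct kernel_param *kp)
	{
		int rv = param_set_int(val, kp); /* parse and store into example_level */

		if (rv)
			return rv;

		/* React to the new value, e.g. rebind to another interface. */
		printk(KERN_INFO "example_level is now %d\n", example_level);
		return 0;
	}

	/* Writable at runtime via /sys/module/<module>/parameters/example_level. */
	module_param_call(example_level, set_example_level, param_get_int,
			  &example_level, 0644);
	MODULE_PARM_DESC(example_level, "Example tunable handled by a custom setter");
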
@@ -440,15 +474,6 @@ static struct poweroff_function poweroff_functions[] = {
 		      / sizeof(struct poweroff_function))
 
 
-/* Our local state. */
-static int ready = 0;
-static ipmi_user_t ipmi_user;
-static void (*specific_poweroff_func)(ipmi_user_t user) = NULL;
-
-/* Holds the old poweroff function so we can restore it on removal. */
-static void (*old_poweroff_func)(void);
-
-
 /* Called on a powerdown request. */
 static void ipmi_poweroff_function (void)
 {
@@ -473,6 +498,9 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
 	if (ready)
 		return;
 
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
+		return;
+
 	rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
 			      &ipmi_user);
 	if (rv) {
@@ -481,6 +509,8 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
 		return;
 	}
 
+	ipmi_ifnum = if_num;
+
 	/*
 	 * Do a get device id and store some results, since this is
 	 * used by several functions.
@@ -541,9 +571,15 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
 
 static void ipmi_po_smi_gone(int if_num)
 {
-	/* This can never be called, because once poweroff driver is
-	   registered, the interface can't go away until the power
-	   driver is unregistered. */
+	if (!ready)
+		return;
+
+	if (ipmi_ifnum != if_num)
+		return;
+
+	ready = 0;
+	ipmi_destroy_user(ipmi_user);
+	pm_power_off = old_poweroff_func;
 }
 
 static struct ipmi_smi_watcher smi_watcher =
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 73f759eaa5a6..90fb2a541916 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -135,6 +135,7 @@
 static int nowayout = WATCHDOG_NOWAYOUT;
 
 static ipmi_user_t watchdog_user = NULL;
+static int watchdog_ifnum;
 
 /* Default the timeout to 10 seconds. */
 static int timeout = 10;
@@ -161,6 +162,8 @@ static struct fasync_struct *fasync_q = NULL;
 static char pretimeout_since_last_heartbeat = 0;
 static char expect_close;
 
+static int ifnum_to_use = -1;
+
 static DECLARE_RWSEM(register_sem);
 
 /* Parameters to ipmi_set_timeout */
@@ -169,6 +172,8 @@ static DECLARE_RWSEM(register_sem);
 #define IPMI_SET_TIMEOUT_FORCE_HB		2
 
 static int ipmi_set_timeout(int do_heartbeat);
+static void ipmi_register_watchdog(int ipmi_intf);
+static void ipmi_unregister_watchdog(int ipmi_intf);
 
 /* If true, the driver will start running as soon as it is configured
    and ready. */
@@ -245,6 +250,26 @@ static int get_param_str(char *buffer, struct kernel_param *kp)
 	return strlen(buffer);
 }
 
+
+static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+{
+	int rv = param_set_int(val, kp);
+	if (rv)
+		return rv;
+	if ((ifnum_to_use < 0) || (ifnum_to_use == watchdog_ifnum))
+		return 0;
+
+	ipmi_unregister_watchdog(watchdog_ifnum);
+	ipmi_register_watchdog(ifnum_to_use);
+	return 0;
+}
+
+module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
+		  &ifnum_to_use, 0644);
+MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
+		 "timer.  Setting to -1 defaults to the first registered "
+		 "interface");
+
 module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
 MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
 
@@ -263,12 +288,13 @@ module_param_call(preop, set_param_str, get_param_str, preop_op, 0644);
 MODULE_PARM_DESC(preop, "Pretimeout driver operation.  One of: "
 		 "preop_none, preop_panic, preop_give_data.");
 
-module_param(start_now, int, 0);
+module_param(start_now, int, 0444);
 MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as"
 		 "soon as the driver is loaded.");
 
 module_param(nowayout, int, 0644);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=CONFIG_WATCHDOG_NOWAYOUT)");
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+		 "(default=CONFIG_WATCHDOG_NOWAYOUT)");
 
 /* Default state of the timer. */
 static unsigned char ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
@@ -872,6 +898,11 @@ static void ipmi_register_watchdog(int ipmi_intf)
 	if (watchdog_user)
 		goto out;
 
+	if ((ifnum_to_use >= 0) && (ifnum_to_use != ipmi_intf))
+		goto out;
+
+	watchdog_ifnum = ipmi_intf;
+
 	rv = ipmi_create_user(ipmi_intf, &ipmi_hndlrs, NULL, &watchdog_user);
 	if (rv < 0) {
 		printk(KERN_CRIT PFX "Unable to register with ipmi\n");
@@ -901,6 +932,39 @@ static void ipmi_register_watchdog(int ipmi_intf)
 	}
 }
 
+static void ipmi_unregister_watchdog(int ipmi_intf)
+{
+	int rv;
+
+	down_write(&register_sem);
+
+	if (!watchdog_user)
+		goto out;
+
+	if (watchdog_ifnum != ipmi_intf)
+		goto out;
+
+	/* Make sure no one can call us any more. */
+	misc_deregister(&ipmi_wdog_miscdev);
+
+	/* Wait to make sure the message makes it out.  The lower layer has
+	   pointers to our buffers, we want to make sure they are done before
+	   we release our memory. */
+	while (atomic_read(&set_timeout_tofree))
+		schedule_timeout_uninterruptible(1);
+
+	/* Disconnect from IPMI. */
+	rv = ipmi_destroy_user(watchdog_user);
+	if (rv) {
+		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
+		       rv);
+	}
+	watchdog_user = NULL;
+
+ out:
+	up_write(&register_sem);
+}
+
 #ifdef HAVE_NMI_HANDLER
 static int
 ipmi_nmi(void *dev_id, int cpu, int handled)
@@ -1004,9 +1068,7 @@ static void ipmi_new_smi(int if_num, struct device *device)
 
 static void ipmi_smi_gone(int if_num)
 {
-	/* This can never be called, because once the watchdog is
-	   registered, the interface can't go away until the watchdog
-	   is unregistered. */
+	ipmi_unregister_watchdog(if_num);
 }
 
 static struct ipmi_smi_watcher smi_watcher =
@@ -1148,30 +1210,32 @@ static int __init ipmi_wdog_init(void)
 
 	check_parms();
 
+	register_reboot_notifier(&wdog_reboot_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &wdog_panic_notifier);
+
 	rv = ipmi_smi_watcher_register(&smi_watcher);
 	if (rv) {
 #ifdef HAVE_NMI_HANDLER
 		if (preaction_val == WDOG_PRETIMEOUT_NMI)
 			release_nmi(&ipmi_nmi_handler);
 #endif
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &wdog_panic_notifier);
+		unregister_reboot_notifier(&wdog_reboot_notifier);
 		printk(KERN_WARNING PFX "can't register smi watcher\n");
 		return rv;
 	}
 
-	register_reboot_notifier(&wdog_reboot_notifier);
-	atomic_notifier_chain_register(&panic_notifier_list,
-				       &wdog_panic_notifier);
-
 	printk(KERN_INFO PFX "driver initialized\n");
 
 	return 0;
 }
 
-static __exit void ipmi_unregister_watchdog(void)
+static void __exit ipmi_wdog_exit(void)
 {
-	int rv;
-
-	down_write(&register_sem);
+	ipmi_smi_watcher_unregister(&smi_watcher);
+	ipmi_unregister_watchdog(watchdog_ifnum);
 
 #ifdef HAVE_NMI_HANDLER
 	if (nmi_handler_registered)
@@ -1179,37 +1243,8 @@ static __exit void ipmi_unregister_watchdog(void)
 #endif
 
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &wdog_panic_notifier);
 	unregister_reboot_notifier(&wdog_reboot_notifier);
-
-	if (! watchdog_user)
-		goto out;
-
-	/* Make sure no one can call us any more. */
-	misc_deregister(&ipmi_wdog_miscdev);
-
-	/* Wait to make sure the message makes it out.  The lower layer has
-	   pointers to our buffers, we want to make sure they are done before
-	   we release our memory. */
-	while (atomic_read(&set_timeout_tofree))
-		schedule_timeout_uninterruptible(1);
-
-	/* Disconnect from IPMI. */
-	rv = ipmi_destroy_user(watchdog_user);
-	if (rv) {
-		printk(KERN_WARNING PFX "error unlinking from IPMI: %d\n",
-		       rv);
-	}
-	watchdog_user = NULL;
-
- out:
-	up_write(&register_sem);
-}
-
-static void __exit ipmi_wdog_exit(void)
-{
-	ipmi_smi_watcher_unregister(&smi_watcher);
-	ipmi_unregister_watchdog();
 }
 module_exit(ipmi_wdog_exit);
 module_init(ipmi_wdog_init);