diff options
author | Corey Minyard <minyard@acm.org> | 2006-12-06 23:41:00 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:39:47 -0500 |
commit | b2c03941b50944a268ee4d5823872f220809a3ba (patch) | |
tree | 9ade96c649031f1eaf20587a2fdf855fe0118f4c /drivers/char/ipmi/ipmi_msghandler.c | |
parent | 759643b874907e76ae81e34df62f41ab6683f5c2 (diff) |
[PATCH] IPMI: Allow hot system interface remove
This modifies the IPMI driver so that a lower-level interface can be
dynamically removed while in use so it can support hot-removal of hardware.
It also adds the ability to specify and dynamically change the IPMI interface
the watchdog timer and the poweroff code use.
Signed-off-by: Corey Minyard <minyard@acm.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/char/ipmi/ipmi_msghandler.c')
-rw-r--r-- | drivers/char/ipmi/ipmi_msghandler.c | 246 |
1 file changed, 170 insertions, 76 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 6a77b264eb2c..03f32611831d 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -200,6 +200,10 @@ struct ipmi_smi | |||
200 | * protects this. */ | 200 | * protects this. */ |
201 | struct list_head users; | 201 | struct list_head users; |
202 | 202 | ||
203 | /* Information to supply to users. */ | ||
204 | unsigned char ipmi_version_major; | ||
205 | unsigned char ipmi_version_minor; | ||
206 | |||
203 | /* Used for wake ups at startup. */ | 207 | /* Used for wake ups at startup. */ |
204 | wait_queue_head_t waitq; | 208 | wait_queue_head_t waitq; |
205 | 209 | ||
@@ -207,7 +211,10 @@ struct ipmi_smi | |||
207 | char *my_dev_name; | 211 | char *my_dev_name; |
208 | char *sysfs_name; | 212 | char *sysfs_name; |
209 | 213 | ||
210 | /* This is the lower-layer's sender routine. */ | 214 | /* This is the lower-layer's sender routine. Note that you |
215 | * must either be holding the ipmi_interfaces_mutex or be in | ||
216 | * an umpreemptible region to use this. You must fetch the | ||
217 | * value into a local variable and make sure it is not NULL. */ | ||
211 | struct ipmi_smi_handlers *handlers; | 218 | struct ipmi_smi_handlers *handlers; |
212 | void *send_info; | 219 | void *send_info; |
213 | 220 | ||
@@ -246,6 +253,7 @@ struct ipmi_smi | |||
246 | spinlock_t events_lock; /* For dealing with event stuff. */ | 253 | spinlock_t events_lock; /* For dealing with event stuff. */ |
247 | struct list_head waiting_events; | 254 | struct list_head waiting_events; |
248 | unsigned int waiting_events_count; /* How many events in queue? */ | 255 | unsigned int waiting_events_count; /* How many events in queue? */ |
256 | int delivering_events; | ||
249 | 257 | ||
250 | /* The event receiver for my BMC, only really used at panic | 258 | /* The event receiver for my BMC, only really used at panic |
251 | shutdown as a place to store this. */ | 259 | shutdown as a place to store this. */ |
@@ -357,7 +365,7 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex); | |||
357 | /* List of watchers that want to know when smi's are added and | 365 | /* List of watchers that want to know when smi's are added and |
358 | deleted. */ | 366 | deleted. */ |
359 | static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); | 367 | static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); |
360 | static DECLARE_RWSEM(smi_watchers_sem); | 368 | static DEFINE_MUTEX(smi_watchers_mutex); |
361 | 369 | ||
362 | 370 | ||
363 | static void free_recv_msg_list(struct list_head *q) | 371 | static void free_recv_msg_list(struct list_head *q) |
@@ -418,8 +426,9 @@ static void intf_free(struct kref *ref) | |||
418 | } | 426 | } |
419 | 427 | ||
420 | struct watcher_entry { | 428 | struct watcher_entry { |
429 | int intf_num; | ||
430 | ipmi_smi_t intf; | ||
421 | struct list_head link; | 431 | struct list_head link; |
422 | int intf_num; | ||
423 | }; | 432 | }; |
424 | 433 | ||
425 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | 434 | int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) |
@@ -428,36 +437,45 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | |||
428 | struct list_head to_deliver = LIST_HEAD_INIT(to_deliver); | 437 | struct list_head to_deliver = LIST_HEAD_INIT(to_deliver); |
429 | struct watcher_entry *e, *e2; | 438 | struct watcher_entry *e, *e2; |
430 | 439 | ||
440 | mutex_lock(&smi_watchers_mutex); | ||
441 | |||
431 | mutex_lock(&ipmi_interfaces_mutex); | 442 | mutex_lock(&ipmi_interfaces_mutex); |
432 | 443 | ||
444 | /* Build a list of things to deliver. */ | ||
433 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 445 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
434 | if (intf->intf_num == -1) | 446 | if (intf->intf_num == -1) |
435 | continue; | 447 | continue; |
436 | e = kmalloc(sizeof(*e), GFP_KERNEL); | 448 | e = kmalloc(sizeof(*e), GFP_KERNEL); |
437 | if (!e) | 449 | if (!e) |
438 | goto out_err; | 450 | goto out_err; |
451 | kref_get(&intf->refcount); | ||
452 | e->intf = intf; | ||
439 | e->intf_num = intf->intf_num; | 453 | e->intf_num = intf->intf_num; |
440 | list_add_tail(&e->link, &to_deliver); | 454 | list_add_tail(&e->link, &to_deliver); |
441 | } | 455 | } |
442 | 456 | ||
443 | down_write(&smi_watchers_sem); | 457 | /* We will succeed, so add it to the list. */ |
444 | list_add(&(watcher->link), &smi_watchers); | 458 | list_add(&watcher->link, &smi_watchers); |
445 | up_write(&smi_watchers_sem); | ||
446 | 459 | ||
447 | mutex_unlock(&ipmi_interfaces_mutex); | 460 | mutex_unlock(&ipmi_interfaces_mutex); |
448 | 461 | ||
449 | list_for_each_entry_safe(e, e2, &to_deliver, link) { | 462 | list_for_each_entry_safe(e, e2, &to_deliver, link) { |
450 | list_del(&e->link); | 463 | list_del(&e->link); |
451 | watcher->new_smi(e->intf_num, intf->si_dev); | 464 | watcher->new_smi(e->intf_num, e->intf->si_dev); |
465 | kref_put(&e->intf->refcount, intf_free); | ||
452 | kfree(e); | 466 | kfree(e); |
453 | } | 467 | } |
454 | 468 | ||
469 | mutex_unlock(&smi_watchers_mutex); | ||
455 | 470 | ||
456 | return 0; | 471 | return 0; |
457 | 472 | ||
458 | out_err: | 473 | out_err: |
474 | mutex_unlock(&ipmi_interfaces_mutex); | ||
475 | mutex_unlock(&smi_watchers_mutex); | ||
459 | list_for_each_entry_safe(e, e2, &to_deliver, link) { | 476 | list_for_each_entry_safe(e, e2, &to_deliver, link) { |
460 | list_del(&e->link); | 477 | list_del(&e->link); |
478 | kref_put(&e->intf->refcount, intf_free); | ||
461 | kfree(e); | 479 | kfree(e); |
462 | } | 480 | } |
463 | return -ENOMEM; | 481 | return -ENOMEM; |
@@ -465,25 +483,26 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | |||
465 | 483 | ||
466 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | 484 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) |
467 | { | 485 | { |
468 | down_write(&smi_watchers_sem); | 486 | mutex_lock(&smi_watchers_mutex); |
469 | list_del(&(watcher->link)); | 487 | list_del(&(watcher->link)); |
470 | up_write(&smi_watchers_sem); | 488 | mutex_unlock(&smi_watchers_mutex); |
471 | return 0; | 489 | return 0; |
472 | } | 490 | } |
473 | 491 | ||
492 | /* | ||
493 | * Must be called with smi_watchers_mutex held. | ||
494 | */ | ||
474 | static void | 495 | static void |
475 | call_smi_watchers(int i, struct device *dev) | 496 | call_smi_watchers(int i, struct device *dev) |
476 | { | 497 | { |
477 | struct ipmi_smi_watcher *w; | 498 | struct ipmi_smi_watcher *w; |
478 | 499 | ||
479 | down_read(&smi_watchers_sem); | ||
480 | list_for_each_entry(w, &smi_watchers, link) { | 500 | list_for_each_entry(w, &smi_watchers, link) { |
481 | if (try_module_get(w->owner)) { | 501 | if (try_module_get(w->owner)) { |
482 | w->new_smi(i, dev); | 502 | w->new_smi(i, dev); |
483 | module_put(w->owner); | 503 | module_put(w->owner); |
484 | } | 504 | } |
485 | } | 505 | } |
486 | up_read(&smi_watchers_sem); | ||
487 | } | 506 | } |
488 | 507 | ||
489 | static int | 508 | static int |
@@ -609,6 +628,17 @@ static void deliver_response(struct ipmi_recv_msg *msg) | |||
609 | } | 628 | } |
610 | } | 629 | } |
611 | 630 | ||
631 | static void | ||
632 | deliver_err_response(struct ipmi_recv_msg *msg, int err) | ||
633 | { | ||
634 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | ||
635 | msg->msg_data[0] = err; | ||
636 | msg->msg.netfn |= 1; /* Convert to a response. */ | ||
637 | msg->msg.data_len = 1; | ||
638 | msg->msg.data = msg->msg_data; | ||
639 | deliver_response(msg); | ||
640 | } | ||
641 | |||
612 | /* Find the next sequence number not being used and add the given | 642 | /* Find the next sequence number not being used and add the given |
613 | message with the given timeout to the sequence table. This must be | 643 | message with the given timeout to the sequence table. This must be |
614 | called with the interface's seq_lock held. */ | 644 | called with the interface's seq_lock held. */ |
@@ -746,14 +776,8 @@ static int intf_err_seq(ipmi_smi_t intf, | |||
746 | } | 776 | } |
747 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 777 | spin_unlock_irqrestore(&(intf->seq_lock), flags); |
748 | 778 | ||
749 | if (msg) { | 779 | if (msg) |
750 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 780 | deliver_err_response(msg, err); |
751 | msg->msg_data[0] = err; | ||
752 | msg->msg.netfn |= 1; /* Convert to a response. */ | ||
753 | msg->msg.data_len = 1; | ||
754 | msg->msg.data = msg->msg_data; | ||
755 | deliver_response(msg); | ||
756 | } | ||
757 | 781 | ||
758 | return rv; | 782 | return rv; |
759 | } | 783 | } |
@@ -795,19 +819,18 @@ int ipmi_create_user(unsigned int if_num, | |||
795 | if (!new_user) | 819 | if (!new_user) |
796 | return -ENOMEM; | 820 | return -ENOMEM; |
797 | 821 | ||
798 | rcu_read_lock(); | 822 | mutex_lock(&ipmi_interfaces_mutex); |
799 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 823 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
800 | if (intf->intf_num == if_num) | 824 | if (intf->intf_num == if_num) |
801 | goto found; | 825 | goto found; |
802 | } | 826 | } |
803 | rcu_read_unlock(); | 827 | /* Not found, return an error */ |
804 | rv = -EINVAL; | 828 | rv = -EINVAL; |
805 | goto out_kfree; | 829 | goto out_kfree; |
806 | 830 | ||
807 | found: | 831 | found: |
808 | /* Note that each existing user holds a refcount to the interface. */ | 832 | /* Note that each existing user holds a refcount to the interface. */ |
809 | kref_get(&intf->refcount); | 833 | kref_get(&intf->refcount); |
810 | rcu_read_unlock(); | ||
811 | 834 | ||
812 | kref_init(&new_user->refcount); | 835 | kref_init(&new_user->refcount); |
813 | new_user->handler = handler; | 836 | new_user->handler = handler; |
@@ -828,6 +851,10 @@ int ipmi_create_user(unsigned int if_num, | |||
828 | } | 851 | } |
829 | } | 852 | } |
830 | 853 | ||
854 | /* Hold the lock so intf->handlers is guaranteed to be good | ||
855 | * until now */ | ||
856 | mutex_unlock(&ipmi_interfaces_mutex); | ||
857 | |||
831 | new_user->valid = 1; | 858 | new_user->valid = 1; |
832 | spin_lock_irqsave(&intf->seq_lock, flags); | 859 | spin_lock_irqsave(&intf->seq_lock, flags); |
833 | list_add_rcu(&new_user->link, &intf->users); | 860 | list_add_rcu(&new_user->link, &intf->users); |
@@ -838,6 +865,7 @@ int ipmi_create_user(unsigned int if_num, | |||
838 | out_kref: | 865 | out_kref: |
839 | kref_put(&intf->refcount, intf_free); | 866 | kref_put(&intf->refcount, intf_free); |
840 | out_kfree: | 867 | out_kfree: |
868 | mutex_unlock(&ipmi_interfaces_mutex); | ||
841 | kfree(new_user); | 869 | kfree(new_user); |
842 | return rv; | 870 | return rv; |
843 | } | 871 | } |
@@ -867,6 +895,7 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
867 | && (intf->seq_table[i].recv_msg->user == user)) | 895 | && (intf->seq_table[i].recv_msg->user == user)) |
868 | { | 896 | { |
869 | intf->seq_table[i].inuse = 0; | 897 | intf->seq_table[i].inuse = 0; |
898 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); | ||
870 | } | 899 | } |
871 | } | 900 | } |
872 | spin_unlock_irqrestore(&intf->seq_lock, flags); | 901 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
@@ -893,9 +922,13 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
893 | kfree(rcvr); | 922 | kfree(rcvr); |
894 | } | 923 | } |
895 | 924 | ||
896 | module_put(intf->handlers->owner); | 925 | mutex_lock(&ipmi_interfaces_mutex); |
897 | if (intf->handlers->dec_usecount) | 926 | if (intf->handlers) { |
898 | intf->handlers->dec_usecount(intf->send_info); | 927 | module_put(intf->handlers->owner); |
928 | if (intf->handlers->dec_usecount) | ||
929 | intf->handlers->dec_usecount(intf->send_info); | ||
930 | } | ||
931 | mutex_unlock(&ipmi_interfaces_mutex); | ||
899 | 932 | ||
900 | kref_put(&intf->refcount, intf_free); | 933 | kref_put(&intf->refcount, intf_free); |
901 | 934 | ||
@@ -908,8 +941,8 @@ void ipmi_get_version(ipmi_user_t user, | |||
908 | unsigned char *major, | 941 | unsigned char *major, |
909 | unsigned char *minor) | 942 | unsigned char *minor) |
910 | { | 943 | { |
911 | *major = ipmi_version_major(&user->intf->bmc->id); | 944 | *major = user->intf->ipmi_version_major; |
912 | *minor = ipmi_version_minor(&user->intf->bmc->id); | 945 | *minor = user->intf->ipmi_version_minor; |
913 | } | 946 | } |
914 | 947 | ||
915 | int ipmi_set_my_address(ipmi_user_t user, | 948 | int ipmi_set_my_address(ipmi_user_t user, |
@@ -964,20 +997,33 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) | |||
964 | spin_lock_irqsave(&intf->events_lock, flags); | 997 | spin_lock_irqsave(&intf->events_lock, flags); |
965 | user->gets_events = val; | 998 | user->gets_events = val; |
966 | 999 | ||
967 | if (val) { | 1000 | if (intf->delivering_events) |
968 | /* Deliver any queued events. */ | 1001 | /* |
1002 | * Another thread is delivering events for this, so | ||
1003 | * let it handle any new events. | ||
1004 | */ | ||
1005 | goto out; | ||
1006 | |||
1007 | /* Deliver any queued events. */ | ||
1008 | while (user->gets_events && !list_empty(&intf->waiting_events)) { | ||
969 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) | 1009 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) |
970 | list_move_tail(&msg->link, &msgs); | 1010 | list_move_tail(&msg->link, &msgs); |
971 | intf->waiting_events_count = 0; | 1011 | intf->waiting_events_count = 0; |
972 | } | ||
973 | 1012 | ||
974 | /* Hold the events lock while doing this to preserve order. */ | 1013 | intf->delivering_events = 1; |
975 | list_for_each_entry_safe(msg, msg2, &msgs, link) { | 1014 | spin_unlock_irqrestore(&intf->events_lock, flags); |
976 | msg->user = user; | 1015 | |
977 | kref_get(&user->refcount); | 1016 | list_for_each_entry_safe(msg, msg2, &msgs, link) { |
978 | deliver_response(msg); | 1017 | msg->user = user; |
1018 | kref_get(&user->refcount); | ||
1019 | deliver_response(msg); | ||
1020 | } | ||
1021 | |||
1022 | spin_lock_irqsave(&intf->events_lock, flags); | ||
1023 | intf->delivering_events = 0; | ||
979 | } | 1024 | } |
980 | 1025 | ||
1026 | out: | ||
981 | spin_unlock_irqrestore(&intf->events_lock, flags); | 1027 | spin_unlock_irqrestore(&intf->events_lock, flags); |
982 | 1028 | ||
983 | return 0; | 1029 | return 0; |
@@ -1088,7 +1134,8 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, | |||
1088 | void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) | 1134 | void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) |
1089 | { | 1135 | { |
1090 | ipmi_smi_t intf = user->intf; | 1136 | ipmi_smi_t intf = user->intf; |
1091 | intf->handlers->set_run_to_completion(intf->send_info, val); | 1137 | if (intf->handlers) |
1138 | intf->handlers->set_run_to_completion(intf->send_info, val); | ||
1092 | } | 1139 | } |
1093 | 1140 | ||
1094 | static unsigned char | 1141 | static unsigned char |
@@ -1199,10 +1246,11 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1199 | int retries, | 1246 | int retries, |
1200 | unsigned int retry_time_ms) | 1247 | unsigned int retry_time_ms) |
1201 | { | 1248 | { |
1202 | int rv = 0; | 1249 | int rv = 0; |
1203 | struct ipmi_smi_msg *smi_msg; | 1250 | struct ipmi_smi_msg *smi_msg; |
1204 | struct ipmi_recv_msg *recv_msg; | 1251 | struct ipmi_recv_msg *recv_msg; |
1205 | unsigned long flags; | 1252 | unsigned long flags; |
1253 | struct ipmi_smi_handlers *handlers; | ||
1206 | 1254 | ||
1207 | 1255 | ||
1208 | if (supplied_recv) { | 1256 | if (supplied_recv) { |
@@ -1225,6 +1273,13 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1225 | } | 1273 | } |
1226 | } | 1274 | } |
1227 | 1275 | ||
1276 | rcu_read_lock(); | ||
1277 | handlers = intf->handlers; | ||
1278 | if (!handlers) { | ||
1279 | rv = -ENODEV; | ||
1280 | goto out_err; | ||
1281 | } | ||
1282 | |||
1228 | recv_msg->user = user; | 1283 | recv_msg->user = user; |
1229 | if (user) | 1284 | if (user) |
1230 | kref_get(&user->refcount); | 1285 | kref_get(&user->refcount); |
@@ -1541,11 +1596,14 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1541 | printk("\n"); | 1596 | printk("\n"); |
1542 | } | 1597 | } |
1543 | #endif | 1598 | #endif |
1544 | intf->handlers->sender(intf->send_info, smi_msg, priority); | 1599 | |
1600 | handlers->sender(intf->send_info, smi_msg, priority); | ||
1601 | rcu_read_unlock(); | ||
1545 | 1602 | ||
1546 | return 0; | 1603 | return 0; |
1547 | 1604 | ||
1548 | out_err: | 1605 | out_err: |
1606 | rcu_read_unlock(); | ||
1549 | ipmi_free_smi_msg(smi_msg); | 1607 | ipmi_free_smi_msg(smi_msg); |
1550 | ipmi_free_recv_msg(recv_msg); | 1608 | ipmi_free_recv_msg(recv_msg); |
1551 | return rv; | 1609 | return rv; |
@@ -2492,13 +2550,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2492 | int rv; | 2550 | int rv; |
2493 | ipmi_smi_t intf; | 2551 | ipmi_smi_t intf; |
2494 | ipmi_smi_t tintf; | 2552 | ipmi_smi_t tintf; |
2495 | int version_major; | ||
2496 | int version_minor; | ||
2497 | struct list_head *link; | 2553 | struct list_head *link; |
2498 | 2554 | ||
2499 | version_major = ipmi_version_major(device_id); | ||
2500 | version_minor = ipmi_version_minor(device_id); | ||
2501 | |||
2502 | /* Make sure the driver is actually initialized, this handles | 2555 | /* Make sure the driver is actually initialized, this handles |
2503 | problems with initialization order. */ | 2556 | problems with initialization order. */ |
2504 | if (!initialized) { | 2557 | if (!initialized) { |
@@ -2515,6 +2568,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2515 | if (!intf) | 2568 | if (!intf) |
2516 | return -ENOMEM; | 2569 | return -ENOMEM; |
2517 | memset(intf, 0, sizeof(*intf)); | 2570 | memset(intf, 0, sizeof(*intf)); |
2571 | |||
2572 | intf->ipmi_version_major = ipmi_version_major(device_id); | ||
2573 | intf->ipmi_version_minor = ipmi_version_minor(device_id); | ||
2574 | |||
2518 | intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); | 2575 | intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); |
2519 | if (!intf->bmc) { | 2576 | if (!intf->bmc) { |
2520 | kfree(intf); | 2577 | kfree(intf); |
@@ -2554,6 +2611,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2554 | spin_lock_init(&intf->counter_lock); | 2611 | spin_lock_init(&intf->counter_lock); |
2555 | intf->proc_dir = NULL; | 2612 | intf->proc_dir = NULL; |
2556 | 2613 | ||
2614 | mutex_lock(&smi_watchers_mutex); | ||
2557 | mutex_lock(&ipmi_interfaces_mutex); | 2615 | mutex_lock(&ipmi_interfaces_mutex); |
2558 | /* Look for a hole in the numbers. */ | 2616 | /* Look for a hole in the numbers. */ |
2559 | i = 0; | 2617 | i = 0; |
@@ -2577,8 +2635,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2577 | 2635 | ||
2578 | get_guid(intf); | 2636 | get_guid(intf); |
2579 | 2637 | ||
2580 | if ((version_major > 1) | 2638 | if ((intf->ipmi_version_major > 1) |
2581 | || ((version_major == 1) && (version_minor >= 5))) | 2639 | || ((intf->ipmi_version_major == 1) |
2640 | && (intf->ipmi_version_minor >= 5))) | ||
2582 | { | 2641 | { |
2583 | /* Start scanning the channels to see what is | 2642 | /* Start scanning the channels to see what is |
2584 | available. */ | 2643 | available. */ |
@@ -2607,8 +2666,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2607 | if (rv) { | 2666 | if (rv) { |
2608 | if (intf->proc_dir) | 2667 | if (intf->proc_dir) |
2609 | remove_proc_entries(intf); | 2668 | remove_proc_entries(intf); |
2669 | intf->handlers = NULL; | ||
2610 | list_del_rcu(&intf->link); | 2670 | list_del_rcu(&intf->link); |
2611 | mutex_unlock(&ipmi_interfaces_mutex); | 2671 | mutex_unlock(&ipmi_interfaces_mutex); |
2672 | mutex_unlock(&smi_watchers_mutex); | ||
2612 | synchronize_rcu(); | 2673 | synchronize_rcu(); |
2613 | kref_put(&intf->refcount, intf_free); | 2674 | kref_put(&intf->refcount, intf_free); |
2614 | } else { | 2675 | } else { |
@@ -2616,30 +2677,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2616 | intf->intf_num = i; | 2677 | intf->intf_num = i; |
2617 | mutex_unlock(&ipmi_interfaces_mutex); | 2678 | mutex_unlock(&ipmi_interfaces_mutex); |
2618 | call_smi_watchers(i, intf->si_dev); | 2679 | call_smi_watchers(i, intf->si_dev); |
2680 | mutex_unlock(&smi_watchers_mutex); | ||
2619 | } | 2681 | } |
2620 | 2682 | ||
2621 | return rv; | 2683 | return rv; |
2622 | } | 2684 | } |
2623 | 2685 | ||
2686 | static void cleanup_smi_msgs(ipmi_smi_t intf) | ||
2687 | { | ||
2688 | int i; | ||
2689 | struct seq_table *ent; | ||
2690 | |||
2691 | /* No need for locks, the interface is down. */ | ||
2692 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { | ||
2693 | ent = &(intf->seq_table[i]); | ||
2694 | if (!ent->inuse) | ||
2695 | continue; | ||
2696 | deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED); | ||
2697 | } | ||
2698 | } | ||
2699 | |||
2624 | int ipmi_unregister_smi(ipmi_smi_t intf) | 2700 | int ipmi_unregister_smi(ipmi_smi_t intf) |
2625 | { | 2701 | { |
2626 | struct ipmi_smi_watcher *w; | 2702 | struct ipmi_smi_watcher *w; |
2703 | int intf_num = intf->intf_num; | ||
2627 | 2704 | ||
2628 | ipmi_bmc_unregister(intf); | 2705 | ipmi_bmc_unregister(intf); |
2629 | 2706 | ||
2707 | mutex_lock(&smi_watchers_mutex); | ||
2630 | mutex_lock(&ipmi_interfaces_mutex); | 2708 | mutex_lock(&ipmi_interfaces_mutex); |
2709 | intf->intf_num = -1; | ||
2710 | intf->handlers = NULL; | ||
2631 | list_del_rcu(&intf->link); | 2711 | list_del_rcu(&intf->link); |
2632 | mutex_unlock(&ipmi_interfaces_mutex); | 2712 | mutex_unlock(&ipmi_interfaces_mutex); |
2633 | synchronize_rcu(); | 2713 | synchronize_rcu(); |
2634 | 2714 | ||
2715 | cleanup_smi_msgs(intf); | ||
2716 | |||
2635 | remove_proc_entries(intf); | 2717 | remove_proc_entries(intf); |
2636 | 2718 | ||
2637 | /* Call all the watcher interfaces to tell them that | 2719 | /* Call all the watcher interfaces to tell them that |
2638 | an interface is gone. */ | 2720 | an interface is gone. */ |
2639 | down_read(&smi_watchers_sem); | ||
2640 | list_for_each_entry(w, &smi_watchers, link) | 2721 | list_for_each_entry(w, &smi_watchers, link) |
2641 | w->smi_gone(intf->intf_num); | 2722 | w->smi_gone(intf_num); |
2642 | up_read(&smi_watchers_sem); | 2723 | mutex_unlock(&smi_watchers_mutex); |
2643 | 2724 | ||
2644 | kref_put(&intf->refcount, intf_free); | 2725 | kref_put(&intf->refcount, intf_free); |
2645 | return 0; | 2726 | return 0; |
@@ -2721,6 +2802,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2721 | struct ipmi_ipmb_addr *ipmb_addr; | 2802 | struct ipmi_ipmb_addr *ipmb_addr; |
2722 | struct ipmi_recv_msg *recv_msg; | 2803 | struct ipmi_recv_msg *recv_msg; |
2723 | unsigned long flags; | 2804 | unsigned long flags; |
2805 | struct ipmi_smi_handlers *handlers; | ||
2724 | 2806 | ||
2725 | if (msg->rsp_size < 10) { | 2807 | if (msg->rsp_size < 10) { |
2726 | /* Message not big enough, just ignore it. */ | 2808 | /* Message not big enough, just ignore it. */ |
@@ -2777,10 +2859,16 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2777 | printk("\n"); | 2859 | printk("\n"); |
2778 | } | 2860 | } |
2779 | #endif | 2861 | #endif |
2780 | intf->handlers->sender(intf->send_info, msg, 0); | 2862 | rcu_read_lock(); |
2781 | 2863 | handlers = intf->handlers; | |
2782 | rv = -1; /* We used the message, so return the value that | 2864 | if (handlers) { |
2783 | causes it to not be freed or queued. */ | 2865 | handlers->sender(intf->send_info, msg, 0); |
2866 | /* We used the message, so return the value | ||
2867 | that causes it to not be freed or | ||
2868 | queued. */ | ||
2869 | rv = -1; | ||
2870 | } | ||
2871 | rcu_read_unlock(); | ||
2784 | } else { | 2872 | } else { |
2785 | /* Deliver the message to the user. */ | 2873 | /* Deliver the message to the user. */ |
2786 | spin_lock_irqsave(&intf->counter_lock, flags); | 2874 | spin_lock_irqsave(&intf->counter_lock, flags); |
@@ -3370,16 +3458,6 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | |||
3370 | rcu_read_unlock(); | 3458 | rcu_read_unlock(); |
3371 | } | 3459 | } |
3372 | 3460 | ||
3373 | static void | ||
3374 | handle_msg_timeout(struct ipmi_recv_msg *msg) | ||
3375 | { | ||
3376 | msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | ||
3377 | msg->msg_data[0] = IPMI_TIMEOUT_COMPLETION_CODE; | ||
3378 | msg->msg.netfn |= 1; /* Convert to a response. */ | ||
3379 | msg->msg.data_len = 1; | ||
3380 | msg->msg.data = msg->msg_data; | ||
3381 | deliver_response(msg); | ||
3382 | } | ||
3383 | 3461 | ||
3384 | static struct ipmi_smi_msg * | 3462 | static struct ipmi_smi_msg * |
3385 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | 3463 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, |
@@ -3411,7 +3489,11 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3411 | struct list_head *timeouts, long timeout_period, | 3489 | struct list_head *timeouts, long timeout_period, |
3412 | int slot, unsigned long *flags) | 3490 | int slot, unsigned long *flags) |
3413 | { | 3491 | { |
3414 | struct ipmi_recv_msg *msg; | 3492 | struct ipmi_recv_msg *msg; |
3493 | struct ipmi_smi_handlers *handlers; | ||
3494 | |||
3495 | if (intf->intf_num == -1) | ||
3496 | return; | ||
3415 | 3497 | ||
3416 | if (!ent->inuse) | 3498 | if (!ent->inuse) |
3417 | return; | 3499 | return; |
@@ -3454,13 +3536,19 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3454 | return; | 3536 | return; |
3455 | 3537 | ||
3456 | spin_unlock_irqrestore(&intf->seq_lock, *flags); | 3538 | spin_unlock_irqrestore(&intf->seq_lock, *flags); |
3539 | |||
3457 | /* Send the new message. We send with a zero | 3540 | /* Send the new message. We send with a zero |
3458 | * priority. It timed out, I doubt time is | 3541 | * priority. It timed out, I doubt time is |
3459 | * that critical now, and high priority | 3542 | * that critical now, and high priority |
3460 | * messages are really only for messages to the | 3543 | * messages are really only for messages to the |
3461 | * local MC, which don't get resent. */ | 3544 | * local MC, which don't get resent. */ |
3462 | intf->handlers->sender(intf->send_info, | 3545 | handlers = intf->handlers; |
3463 | smi_msg, 0); | 3546 | if (handlers) |
3547 | intf->handlers->sender(intf->send_info, | ||
3548 | smi_msg, 0); | ||
3549 | else | ||
3550 | ipmi_free_smi_msg(smi_msg); | ||
3551 | |||
3464 | spin_lock_irqsave(&intf->seq_lock, *flags); | 3552 | spin_lock_irqsave(&intf->seq_lock, *flags); |
3465 | } | 3553 | } |
3466 | } | 3554 | } |
@@ -3504,18 +3592,24 @@ static void ipmi_timeout_handler(long timeout_period) | |||
3504 | spin_unlock_irqrestore(&intf->seq_lock, flags); | 3592 | spin_unlock_irqrestore(&intf->seq_lock, flags); |
3505 | 3593 | ||
3506 | list_for_each_entry_safe(msg, msg2, &timeouts, link) | 3594 | list_for_each_entry_safe(msg, msg2, &timeouts, link) |
3507 | handle_msg_timeout(msg); | 3595 | deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE); |
3508 | } | 3596 | } |
3509 | rcu_read_unlock(); | 3597 | rcu_read_unlock(); |
3510 | } | 3598 | } |
3511 | 3599 | ||
3512 | static void ipmi_request_event(void) | 3600 | static void ipmi_request_event(void) |
3513 | { | 3601 | { |
3514 | ipmi_smi_t intf; | 3602 | ipmi_smi_t intf; |
3603 | struct ipmi_smi_handlers *handlers; | ||
3515 | 3604 | ||
3516 | rcu_read_lock(); | 3605 | rcu_read_lock(); |
3517 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) | 3606 | /* Called from the timer, no need to check if handlers is |
3518 | intf->handlers->request_events(intf->send_info); | 3607 | * valid. */ |
3608 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | ||
3609 | handlers = intf->handlers; | ||
3610 | if (handlers) | ||
3611 | handlers->request_events(intf->send_info); | ||
3612 | } | ||
3519 | rcu_read_unlock(); | 3613 | rcu_read_unlock(); |
3520 | } | 3614 | } |
3521 | 3615 | ||
@@ -3679,8 +3773,8 @@ static void send_panic_events(char *str) | |||
3679 | 3773 | ||
3680 | /* For every registered interface, send the event. */ | 3774 | /* For every registered interface, send the event. */ |
3681 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 3775 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3682 | if (intf->intf_num == -1) | 3776 | if (!intf->handlers) |
3683 | /* Interface was not ready yet. */ | 3777 | /* Interface is not ready. */ |
3684 | continue; | 3778 | continue; |
3685 | 3779 | ||
3686 | /* Send the event announcing the panic. */ | 3780 | /* Send the event announcing the panic. */ |
@@ -3846,8 +3940,8 @@ static int panic_event(struct notifier_block *this, | |||
3846 | 3940 | ||
3847 | /* For every registered interface, set it to run to completion. */ | 3941 | /* For every registered interface, set it to run to completion. */ |
3848 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 3942 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3849 | if (intf->intf_num == -1) | 3943 | if (!intf->handlers) |
3850 | /* Interface was not ready yet. */ | 3944 | /* Interface is not ready. */ |
3851 | continue; | 3945 | continue; |
3852 | 3946 | ||
3853 | intf->handlers->set_run_to_completion(intf->send_info, 1); | 3947 | intf->handlers->set_run_to_completion(intf->send_info, 1); |