author	Corey Minyard <minyard@acm.org>	2005-11-07 03:59:54 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-07 10:53:43 -0500
commit	393d2cc354d150b8b4bb888a9da7db4c935e12bd (patch)
tree	b696a63b19d8bc3ce9a9c2bb0e66aa91fcf954a4 /drivers
parent	f5b3db0017f8415301f3427b30263186e8478c3f (diff)
[PATCH] ipmi: use refcount in message handler
This patch is rather large, but it really can't be done in smaller chunks easily and I believe it is an important change. This has been out and tested for a while in the latest IPMI driver release. There are no functional changes, just changes as necessary to convert the locking over (and a few minor style updates).

The IPMI driver uses read/write locks to ensure that things exist while they are in use. This is bad from a number of points of view. This patch removes the rwlocks and uses refcounts and RCU lists to manage what the locks did.

Signed-off-by: Corey Minyard <minyard@acm.org>
Cc: Matt Domsch <Matt_Domsch@dell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
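To make the new scheme concrete, here is a minimal, self-contained sketch of the kref-plus-RCU-list pattern the patch moves to. The names (demo_user, demo_users, the helpers) are illustrative only, not the driver's actual structures: readers walk the list under rcu_read_lock() and pin an entry with kref_get(), while writers unlink under a spinlock, wait out the grace period, and let the last kref_put() free the memory.

/*
 * Minimal sketch of the pattern described above (illustrative names,
 * not the driver's real structures).  Readers traverse the RCU list
 * and take a reference; destruction unlinks under a spinlock, waits
 * for the RCU grace period, then drops the list's reference.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_user {
	struct list_head link;		/* on demo_users, RCU-protected */
	struct kref	 refcount;
	int		 valid;		/* cleared when being destroyed */
};

static LIST_HEAD(demo_users);
static DEFINE_SPINLOCK(demo_users_lock);	/* serializes writers only */

static void demo_user_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_user, refcount));
}

static struct demo_user *demo_user_create(void)
{
	struct demo_user *u = kzalloc(sizeof(*u), GFP_KERNEL);

	if (!u)
		return NULL;
	kref_init(&u->refcount);	/* this is the list's reference */
	u->valid = 1;
	spin_lock(&demo_users_lock);
	list_add_rcu(&u->link, &demo_users);
	spin_unlock(&demo_users_lock);
	return u;
}

/* Reader side: no lock ordering issues, just RCU plus a refcount. */
static struct demo_user *demo_user_find_first_valid(void)
{
	struct demo_user *u, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(u, &demo_users, link) {
		if (u->valid) {
			kref_get(&u->refcount);	/* pin it past the RCU section */
			found = u;
			break;
		}
	}
	rcu_read_unlock();
	return found;			/* caller drops it with kref_put() */
}

/* Writer side: unlink, wait for readers to finish, drop the list's reference. */
static void demo_user_destroy(struct demo_user *u)
{
	spin_lock(&demo_users_lock);
	u->valid = 0;
	list_del_rcu(&u->link);
	spin_unlock(&demo_users_lock);

	synchronize_rcu();		/* no traversal can still see it */
	kref_put(&u->refcount, demo_user_release);
}

In the patch itself the same shape shows up as intf->users and intf->cmd_rcvrs: list_add_rcu()/list_del_rcu() under seq_lock or cmd_rcvrs_lock, a kref_get() whenever a message or receiver points at a user or interface, and kref_put(..., free_user) / kref_put(..., intf_free) when the last reference goes away.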
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/char/ipmi/ipmi_msghandler.c	953
1 file changed, 498 insertions(+), 455 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 32fa82c78c73..320d7f035bf9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,13 +38,13 @@
38#include <linux/sched.h> 38#include <linux/sched.h>
39#include <linux/poll.h> 39#include <linux/poll.h>
40#include <linux/spinlock.h> 40#include <linux/spinlock.h>
41#include <linux/rwsem.h>
42#include <linux/slab.h> 41#include <linux/slab.h>
43#include <linux/ipmi.h> 42#include <linux/ipmi.h>
44#include <linux/ipmi_smi.h> 43#include <linux/ipmi_smi.h>
45#include <linux/notifier.h> 44#include <linux/notifier.h>
46#include <linux/init.h> 45#include <linux/init.h>
47#include <linux/proc_fs.h> 46#include <linux/proc_fs.h>
47#include <linux/rcupdate.h>
48 48
49#define PFX "IPMI message handler: " 49#define PFX "IPMI message handler: "
50 50
@@ -65,10 +65,19 @@ struct proc_dir_entry *proc_ipmi_root = NULL;
65 the max message timer. This is in milliseconds. */ 65 the max message timer. This is in milliseconds. */
66#define MAX_MSG_TIMEOUT 60000 66#define MAX_MSG_TIMEOUT 60000
67 67
68
69/*
70 * The main "user" data structure.
71 */
68struct ipmi_user 72struct ipmi_user
69{ 73{
70 struct list_head link; 74 struct list_head link;
71 75
76 /* Set to "0" when the user is destroyed. */
77 int valid;
78
79 struct kref refcount;
80
72 /* The upper layer that handles receive messages. */ 81 /* The upper layer that handles receive messages. */
73 struct ipmi_user_hndl *handler; 82 struct ipmi_user_hndl *handler;
74 void *handler_data; 83 void *handler_data;
@@ -87,6 +96,15 @@ struct cmd_rcvr
87 ipmi_user_t user; 96 ipmi_user_t user;
88 unsigned char netfn; 97 unsigned char netfn;
89 unsigned char cmd; 98 unsigned char cmd;
99
100 /*
101 * This is used to form a linked lised during mass deletion.
102 * Since this is in an RCU list, we cannot use the link above
103 * or change any data until the RCU period completes. So we
104 * use this next variable during mass deletion so we can have
105 * a list and don't have to wait and restart the search on
106 * every individual deletion of a command. */
107 struct cmd_rcvr *next;
90}; 108};
91 109
92struct seq_table 110struct seq_table
@@ -150,13 +168,11 @@ struct ipmi_smi
150 /* What interface number are we? */ 168 /* What interface number are we? */
151 int intf_num; 169 int intf_num;
152 170
153 /* The list of upper layers that are using me. We read-lock 171 struct kref refcount;
154 this when delivering messages to the upper layer to keep 172
155 the user from going away while we are processing the 173 /* The list of upper layers that are using me. seq_lock
156 message. This means that you cannot add or delete a user 174 * protects this. */
157 from the receive callback. */ 175 struct list_head users;
158 rwlock_t users_lock;
159 struct list_head users;
160 176
161 /* Used for wake ups at startup. */ 177 /* Used for wake ups at startup. */
162 wait_queue_head_t waitq; 178 wait_queue_head_t waitq;
@@ -193,7 +209,7 @@ struct ipmi_smi
193 209
194 /* The list of command receivers that are registered for commands 210 /* The list of command receivers that are registered for commands
195 on this interface. */ 211 on this interface. */
196 rwlock_t cmd_rcvr_lock; 212 spinlock_t cmd_rcvrs_lock;
197 struct list_head cmd_rcvrs; 213 struct list_head cmd_rcvrs;
198 214
199 /* Events that were queues because no one was there to receive 215 /* Events that were queues because no one was there to receive
@@ -296,16 +312,17 @@ struct ipmi_smi
296 unsigned int events; 312 unsigned int events;
297}; 313};
298 314
315/* Used to mark an interface entry that cannot be used but is not a
316 * free entry, either, primarily used at creation and deletion time so
317 * a slot doesn't get reused too quickly. */
318#define IPMI_INVALID_INTERFACE_ENTRY ((ipmi_smi_t) ((long) 1))
319#define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \
320 || (i == IPMI_INVALID_INTERFACE_ENTRY))
321
299#define MAX_IPMI_INTERFACES 4 322#define MAX_IPMI_INTERFACES 4
300static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; 323static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES];
301 324
302/* Used to keep interfaces from going away while operations are 325/* Directly protects the ipmi_interfaces data structure. */
303 operating on interfaces. Grab read if you are not modifying the
304 interfaces, write if you are. */
305static DECLARE_RWSEM(interfaces_sem);
306
307/* Directly protects the ipmi_interfaces data structure. This is
308 claimed in the timer interrupt. */
309static DEFINE_SPINLOCK(interfaces_lock); 326static DEFINE_SPINLOCK(interfaces_lock);
310 327
311/* List of watchers that want to know when smi's are added and 328/* List of watchers that want to know when smi's are added and
@@ -313,20 +330,73 @@ static DEFINE_SPINLOCK(interfaces_lock);
313static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers); 330static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
314static DECLARE_RWSEM(smi_watchers_sem); 331static DECLARE_RWSEM(smi_watchers_sem);
315 332
333
334static void free_recv_msg_list(struct list_head *q)
335{
336 struct ipmi_recv_msg *msg, *msg2;
337
338 list_for_each_entry_safe(msg, msg2, q, link) {
339 list_del(&msg->link);
340 ipmi_free_recv_msg(msg);
341 }
342}
343
344static void clean_up_interface_data(ipmi_smi_t intf)
345{
346 int i;
347 struct cmd_rcvr *rcvr, *rcvr2;
348 unsigned long flags;
349 struct list_head list;
350
351 free_recv_msg_list(&intf->waiting_msgs);
352 free_recv_msg_list(&intf->waiting_events);
353
354 /* Wholesale remove all the entries from the list in the
355 * interface and wait for RCU to know that none are in use. */
356 spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
357 list_add_rcu(&list, &intf->cmd_rcvrs);
358 list_del_rcu(&intf->cmd_rcvrs);
359 spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
360 synchronize_rcu();
361
362 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
363 kfree(rcvr);
364
365 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
366 if ((intf->seq_table[i].inuse)
367 && (intf->seq_table[i].recv_msg))
368 {
369 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
370 }
371 }
372}
373
374static void intf_free(struct kref *ref)
375{
376 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
377
378 clean_up_interface_data(intf);
379 kfree(intf);
380}
381
316int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) 382int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
317{ 383{
318 int i; 384 int i;
385 unsigned long flags;
319 386
320 down_read(&interfaces_sem);
321 down_write(&smi_watchers_sem); 387 down_write(&smi_watchers_sem);
322 list_add(&(watcher->link), &smi_watchers); 388 list_add(&(watcher->link), &smi_watchers);
389 up_write(&smi_watchers_sem);
390 spin_lock_irqsave(&interfaces_lock, flags);
323 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 391 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
324 if (ipmi_interfaces[i] != NULL) { 392 ipmi_smi_t intf = ipmi_interfaces[i];
325 watcher->new_smi(i); 393 if (IPMI_INVALID_INTERFACE(intf))
326 } 394 continue;
395 spin_unlock_irqrestore(&interfaces_lock, flags);
396 watcher->new_smi(i);
397 spin_lock_irqsave(&interfaces_lock, flags);
327 } 398 }
328 up_write(&smi_watchers_sem); 399 spin_unlock_irqrestore(&interfaces_lock, flags);
329 up_read(&interfaces_sem);
330 return 0; 400 return 0;
331} 401}
332 402
@@ -471,8 +541,8 @@ static void deliver_response(struct ipmi_recv_msg *msg)
471 } 541 }
472 ipmi_free_recv_msg(msg); 542 ipmi_free_recv_msg(msg);
473 } else { 543 } else {
474 msg->user->handler->ipmi_recv_hndl(msg, 544 ipmi_user_t user = msg->user;
475 msg->user->handler_data); 545 user->handler->ipmi_recv_hndl(msg, user->handler_data);
476 } 546 }
477} 547}
478 548
@@ -662,15 +732,18 @@ int ipmi_create_user(unsigned int if_num,
662 if (! new_user) 732 if (! new_user)
663 return -ENOMEM; 733 return -ENOMEM;
664 734
665 down_read(&interfaces_sem); 735 spin_lock_irqsave(&interfaces_lock, flags);
666 if ((if_num >= MAX_IPMI_INTERFACES) || ipmi_interfaces[if_num] == NULL) 736 intf = ipmi_interfaces[if_num];
667 { 737 if ((if_num >= MAX_IPMI_INTERFACES) || IPMI_INVALID_INTERFACE(intf)) {
668 rv = -EINVAL; 738 spin_unlock_irqrestore(&interfaces_lock, flags);
669 goto out_unlock; 739 return -EINVAL;
670 } 740 }
671 741
672 intf = ipmi_interfaces[if_num]; 742 /* Note that each existing user holds a refcount to the interface. */
743 kref_get(&intf->refcount);
744 spin_unlock_irqrestore(&interfaces_lock, flags);
673 745
746 kref_init(&new_user->refcount);
674 new_user->handler = handler; 747 new_user->handler = handler;
675 new_user->handler_data = handler_data; 748 new_user->handler_data = handler_data;
676 new_user->intf = intf; 749 new_user->intf = intf;
@@ -678,98 +751,92 @@ int ipmi_create_user(unsigned int if_num,
678 751
679 if (!try_module_get(intf->handlers->owner)) { 752 if (!try_module_get(intf->handlers->owner)) {
680 rv = -ENODEV; 753 rv = -ENODEV;
681 goto out_unlock; 754 goto out_err;
682 } 755 }
683 756
684 if (intf->handlers->inc_usecount) { 757 if (intf->handlers->inc_usecount) {
685 rv = intf->handlers->inc_usecount(intf->send_info); 758 rv = intf->handlers->inc_usecount(intf->send_info);
686 if (rv) { 759 if (rv) {
687 module_put(intf->handlers->owner); 760 module_put(intf->handlers->owner);
688 goto out_unlock; 761 goto out_err;
689 } 762 }
690 } 763 }
691 764
692 write_lock_irqsave(&intf->users_lock, flags); 765 new_user->valid = 1;
693 list_add_tail(&new_user->link, &intf->users); 766 spin_lock_irqsave(&intf->seq_lock, flags);
694 write_unlock_irqrestore(&intf->users_lock, flags); 767 list_add_rcu(&new_user->link, &intf->users);
695 768 spin_unlock_irqrestore(&intf->seq_lock, flags);
696 out_unlock: 769 *user = new_user;
697 if (rv) { 770 return 0;
698 kfree(new_user);
699 } else {
700 *user = new_user;
701 }
702 771
703 up_read(&interfaces_sem); 772 out_err:
773 kfree(new_user);
774 kref_put(&intf->refcount, intf_free);
704 return rv; 775 return rv;
705} 776}
706 777
707static int ipmi_destroy_user_nolock(ipmi_user_t user) 778static void free_user(struct kref *ref)
779{
780 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
781 kfree(user);
782}
783
784int ipmi_destroy_user(ipmi_user_t user)
708{ 785{
709 int rv = -ENODEV; 786 int rv = -ENODEV;
710 ipmi_user_t t_user; 787 ipmi_smi_t intf = user->intf;
711 struct cmd_rcvr *rcvr, *rcvr2;
712 int i; 788 int i;
713 unsigned long flags; 789 unsigned long flags;
790 struct cmd_rcvr *rcvr;
791 struct list_head *entry1, *entry2;
792 struct cmd_rcvr *rcvrs = NULL;
714 793
715 /* Find the user and delete them from the list. */ 794 user->valid = 1;
716 list_for_each_entry(t_user, &(user->intf->users), link) {
717 if (t_user == user) {
718 list_del(&t_user->link);
719 rv = 0;
720 break;
721 }
722 }
723 795
724 if (rv) { 796 /* Remove the user from the interface's sequence table. */
725 goto out_unlock; 797 spin_lock_irqsave(&intf->seq_lock, flags);
726 } 798 list_del_rcu(&user->link);
727 799
728 /* Remove the user from the interfaces sequence table. */
729 spin_lock_irqsave(&(user->intf->seq_lock), flags);
730 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 800 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
731 if (user->intf->seq_table[i].inuse 801 if (intf->seq_table[i].inuse
732 && (user->intf->seq_table[i].recv_msg->user == user)) 802 && (intf->seq_table[i].recv_msg->user == user))
733 { 803 {
734 user->intf->seq_table[i].inuse = 0; 804 intf->seq_table[i].inuse = 0;
735 } 805 }
736 } 806 }
737 spin_unlock_irqrestore(&(user->intf->seq_lock), flags); 807 spin_unlock_irqrestore(&intf->seq_lock, flags);
738 808
739 /* Remove the user from the command receiver's table. */ 809 /*
740 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags); 810 * Remove the user from the command receiver's table. First
741 list_for_each_entry_safe(rcvr, rcvr2, &(user->intf->cmd_rcvrs), link) { 811 * we build a list of everything (not using the standard link,
812 * since other things may be using it till we do
813 * synchronize_rcu()) then free everything in that list.
814 */
815 spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
816 list_for_each_safe_rcu(entry1, entry2, &intf->cmd_rcvrs) {
817 rcvr = list_entry(entry1, struct cmd_rcvr, link);
742 if (rcvr->user == user) { 818 if (rcvr->user == user) {
743 list_del(&rcvr->link); 819 list_del_rcu(&rcvr->link);
744 kfree(rcvr); 820 rcvr->next = rcvrs;
821 rcvrs = rcvr;
745 } 822 }
746 } 823 }
747 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags); 824 spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
825 synchronize_rcu();
826 while (rcvrs) {
827 rcvr = rcvrs;
828 rcvrs = rcvr->next;
829 kfree(rcvr);
830 }
748 831
749 kfree(user); 832 module_put(intf->handlers->owner);
833 if (intf->handlers->dec_usecount)
834 intf->handlers->dec_usecount(intf->send_info);
750 835
751 out_unlock: 836 kref_put(&intf->refcount, intf_free);
752 837
753 return rv; 838 kref_put(&user->refcount, free_user);
754}
755
756int ipmi_destroy_user(ipmi_user_t user)
757{
758 int rv;
759 ipmi_smi_t intf = user->intf;
760 unsigned long flags;
761 839
762 down_read(&interfaces_sem);
763 write_lock_irqsave(&intf->users_lock, flags);
764 rv = ipmi_destroy_user_nolock(user);
765 if (!rv) {
766 module_put(intf->handlers->owner);
767 if (intf->handlers->dec_usecount)
768 intf->handlers->dec_usecount(intf->send_info);
769 }
770
771 write_unlock_irqrestore(&intf->users_lock, flags);
772 up_read(&interfaces_sem);
773 return rv; 840 return rv;
774} 841}
775 842
@@ -823,62 +890,78 @@ int ipmi_get_my_LUN(ipmi_user_t user,
823 890
824int ipmi_set_gets_events(ipmi_user_t user, int val) 891int ipmi_set_gets_events(ipmi_user_t user, int val)
825{ 892{
826 unsigned long flags; 893 unsigned long flags;
827 struct ipmi_recv_msg *msg, *msg2; 894 ipmi_smi_t intf = user->intf;
895 struct ipmi_recv_msg *msg, *msg2;
896 struct list_head msgs;
828 897
829 read_lock(&(user->intf->users_lock)); 898 INIT_LIST_HEAD(&msgs);
830 spin_lock_irqsave(&(user->intf->events_lock), flags); 899
900 spin_lock_irqsave(&intf->events_lock, flags);
831 user->gets_events = val; 901 user->gets_events = val;
832 902
833 if (val) { 903 if (val) {
834 /* Deliver any queued events. */ 904 /* Deliver any queued events. */
835 list_for_each_entry_safe(msg, msg2, &(user->intf->waiting_events), link) { 905 list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) {
836 list_del(&msg->link); 906 list_del(&msg->link);
837 msg->user = user; 907 list_add_tail(&msg->link, &msgs);
838 deliver_response(msg);
839 } 908 }
840 } 909 }
841 910
842 spin_unlock_irqrestore(&(user->intf->events_lock), flags); 911 /* Hold the events lock while doing this to preserve order. */
843 read_unlock(&(user->intf->users_lock)); 912 list_for_each_entry_safe(msg, msg2, &msgs, link) {
913 msg->user = user;
914 kref_get(&user->refcount);
915 deliver_response(msg);
916 }
917
918 spin_unlock_irqrestore(&intf->events_lock, flags);
844 919
845 return 0; 920 return 0;
846} 921}
847 922
923static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
924 unsigned char netfn,
925 unsigned char cmd)
926{
927 struct cmd_rcvr *rcvr;
928
929 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
930 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd))
931 return rcvr;
932 }
933 return NULL;
934}
935
848int ipmi_register_for_cmd(ipmi_user_t user, 936int ipmi_register_for_cmd(ipmi_user_t user,
849 unsigned char netfn, 937 unsigned char netfn,
850 unsigned char cmd) 938 unsigned char cmd)
851{ 939{
852 struct cmd_rcvr *cmp; 940 ipmi_smi_t intf = user->intf;
853 unsigned long flags; 941 struct cmd_rcvr *rcvr;
854 struct cmd_rcvr *rcvr; 942 struct cmd_rcvr *entry;
855 int rv = 0; 943 int rv = 0;
856 944
857 945
858 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL); 946 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
859 if (! rcvr) 947 if (! rcvr)
860 return -ENOMEM; 948 return -ENOMEM;
949 rcvr->cmd = cmd;
950 rcvr->netfn = netfn;
951 rcvr->user = user;
861 952
862 read_lock(&(user->intf->users_lock)); 953 spin_lock_irq(&intf->cmd_rcvrs_lock);
863 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
864 /* Make sure the command/netfn is not already registered. */ 954 /* Make sure the command/netfn is not already registered. */
865 list_for_each_entry(cmp, &(user->intf->cmd_rcvrs), link) { 955 entry = find_cmd_rcvr(intf, netfn, cmd);
866 if ((cmp->netfn == netfn) && (cmp->cmd == cmd)) { 956 if (entry) {
867 rv = -EBUSY; 957 rv = -EBUSY;
868 break; 958 goto out_unlock;
869 }
870 }
871
872 if (! rv) {
873 rcvr->cmd = cmd;
874 rcvr->netfn = netfn;
875 rcvr->user = user;
876 list_add_tail(&(rcvr->link), &(user->intf->cmd_rcvrs));
877 } 959 }
878 960
879 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags); 961 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
880 read_unlock(&(user->intf->users_lock));
881 962
963 out_unlock:
964 spin_unlock_irq(&intf->cmd_rcvrs_lock);
882 if (rv) 965 if (rv)
883 kfree(rcvr); 966 kfree(rcvr);
884 967
@@ -889,31 +972,28 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
889 unsigned char netfn, 972 unsigned char netfn,
890 unsigned char cmd) 973 unsigned char cmd)
891{ 974{
892 unsigned long flags; 975 ipmi_smi_t intf = user->intf;
893 struct cmd_rcvr *rcvr; 976 struct cmd_rcvr *rcvr;
894 int rv = -ENOENT;
895 977
896 read_lock(&(user->intf->users_lock)); 978 spin_lock_irq(&intf->cmd_rcvrs_lock);
897 write_lock_irqsave(&(user->intf->cmd_rcvr_lock), flags);
898 /* Make sure the command/netfn is not already registered. */ 979 /* Make sure the command/netfn is not already registered. */
899 list_for_each_entry(rcvr, &(user->intf->cmd_rcvrs), link) { 980 rcvr = find_cmd_rcvr(intf, netfn, cmd);
900 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) { 981 if ((rcvr) && (rcvr->user == user)) {
901 rv = 0; 982 list_del_rcu(&rcvr->link);
902 list_del(&rcvr->link); 983 spin_unlock_irq(&intf->cmd_rcvrs_lock);
903 kfree(rcvr); 984 synchronize_rcu();
904 break; 985 kfree(rcvr);
905 } 986 return 0;
987 } else {
988 spin_unlock_irq(&intf->cmd_rcvrs_lock);
989 return -ENOENT;
906 } 990 }
907 write_unlock_irqrestore(&(user->intf->cmd_rcvr_lock), flags);
908 read_unlock(&(user->intf->users_lock));
909
910 return rv;
911} 991}
912 992
913void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) 993void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
914{ 994{
915 user->intf->handlers->set_run_to_completion(user->intf->send_info, 995 ipmi_smi_t intf = user->intf;
916 val); 996 intf->handlers->set_run_to_completion(intf->send_info, val);
917} 997}
918 998
919static unsigned char 999static unsigned char
@@ -1010,19 +1090,19 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1010 supplied in certain circumstances (mainly at panic time). If 1090 supplied in certain circumstances (mainly at panic time). If
1011 messages are supplied, they will be freed, even if an error 1091 messages are supplied, they will be freed, even if an error
1012 occurs. */ 1092 occurs. */
1013static inline int i_ipmi_request(ipmi_user_t user, 1093static int i_ipmi_request(ipmi_user_t user,
1014 ipmi_smi_t intf, 1094 ipmi_smi_t intf,
1015 struct ipmi_addr *addr, 1095 struct ipmi_addr *addr,
1016 long msgid, 1096 long msgid,
1017 struct kernel_ipmi_msg *msg, 1097 struct kernel_ipmi_msg *msg,
1018 void *user_msg_data, 1098 void *user_msg_data,
1019 void *supplied_smi, 1099 void *supplied_smi,
1020 struct ipmi_recv_msg *supplied_recv, 1100 struct ipmi_recv_msg *supplied_recv,
1021 int priority, 1101 int priority,
1022 unsigned char source_address, 1102 unsigned char source_address,
1023 unsigned char source_lun, 1103 unsigned char source_lun,
1024 int retries, 1104 int retries,
1025 unsigned int retry_time_ms) 1105 unsigned int retry_time_ms)
1026{ 1106{
1027 int rv = 0; 1107 int rv = 0;
1028 struct ipmi_smi_msg *smi_msg; 1108 struct ipmi_smi_msg *smi_msg;
@@ -1051,6 +1131,8 @@ static inline int i_ipmi_request(ipmi_user_t user,
1051 } 1131 }
1052 1132
1053 recv_msg->user = user; 1133 recv_msg->user = user;
1134 if (user)
1135 kref_get(&user->refcount);
1054 recv_msg->msgid = msgid; 1136 recv_msg->msgid = msgid;
1055 /* Store the message to send in the receive message so timeout 1137 /* Store the message to send in the receive message so timeout
1056 responses can get the proper response data. */ 1138 responses can get the proper response data. */
@@ -1725,11 +1807,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1725 unsigned char version_major, 1807 unsigned char version_major,
1726 unsigned char version_minor, 1808 unsigned char version_minor,
1727 unsigned char slave_addr, 1809 unsigned char slave_addr,
1728 ipmi_smi_t *intf) 1810 ipmi_smi_t *new_intf)
1729{ 1811{
1730 int i, j; 1812 int i, j;
1731 int rv; 1813 int rv;
1732 ipmi_smi_t new_intf; 1814 ipmi_smi_t intf;
1733 unsigned long flags; 1815 unsigned long flags;
1734 1816
1735 1817
@@ -1745,189 +1827,142 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
1745 return -ENODEV; 1827 return -ENODEV;
1746 } 1828 }
1747 1829
1748 new_intf = kmalloc(sizeof(*new_intf), GFP_KERNEL); 1830 intf = kmalloc(sizeof(*intf), GFP_KERNEL);
1749 if (!new_intf) 1831 if (!intf)
1750 return -ENOMEM; 1832 return -ENOMEM;
1751 memset(new_intf, 0, sizeof(*new_intf)); 1833 memset(intf, 0, sizeof(*intf));
1752 1834 intf->intf_num = -1;
1753 new_intf->proc_dir = NULL; 1835 kref_init(&intf->refcount);
1836 intf->version_major = version_major;
1837 intf->version_minor = version_minor;
1838 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1839 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
1840 intf->channels[j].lun = 2;
1841 }
1842 if (slave_addr != 0)
1843 intf->channels[0].address = slave_addr;
1844 INIT_LIST_HEAD(&intf->users);
1845 intf->handlers = handlers;
1846 intf->send_info = send_info;
1847 spin_lock_init(&intf->seq_lock);
1848 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
1849 intf->seq_table[j].inuse = 0;
1850 intf->seq_table[j].seqid = 0;
1851 }
1852 intf->curr_seq = 0;
1853#ifdef CONFIG_PROC_FS
1854 spin_lock_init(&intf->proc_entry_lock);
1855#endif
1856 spin_lock_init(&intf->waiting_msgs_lock);
1857 INIT_LIST_HEAD(&intf->waiting_msgs);
1858 spin_lock_init(&intf->events_lock);
1859 INIT_LIST_HEAD(&intf->waiting_events);
1860 intf->waiting_events_count = 0;
1861 spin_lock_init(&intf->cmd_rcvrs_lock);
1862 INIT_LIST_HEAD(&intf->cmd_rcvrs);
1863 init_waitqueue_head(&intf->waitq);
1864
1865 spin_lock_init(&intf->counter_lock);
1866 intf->proc_dir = NULL;
1754 1867
1755 rv = -ENOMEM; 1868 rv = -ENOMEM;
1756 1869 spin_lock_irqsave(&interfaces_lock, flags);
1757 down_write(&interfaces_sem);
1758 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 1870 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1759 if (ipmi_interfaces[i] == NULL) { 1871 if (ipmi_interfaces[i] == NULL) {
1760 new_intf->intf_num = i; 1872 intf->intf_num = i;
1761 new_intf->version_major = version_major; 1873 /* Reserve the entry till we are done. */
1762 new_intf->version_minor = version_minor; 1874 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
1763 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
1764 new_intf->channels[j].address
1765 = IPMI_BMC_SLAVE_ADDR;
1766 new_intf->channels[j].lun = 2;
1767 }
1768 if (slave_addr != 0)
1769 new_intf->channels[0].address = slave_addr;
1770 rwlock_init(&(new_intf->users_lock));
1771 INIT_LIST_HEAD(&(new_intf->users));
1772 new_intf->handlers = handlers;
1773 new_intf->send_info = send_info;
1774 spin_lock_init(&(new_intf->seq_lock));
1775 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
1776 new_intf->seq_table[j].inuse = 0;
1777 new_intf->seq_table[j].seqid = 0;
1778 }
1779 new_intf->curr_seq = 0;
1780#ifdef CONFIG_PROC_FS
1781 spin_lock_init(&(new_intf->proc_entry_lock));
1782#endif
1783 spin_lock_init(&(new_intf->waiting_msgs_lock));
1784 INIT_LIST_HEAD(&(new_intf->waiting_msgs));
1785 spin_lock_init(&(new_intf->events_lock));
1786 INIT_LIST_HEAD(&(new_intf->waiting_events));
1787 new_intf->waiting_events_count = 0;
1788 rwlock_init(&(new_intf->cmd_rcvr_lock));
1789 init_waitqueue_head(&new_intf->waitq);
1790 INIT_LIST_HEAD(&(new_intf->cmd_rcvrs));
1791
1792 spin_lock_init(&(new_intf->counter_lock));
1793
1794 spin_lock_irqsave(&interfaces_lock, flags);
1795 ipmi_interfaces[i] = new_intf;
1796 spin_unlock_irqrestore(&interfaces_lock, flags);
1797
1798 rv = 0; 1875 rv = 0;
1799 *intf = new_intf;
1800 break; 1876 break;
1801 } 1877 }
1802 } 1878 }
1879 spin_unlock_irqrestore(&interfaces_lock, flags);
1880 if (rv)
1881 goto out;
1803 1882
1804 downgrade_write(&interfaces_sem); 1883 /* FIXME - this is an ugly kludge, this sets the intf for the
1805 1884 caller before sending any messages with it. */
1806 if (rv == 0) 1885 *new_intf = intf;
1807 rv = add_proc_entries(*intf, i);
1808
1809 if (rv == 0) {
1810 if ((version_major > 1)
1811 || ((version_major == 1) && (version_minor >= 5)))
1812 {
1813 /* Start scanning the channels to see what is
1814 available. */
1815 (*intf)->null_user_handler = channel_handler;
1816 (*intf)->curr_channel = 0;
1817 rv = send_channel_info_cmd(*intf, 0);
1818 if (rv)
1819 goto out;
1820 1886
1821 /* Wait for the channel info to be read. */ 1887 if ((version_major > 1)
1822 up_read(&interfaces_sem); 1888 || ((version_major == 1) && (version_minor >= 5)))
1823 wait_event((*intf)->waitq, 1889 {
1824 ((*intf)->curr_channel>=IPMI_MAX_CHANNELS)); 1890 /* Start scanning the channels to see what is
1825 down_read(&interfaces_sem); 1891 available. */
1892 intf->null_user_handler = channel_handler;
1893 intf->curr_channel = 0;
1894 rv = send_channel_info_cmd(intf, 0);
1895 if (rv)
1896 goto out;
1826 1897
1827 if (ipmi_interfaces[i] != new_intf) 1898 /* Wait for the channel info to be read. */
1828 /* Well, it went away. Just return. */ 1899 wait_event(intf->waitq,
1829 goto out; 1900 intf->curr_channel >= IPMI_MAX_CHANNELS);
1830 } else { 1901 } else {
1831 /* Assume a single IPMB channel at zero. */ 1902 /* Assume a single IPMB channel at zero. */
1832 (*intf)->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; 1903 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
1833 (*intf)->channels[0].protocol 1904 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
1834 = IPMI_CHANNEL_PROTOCOL_IPMB;
1835 }
1836
1837 /* Call all the watcher interfaces to tell
1838 them that a new interface is available. */
1839 call_smi_watchers(i);
1840 } 1905 }
1841 1906
1842 out: 1907 if (rv == 0)
1843 up_read(&interfaces_sem); 1908 rv = add_proc_entries(intf, i);
1844 1909
1910 out:
1845 if (rv) { 1911 if (rv) {
1846 if (new_intf->proc_dir) 1912 if (intf->proc_dir)
1847 remove_proc_entries(new_intf); 1913 remove_proc_entries(intf);
1848 kfree(new_intf); 1914 kref_put(&intf->refcount, intf_free);
1915 if (i < MAX_IPMI_INTERFACES) {
1916 spin_lock_irqsave(&interfaces_lock, flags);
1917 ipmi_interfaces[i] = NULL;
1918 spin_unlock_irqrestore(&interfaces_lock, flags);
1919 }
1920 } else {
1921 spin_lock_irqsave(&interfaces_lock, flags);
1922 ipmi_interfaces[i] = intf;
1923 spin_unlock_irqrestore(&interfaces_lock, flags);
1924 call_smi_watchers(i);
1849 } 1925 }
1850 1926
1851 return rv; 1927 return rv;
1852} 1928}
1853 1929
1854static void free_recv_msg_list(struct list_head *q)
1855{
1856 struct ipmi_recv_msg *msg, *msg2;
1857
1858 list_for_each_entry_safe(msg, msg2, q, link) {
1859 list_del(&msg->link);
1860 ipmi_free_recv_msg(msg);
1861 }
1862}
1863
1864static void free_cmd_rcvr_list(struct list_head *q)
1865{
1866 struct cmd_rcvr *rcvr, *rcvr2;
1867
1868 list_for_each_entry_safe(rcvr, rcvr2, q, link) {
1869 list_del(&rcvr->link);
1870 kfree(rcvr);
1871 }
1872}
1873
1874static void clean_up_interface_data(ipmi_smi_t intf)
1875{
1876 int i;
1877
1878 free_recv_msg_list(&(intf->waiting_msgs));
1879 free_recv_msg_list(&(intf->waiting_events));
1880 free_cmd_rcvr_list(&(intf->cmd_rcvrs));
1881
1882 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
1883 if ((intf->seq_table[i].inuse)
1884 && (intf->seq_table[i].recv_msg))
1885 {
1886 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
1887 }
1888 }
1889}
1890
1891int ipmi_unregister_smi(ipmi_smi_t intf) 1930int ipmi_unregister_smi(ipmi_smi_t intf)
1892{ 1931{
1893 int rv = -ENODEV;
1894 int i; 1932 int i;
1895 struct ipmi_smi_watcher *w; 1933 struct ipmi_smi_watcher *w;
1896 unsigned long flags; 1934 unsigned long flags;
1897 1935
1898 down_write(&interfaces_sem); 1936 spin_lock_irqsave(&interfaces_lock, flags);
1899 if (list_empty(&(intf->users))) 1937 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
1900 { 1938 if (ipmi_interfaces[i] == intf) {
1901 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 1939 /* Set the interface number reserved until we
1902 if (ipmi_interfaces[i] == intf) { 1940 * are done. */
1903 remove_proc_entries(intf); 1941 ipmi_interfaces[i] = IPMI_INVALID_INTERFACE_ENTRY;
1904 spin_lock_irqsave(&interfaces_lock, flags); 1942 intf->intf_num = -1;
1905 ipmi_interfaces[i] = NULL; 1943 break;
1906 clean_up_interface_data(intf);
1907 spin_unlock_irqrestore(&interfaces_lock,flags);
1908 kfree(intf);
1909 rv = 0;
1910 goto out_call_watcher;
1911 }
1912 } 1944 }
1913 } else {
1914 rv = -EBUSY;
1915 } 1945 }
1916 up_write(&interfaces_sem); 1946 spin_unlock_irqrestore(&interfaces_lock,flags);
1917 1947
1918 return rv; 1948 if (i == MAX_IPMI_INTERFACES)
1949 return -ENODEV;
1919 1950
1920 out_call_watcher: 1951 remove_proc_entries(intf);
1921 downgrade_write(&interfaces_sem);
1922 1952
1923 /* Call all the watcher interfaces to tell them that 1953 /* Call all the watcher interfaces to tell them that
1924 an interface is gone. */ 1954 an interface is gone. */
1925 down_read(&smi_watchers_sem); 1955 down_read(&smi_watchers_sem);
1926 list_for_each_entry(w, &smi_watchers, link) { 1956 list_for_each_entry(w, &smi_watchers, link)
1927 w->smi_gone(i); 1957 w->smi_gone(i);
1928 }
1929 up_read(&smi_watchers_sem); 1958 up_read(&smi_watchers_sem);
1930 up_read(&interfaces_sem); 1959
1960 /* Allow the entry to be reused now. */
1961 spin_lock_irqsave(&interfaces_lock, flags);
1962 ipmi_interfaces[i] = NULL;
1963 spin_unlock_irqrestore(&interfaces_lock,flags);
1964
1965 kref_put(&intf->refcount, intf_free);
1931 return 0; 1966 return 0;
1932} 1967}
1933 1968
@@ -1998,14 +2033,14 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
1998static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, 2033static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
1999 struct ipmi_smi_msg *msg) 2034 struct ipmi_smi_msg *msg)
2000{ 2035{
2001 struct cmd_rcvr *rcvr; 2036 struct cmd_rcvr *rcvr;
2002 int rv = 0; 2037 int rv = 0;
2003 unsigned char netfn; 2038 unsigned char netfn;
2004 unsigned char cmd; 2039 unsigned char cmd;
2005 ipmi_user_t user = NULL; 2040 ipmi_user_t user = NULL;
2006 struct ipmi_ipmb_addr *ipmb_addr; 2041 struct ipmi_ipmb_addr *ipmb_addr;
2007 struct ipmi_recv_msg *recv_msg; 2042 struct ipmi_recv_msg *recv_msg;
2008 unsigned long flags; 2043 unsigned long flags;
2009 2044
2010 if (msg->rsp_size < 10) { 2045 if (msg->rsp_size < 10) {
2011 /* Message not big enough, just ignore it. */ 2046 /* Message not big enough, just ignore it. */
@@ -2023,16 +2058,14 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2023 netfn = msg->rsp[4] >> 2; 2058 netfn = msg->rsp[4] >> 2;
2024 cmd = msg->rsp[8]; 2059 cmd = msg->rsp[8];
2025 2060
2026 read_lock(&(intf->cmd_rcvr_lock)); 2061 spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
2027 2062 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2028 /* Find the command/netfn. */ 2063 if (rcvr) {
2029 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { 2064 user = rcvr->user;
2030 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) { 2065 kref_get(&user->refcount);
2031 user = rcvr->user; 2066 } else
2032 break; 2067 user = NULL;
2033 } 2068 spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
2034 }
2035 read_unlock(&(intf->cmd_rcvr_lock));
2036 2069
2037 if (user == NULL) { 2070 if (user == NULL) {
2038 /* We didn't find a user, deliver an error response. */ 2071 /* We didn't find a user, deliver an error response. */
@@ -2079,6 +2112,7 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2079 message, so requeue it for handling 2112 message, so requeue it for handling
2080 later. */ 2113 later. */
2081 rv = 1; 2114 rv = 1;
2115 kref_put(&user->refcount, free_user);
2082 } else { 2116 } else {
2083 /* Extract the source address from the data. */ 2117 /* Extract the source address from the data. */
2084 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; 2118 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
@@ -2179,14 +2213,14 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
2179static int handle_lan_get_msg_cmd(ipmi_smi_t intf, 2213static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2180 struct ipmi_smi_msg *msg) 2214 struct ipmi_smi_msg *msg)
2181{ 2215{
2182 struct cmd_rcvr *rcvr; 2216 struct cmd_rcvr *rcvr;
2183 int rv = 0; 2217 int rv = 0;
2184 unsigned char netfn; 2218 unsigned char netfn;
2185 unsigned char cmd; 2219 unsigned char cmd;
2186 ipmi_user_t user = NULL; 2220 ipmi_user_t user = NULL;
2187 struct ipmi_lan_addr *lan_addr; 2221 struct ipmi_lan_addr *lan_addr;
2188 struct ipmi_recv_msg *recv_msg; 2222 struct ipmi_recv_msg *recv_msg;
2189 unsigned long flags; 2223 unsigned long flags;
2190 2224
2191 if (msg->rsp_size < 12) { 2225 if (msg->rsp_size < 12) {
2192 /* Message not big enough, just ignore it. */ 2226 /* Message not big enough, just ignore it. */
@@ -2204,19 +2238,17 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2204 netfn = msg->rsp[6] >> 2; 2238 netfn = msg->rsp[6] >> 2;
2205 cmd = msg->rsp[10]; 2239 cmd = msg->rsp[10];
2206 2240
2207 read_lock(&(intf->cmd_rcvr_lock)); 2241 spin_lock_irqsave(&intf->cmd_rcvrs_lock, flags);
2208 2242 rcvr = find_cmd_rcvr(intf, netfn, cmd);
2209 /* Find the command/netfn. */ 2243 if (rcvr) {
2210 list_for_each_entry(rcvr, &(intf->cmd_rcvrs), link) { 2244 user = rcvr->user;
2211 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)) { 2245 kref_get(&user->refcount);
2212 user = rcvr->user; 2246 } else
2213 break; 2247 user = NULL;
2214 } 2248 spin_unlock_irqrestore(&intf->cmd_rcvrs_lock, flags);
2215 }
2216 read_unlock(&(intf->cmd_rcvr_lock));
2217 2249
2218 if (user == NULL) { 2250 if (user == NULL) {
2219 /* We didn't find a user, deliver an error response. */ 2251 /* We didn't find a user, just give up. */
2220 spin_lock_irqsave(&intf->counter_lock, flags); 2252 spin_lock_irqsave(&intf->counter_lock, flags);
2221 intf->unhandled_commands++; 2253 intf->unhandled_commands++;
2222 spin_unlock_irqrestore(&intf->counter_lock, flags); 2254 spin_unlock_irqrestore(&intf->counter_lock, flags);
@@ -2235,6 +2267,7 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
2235 message, so requeue it for handling 2267 message, so requeue it for handling
2236 later. */ 2268 later. */
2237 rv = 1; 2269 rv = 1;
2270 kref_put(&user->refcount, free_user);
2238 } else { 2271 } else {
2239 /* Extract the source address from the data. */ 2272 /* Extract the source address from the data. */
2240 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; 2273 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
@@ -2286,8 +2319,6 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
2286 recv_msg->msg.data_len = msg->rsp_size - 3; 2319 recv_msg->msg.data_len = msg->rsp_size - 3;
2287} 2320}
2288 2321
2289/* This will be called with the intf->users_lock read-locked, so no need
2290 to do that here. */
2291static int handle_read_event_rsp(ipmi_smi_t intf, 2322static int handle_read_event_rsp(ipmi_smi_t intf,
2292 struct ipmi_smi_msg *msg) 2323 struct ipmi_smi_msg *msg)
2293{ 2324{
@@ -2313,7 +2344,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
2313 2344
2314 INIT_LIST_HEAD(&msgs); 2345 INIT_LIST_HEAD(&msgs);
2315 2346
2316 spin_lock_irqsave(&(intf->events_lock), flags); 2347 spin_lock_irqsave(&intf->events_lock, flags);
2317 2348
2318 spin_lock(&intf->counter_lock); 2349 spin_lock(&intf->counter_lock);
2319 intf->events++; 2350 intf->events++;
@@ -2321,12 +2352,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
2321 2352
2322 /* Allocate and fill in one message for every user that is getting 2353 /* Allocate and fill in one message for every user that is getting
2323 events. */ 2354 events. */
2324 list_for_each_entry(user, &(intf->users), link) { 2355 rcu_read_lock();
2356 list_for_each_entry_rcu(user, &intf->users, link) {
2325 if (! user->gets_events) 2357 if (! user->gets_events)
2326 continue; 2358 continue;
2327 2359
2328 recv_msg = ipmi_alloc_recv_msg(); 2360 recv_msg = ipmi_alloc_recv_msg();
2329 if (! recv_msg) { 2361 if (! recv_msg) {
2362 rcu_read_unlock();
2330 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { 2363 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
2331 list_del(&recv_msg->link); 2364 list_del(&recv_msg->link);
2332 ipmi_free_recv_msg(recv_msg); 2365 ipmi_free_recv_msg(recv_msg);
@@ -2342,8 +2375,10 @@ static int handle_read_event_rsp(ipmi_smi_t intf,
2342 2375
2343 copy_event_into_recv_msg(recv_msg, msg); 2376 copy_event_into_recv_msg(recv_msg, msg);
2344 recv_msg->user = user; 2377 recv_msg->user = user;
2378 kref_get(&user->refcount);
2345 list_add_tail(&(recv_msg->link), &msgs); 2379 list_add_tail(&(recv_msg->link), &msgs);
2346 } 2380 }
2381 rcu_read_unlock();
2347 2382
2348 if (deliver_count) { 2383 if (deliver_count) {
2349 /* Now deliver all the messages. */ 2384 /* Now deliver all the messages. */
@@ -2382,9 +2417,8 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
2382 struct ipmi_smi_msg *msg) 2417 struct ipmi_smi_msg *msg)
2383{ 2418{
2384 struct ipmi_recv_msg *recv_msg; 2419 struct ipmi_recv_msg *recv_msg;
2385 int found = 0;
2386 struct ipmi_user *user;
2387 unsigned long flags; 2420 unsigned long flags;
2421 struct ipmi_user *user;
2388 2422
2389 recv_msg = (struct ipmi_recv_msg *) msg->user_data; 2423 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
2390 if (recv_msg == NULL) 2424 if (recv_msg == NULL)
@@ -2396,16 +2430,9 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
2396 return 0; 2430 return 0;
2397 } 2431 }
2398 2432
2433 user = recv_msg->user;
2399 /* Make sure the user still exists. */ 2434 /* Make sure the user still exists. */
2400 list_for_each_entry(user, &(intf->users), link) { 2435 if (user && !user->valid) {
2401 if (user == recv_msg->user) {
2402 /* Found it, so we can deliver it */
2403 found = 1;
2404 break;
2405 }
2406 }
2407
2408 if ((! found) && recv_msg->user) {
2409 /* The user for the message went away, so give up. */ 2436 /* The user for the message went away, so give up. */
2410 spin_lock_irqsave(&intf->counter_lock, flags); 2437 spin_lock_irqsave(&intf->counter_lock, flags);
2411 intf->unhandled_local_responses++; 2438 intf->unhandled_local_responses++;
@@ -2486,7 +2513,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
2486 { 2513 {
2487 /* It's a response to a response we sent. For this we 2514 /* It's a response to a response we sent. For this we
2488 deliver a send message response to the user. */ 2515 deliver a send message response to the user. */
2489 struct ipmi_recv_msg *recv_msg = msg->user_data; 2516 struct ipmi_recv_msg *recv_msg = msg->user_data;
2490 2517
2491 requeue = 0; 2518 requeue = 0;
2492 if (msg->rsp_size < 2) 2519 if (msg->rsp_size < 2)
@@ -2498,13 +2525,18 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
2498 /* Invalid channel number */ 2525 /* Invalid channel number */
2499 goto out; 2526 goto out;
2500 2527
2501 if (recv_msg) { 2528 if (!recv_msg)
2502 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE; 2529 goto out;
2503 recv_msg->msg.data = recv_msg->msg_data; 2530
2504 recv_msg->msg.data_len = 1; 2531 /* Make sure the user still exists. */
2505 recv_msg->msg_data[0] = msg->rsp[2]; 2532 if (!recv_msg->user || !recv_msg->user->valid)
2506 deliver_response(recv_msg); 2533 goto out;
2507 } 2534
2535 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
2536 recv_msg->msg.data = recv_msg->msg_data;
2537 recv_msg->msg.data_len = 1;
2538 recv_msg->msg_data[0] = msg->rsp[2];
2539 deliver_response(recv_msg);
2508 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) 2540 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
2509 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) 2541 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
2510 { 2542 {
@@ -2570,14 +2602,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2570 int rv; 2602 int rv;
2571 2603
2572 2604
2573 /* Lock the user lock so the user can't go away while we are
2574 working on it. */
2575 read_lock(&(intf->users_lock));
2576
2577 if ((msg->data_size >= 2) 2605 if ((msg->data_size >= 2)
2578 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 2606 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
2579 && (msg->data[1] == IPMI_SEND_MSG_CMD) 2607 && (msg->data[1] == IPMI_SEND_MSG_CMD)
2580 && (msg->user_data == NULL)) { 2608 && (msg->user_data == NULL))
2609 {
2581 /* This is the local response to a command send, start 2610 /* This is the local response to a command send, start
2582 the timer for these. The user_data will not be 2611 the timer for these. The user_data will not be
2583 NULL if this is a response send, and we will let 2612 NULL if this is a response send, and we will let
@@ -2612,46 +2641,46 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
2612 } 2641 }
2613 2642
2614 ipmi_free_smi_msg(msg); 2643 ipmi_free_smi_msg(msg);
2615 goto out_unlock; 2644 goto out;
2616 } 2645 }
2617 2646
2618 /* To preserve message order, if the list is not empty, we 2647 /* To preserve message order, if the list is not empty, we
2619 tack this message onto the end of the list. */ 2648 tack this message onto the end of the list. */
2620 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); 2649 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2621 if (!list_empty(&(intf->waiting_msgs))) { 2650 if (!list_empty(&intf->waiting_msgs)) {
2622 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2651 list_add_tail(&msg->link, &intf->waiting_msgs);
2623 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2652 spin_unlock(&intf->waiting_msgs_lock);
2624 goto out_unlock; 2653 goto out;
2625 } 2654 }
2626 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2655 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2627 2656
2628 rv = handle_new_recv_msg(intf, msg); 2657 rv = handle_new_recv_msg(intf, msg);
2629 if (rv > 0) { 2658 if (rv > 0) {
2630 /* Could not handle the message now, just add it to a 2659 /* Could not handle the message now, just add it to a
2631 list to handle later. */ 2660 list to handle later. */
2632 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); 2661 spin_lock(&intf->waiting_msgs_lock);
2633 list_add_tail(&(msg->link), &(intf->waiting_msgs)); 2662 list_add_tail(&msg->link, &intf->waiting_msgs);
2634 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2663 spin_unlock(&intf->waiting_msgs_lock);
2635 } else if (rv == 0) { 2664 } else if (rv == 0) {
2636 ipmi_free_smi_msg(msg); 2665 ipmi_free_smi_msg(msg);
2637 } 2666 }
2638 2667
2639 out_unlock: 2668 out:
2640 read_unlock(&(intf->users_lock)); 2669 return;
2641} 2670}
2642 2671
2643void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 2672void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
2644{ 2673{
2645 ipmi_user_t user; 2674 ipmi_user_t user;
2646 2675
2647 read_lock(&(intf->users_lock)); 2676 rcu_read_lock();
2648 list_for_each_entry(user, &(intf->users), link) { 2677 list_for_each_entry_rcu(user, &intf->users, link) {
2649 if (! user->handler->ipmi_watchdog_pretimeout) 2678 if (! user->handler->ipmi_watchdog_pretimeout)
2650 continue; 2679 continue;
2651 2680
2652 user->handler->ipmi_watchdog_pretimeout(user->handler_data); 2681 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
2653 } 2682 }
2654 read_unlock(&(intf->users_lock)); 2683 rcu_read_unlock();
2655} 2684}
2656 2685
2657static void 2686static void
@@ -2691,8 +2720,65 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
2691 return smi_msg; 2720 return smi_msg;
2692} 2721}
2693 2722
2694static void 2723static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
2695ipmi_timeout_handler(long timeout_period) 2724 struct list_head *timeouts, long timeout_period,
2725 int slot, unsigned long *flags)
2726{
2727 struct ipmi_recv_msg *msg;
2728
2729 if (!ent->inuse)
2730 return;
2731
2732 ent->timeout -= timeout_period;
2733 if (ent->timeout > 0)
2734 return;
2735
2736 if (ent->retries_left == 0) {
2737 /* The message has used all its retries. */
2738 ent->inuse = 0;
2739 msg = ent->recv_msg;
2740 list_add_tail(&msg->link, timeouts);
2741 spin_lock(&intf->counter_lock);
2742 if (ent->broadcast)
2743 intf->timed_out_ipmb_broadcasts++;
2744 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2745 intf->timed_out_lan_commands++;
2746 else
2747 intf->timed_out_ipmb_commands++;
2748 spin_unlock(&intf->counter_lock);
2749 } else {
2750 struct ipmi_smi_msg *smi_msg;
2751 /* More retries, send again. */
2752
2753 /* Start with the max timer, set to normal
2754 timer after the message is sent. */
2755 ent->timeout = MAX_MSG_TIMEOUT;
2756 ent->retries_left--;
2757 spin_lock(&intf->counter_lock);
2758 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
2759 intf->retransmitted_lan_commands++;
2760 else
2761 intf->retransmitted_ipmb_commands++;
2762 spin_unlock(&intf->counter_lock);
2763
2764 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
2765 ent->seqid);
2766 if (! smi_msg)
2767 return;
2768
2769 spin_unlock_irqrestore(&intf->seq_lock, *flags);
2770 /* Send the new message. We send with a zero
2771 * priority. It timed out, I doubt time is
2772 * that critical now, and high priority
2773 * messages are really only for messages to the
2774 * local MC, which don't get resent. */
2775 intf->handlers->sender(intf->send_info,
2776 smi_msg, 0);
2777 spin_lock_irqsave(&intf->seq_lock, *flags);
2778 }
2779}
2780
2781static void ipmi_timeout_handler(long timeout_period)
2696{ 2782{
2697 ipmi_smi_t intf; 2783 ipmi_smi_t intf;
2698 struct list_head timeouts; 2784 struct list_head timeouts;
@@ -2706,14 +2792,14 @@ ipmi_timeout_handler(long timeout_period)
2706 spin_lock(&interfaces_lock); 2792 spin_lock(&interfaces_lock);
2707 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2793 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2708 intf = ipmi_interfaces[i]; 2794 intf = ipmi_interfaces[i];
2709 if (intf == NULL) 2795 if (IPMI_INVALID_INTERFACE(intf))
2710 continue; 2796 continue;
2711 2797 kref_get(&intf->refcount);
2712 read_lock(&(intf->users_lock)); 2798 spin_unlock(&interfaces_lock);
2713 2799
2714 /* See if any waiting messages need to be processed. */ 2800 /* See if any waiting messages need to be processed. */
2715 spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); 2801 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
2716 list_for_each_entry_safe(smi_msg, smi_msg2, &(intf->waiting_msgs), link) { 2802 list_for_each_entry_safe(smi_msg, smi_msg2, &intf->waiting_msgs, link) {
2717 if (! handle_new_recv_msg(intf, smi_msg)) { 2803 if (! handle_new_recv_msg(intf, smi_msg)) {
2718 list_del(&smi_msg->link); 2804 list_del(&smi_msg->link);
2719 ipmi_free_smi_msg(smi_msg); 2805 ipmi_free_smi_msg(smi_msg);
@@ -2723,73 +2809,23 @@ ipmi_timeout_handler(long timeout_period)
2723 break; 2809 break;
2724 } 2810 }
2725 } 2811 }
2726 spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); 2812 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
2727 2813
2728 /* Go through the seq table and find any messages that 2814 /* Go through the seq table and find any messages that
2729 have timed out, putting them in the timeouts 2815 have timed out, putting them in the timeouts
2730 list. */ 2816 list. */
2731 spin_lock_irqsave(&(intf->seq_lock), flags); 2817 spin_lock_irqsave(&intf->seq_lock, flags);
2732 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { 2818 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++)
2733 struct seq_table *ent = &(intf->seq_table[j]); 2819 check_msg_timeout(intf, &(intf->seq_table[j]),
2734 if (!ent->inuse) 2820 &timeouts, timeout_period, j,
2735 continue; 2821 &flags);
2736 2822 spin_unlock_irqrestore(&intf->seq_lock, flags);
2737 ent->timeout -= timeout_period; 2823
2738 if (ent->timeout > 0) 2824 list_for_each_entry_safe(msg, msg2, &timeouts, link)
2739 continue;
2740
2741 if (ent->retries_left == 0) {
2742 /* The message has used all its retries. */
2743 ent->inuse = 0;
2744 msg = ent->recv_msg;
2745 list_add_tail(&(msg->link), &timeouts);
2746 spin_lock(&intf->counter_lock);
2747 if (ent->broadcast)
2748 intf->timed_out_ipmb_broadcasts++;
2749 else if (ent->recv_msg->addr.addr_type
2750 == IPMI_LAN_ADDR_TYPE)
2751 intf->timed_out_lan_commands++;
2752 else
2753 intf->timed_out_ipmb_commands++;
2754 spin_unlock(&intf->counter_lock);
2755 } else {
2756 struct ipmi_smi_msg *smi_msg;
2757 /* More retries, send again. */
2758
2759 /* Start with the max timer, set to normal
2760 timer after the message is sent. */
2761 ent->timeout = MAX_MSG_TIMEOUT;
2762 ent->retries_left--;
2763 spin_lock(&intf->counter_lock);
2764 if (ent->recv_msg->addr.addr_type
2765 == IPMI_LAN_ADDR_TYPE)
2766 intf->retransmitted_lan_commands++;
2767 else
2768 intf->retransmitted_ipmb_commands++;
2769 spin_unlock(&intf->counter_lock);
2770 smi_msg = smi_from_recv_msg(intf,
2771 ent->recv_msg, j, ent->seqid);
2772 if (! smi_msg)
2773 continue;
2774
2775 spin_unlock_irqrestore(&(intf->seq_lock),flags);
2776 /* Send the new message. We send with a zero
2777 * priority. It timed out, I doubt time is
2778 * that critical now, and high priority
2779 * messages are really only for messages to the
2780 * local MC, which don't get resent. */
2781 intf->handlers->sender(intf->send_info,
2782 smi_msg, 0);
2783 spin_lock_irqsave(&(intf->seq_lock), flags);
2784 }
2785 }
2786 spin_unlock_irqrestore(&(intf->seq_lock), flags);
2787
2788 list_for_each_entry_safe(msg, msg2, &timeouts, link) {
2789 handle_msg_timeout(msg); 2825 handle_msg_timeout(msg);
2790 }
2791 2826
2792 read_unlock(&(intf->users_lock)); 2827 kref_put(&intf->refcount, intf_free);
2828 spin_lock(&interfaces_lock);
2793 } 2829 }
2794 spin_unlock(&interfaces_lock); 2830 spin_unlock(&interfaces_lock);
2795} 2831}
@@ -2802,7 +2838,7 @@ static void ipmi_request_event(void)
2802 spin_lock(&interfaces_lock); 2838 spin_lock(&interfaces_lock);
2803 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 2839 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2804 intf = ipmi_interfaces[i]; 2840 intf = ipmi_interfaces[i];
2805 if (intf == NULL) 2841 if (IPMI_INVALID_INTERFACE(intf))
2806 continue; 2842 continue;
2807 2843
2808 intf->handlers->request_events(intf->send_info); 2844 intf->handlers->request_events(intf->send_info);
@@ -2884,6 +2920,13 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
2884 return rv; 2920 return rv;
2885} 2921}
2886 2922
2923void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
2924{
2925 if (msg->user)
2926 kref_put(&msg->user->refcount, free_user);
2927 msg->done(msg);
2928}
2929
2887#ifdef CONFIG_IPMI_PANIC_EVENT 2930#ifdef CONFIG_IPMI_PANIC_EVENT
2888 2931
2889static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) 2932static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
@@ -2964,7 +3007,7 @@ static void send_panic_events(char *str)
2964 /* For every registered interface, send the event. */ 3007 /* For every registered interface, send the event. */
2965 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3008 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
2966 intf = ipmi_interfaces[i]; 3009 intf = ipmi_interfaces[i];
2967 if (intf == NULL) 3010 if (IPMI_INVALID_INTERFACE(intf))
2968 continue; 3011 continue;
2969 3012
2970 /* Send the event announcing the panic. */ 3013 /* Send the event announcing the panic. */
@@ -2995,7 +3038,7 @@ static void send_panic_events(char *str)
2995 int j; 3038 int j;
2996 3039
2997 intf = ipmi_interfaces[i]; 3040 intf = ipmi_interfaces[i];
2998 if (intf == NULL) 3041 if (IPMI_INVALID_INTERFACE(intf))
2999 continue; 3042 continue;
3000 3043
3001 /* First job here is to figure out where to send the 3044 /* First job here is to figure out where to send the
@@ -3131,7 +3174,7 @@ static int panic_event(struct notifier_block *this,
3131 /* For every registered interface, set it to run to completion. */ 3174 /* For every registered interface, set it to run to completion. */
3132 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3175 for (i = 0; i < MAX_IPMI_INTERFACES; i++) {
3133 intf = ipmi_interfaces[i]; 3176 intf = ipmi_interfaces[i];
3134 if (intf == NULL) 3177 if (IPMI_INVALID_INTERFACE(intf))
3135 continue; 3178 continue;
3136 3179
3137 intf->handlers->set_run_to_completion(intf->send_info, 1); 3180 intf->handlers->set_run_to_completion(intf->send_info, 1);
@@ -3160,9 +3203,8 @@ static int ipmi_init_msghandler(void)
3160 printk(KERN_INFO "ipmi message handler version " 3203 printk(KERN_INFO "ipmi message handler version "
3161 IPMI_DRIVER_VERSION "\n"); 3204 IPMI_DRIVER_VERSION "\n");
3162 3205
3163 for (i = 0; i < MAX_IPMI_INTERFACES; i++) { 3206 for (i = 0; i < MAX_IPMI_INTERFACES; i++)
3164 ipmi_interfaces[i] = NULL; 3207 ipmi_interfaces[i] = NULL;
3165 }
3166 3208
3167#ifdef CONFIG_PROC_FS 3209#ifdef CONFIG_PROC_FS
3168 proc_ipmi_root = proc_mkdir("ipmi", NULL); 3210 proc_ipmi_root = proc_mkdir("ipmi", NULL);
@@ -3258,3 +3300,4 @@ EXPORT_SYMBOL(ipmi_get_my_LUN);
3258EXPORT_SYMBOL(ipmi_smi_add_proc_entry); 3300EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
3259EXPORT_SYMBOL(proc_ipmi_root); 3301EXPORT_SYMBOL(proc_ipmi_root);
3260EXPORT_SYMBOL(ipmi_user_set_run_to_completion); 3302EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
3303EXPORT_SYMBOL(ipmi_free_recv_msg);