about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorCorey Minyard <cminyard@mvista.com>2014-11-06 17:58:48 -0500
committerCorey Minyard <cminyard@mvista.com>2014-12-11 16:04:09 -0500
commit7ea0ed2b5be81781ba976bc03414ef5da76270b9 (patch)
treeef44f9f25c16980eb9461af56c7ad473cd82b408
parent7f4a1c84c3506f492b208fdaae21879f55a51229 (diff)
ipmi: Make the message handler easier to use for SMI interfaces
The message handler expected the SMI interface to keep a queue of messages, but that was kind of silly; the queue would be easier to manage in the message handler itself. As part of that, fix the message cleanup to make sure no messages are outstanding when an SMI interface is unregistered. This makes it easier for an SMI interface to unregister: it just has to call ipmi_unregister_smi() first, and all processing from the message handler will be cleaned up. Signed-off-by: Corey Minyard <cminyard@mvista.com>
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c202
1 file changed, 162 insertions, 40 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 1a8e7b2131e9..b705218fbbfa 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -56,6 +56,8 @@ static int ipmi_init_msghandler(void);
56static void smi_recv_tasklet(unsigned long); 56static void smi_recv_tasklet(unsigned long);
57static void handle_new_recv_msgs(ipmi_smi_t intf); 57static void handle_new_recv_msgs(ipmi_smi_t intf);
58static void need_waiter(ipmi_smi_t intf); 58static void need_waiter(ipmi_smi_t intf);
59static int handle_one_recv_msg(ipmi_smi_t intf,
60 struct ipmi_smi_msg *msg);
59 61
60static int initialized; 62static int initialized;
61 63
@@ -324,6 +326,9 @@ struct ipmi_smi {
324 326
325 struct kref refcount; 327 struct kref refcount;
326 328
329 /* Set when the interface is being unregistered. */
330 bool in_shutdown;
331
327 /* Used for a list of interfaces. */ 332 /* Used for a list of interfaces. */
328 struct list_head link; 333 struct list_head link;
329 334
@@ -382,6 +387,11 @@ struct ipmi_smi {
382 atomic_t watchdog_pretimeouts_to_deliver; 387 atomic_t watchdog_pretimeouts_to_deliver;
383 struct tasklet_struct recv_tasklet; 388 struct tasklet_struct recv_tasklet;
384 389
390 spinlock_t xmit_msgs_lock;
391 struct list_head xmit_msgs;
392 struct ipmi_smi_msg *curr_msg;
393 struct list_head hp_xmit_msgs;
394
385 /* 395 /*
386 * The list of command receivers that are registered for commands 396 * The list of command receivers that are registered for commands
387 * on this interface. 397 * on this interface.
@@ -1488,7 +1498,25 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1488static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, 1498static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
1489 struct ipmi_smi_msg *smi_msg, int priority) 1499 struct ipmi_smi_msg *smi_msg, int priority)
1490{ 1500{
1491 handlers->sender(intf->send_info, smi_msg, 0); 1501 int run_to_completion = intf->run_to_completion;
1502 unsigned long flags;
1503
1504 if (!run_to_completion)
1505 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1506 if (intf->curr_msg) {
1507 if (priority > 0)
1508 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1509 else
1510 list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1511 smi_msg = NULL;
1512 } else {
1513 intf->curr_msg = smi_msg;
1514 }
1515 if (!run_to_completion)
1516 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1517
1518 if (smi_msg)
1519 handlers->sender(intf->send_info, smi_msg, 0);
1492} 1520}
1493 1521
1494/* 1522/*
@@ -1515,7 +1543,6 @@ static int i_ipmi_request(ipmi_user_t user,
1515 struct ipmi_smi_msg *smi_msg; 1543 struct ipmi_smi_msg *smi_msg;
1516 struct ipmi_recv_msg *recv_msg; 1544 struct ipmi_recv_msg *recv_msg;
1517 unsigned long flags; 1545 unsigned long flags;
1518 struct ipmi_smi_handlers *handlers;
1519 1546
1520 1547
1521 if (supplied_recv) 1548 if (supplied_recv)
@@ -1538,8 +1565,7 @@ static int i_ipmi_request(ipmi_user_t user,
1538 } 1565 }
1539 1566
1540 rcu_read_lock(); 1567 rcu_read_lock();
1541 handlers = intf->handlers; 1568 if (intf->in_shutdown) {
1542 if (!handlers) {
1543 rv = -ENODEV; 1569 rv = -ENODEV;
1544 goto out_err; 1570 goto out_err;
1545 } 1571 }
@@ -1874,7 +1900,7 @@ static int i_ipmi_request(ipmi_user_t user,
1874 } 1900 }
1875#endif 1901#endif
1876 1902
1877 smi_send(intf, handlers, smi_msg, priority); 1903 smi_send(intf, intf->handlers, smi_msg, priority);
1878 rcu_read_unlock(); 1904 rcu_read_unlock();
1879 1905
1880 return 0; 1906 return 0;
@@ -2810,6 +2836,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2810 smi_recv_tasklet, 2836 smi_recv_tasklet,
2811 (unsigned long) intf); 2837 (unsigned long) intf);
2812 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); 2838 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
2839 spin_lock_init(&intf->xmit_msgs_lock);
2840 INIT_LIST_HEAD(&intf->xmit_msgs);
2841 INIT_LIST_HEAD(&intf->hp_xmit_msgs);
2813 spin_lock_init(&intf->events_lock); 2842 spin_lock_init(&intf->events_lock);
2814 atomic_set(&intf->event_waiters, 0); 2843 atomic_set(&intf->event_waiters, 0);
2815 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; 2844 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -2909,12 +2938,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2909} 2938}
2910EXPORT_SYMBOL(ipmi_register_smi); 2939EXPORT_SYMBOL(ipmi_register_smi);
2911 2940
2941static void deliver_smi_err_response(ipmi_smi_t intf,
2942 struct ipmi_smi_msg *msg,
2943 unsigned char err)
2944{
2945 msg->rsp[0] = msg->data[0] | 4;
2946 msg->rsp[1] = msg->data[1];
2947 msg->rsp[2] = err;
2948 msg->rsp_size = 3;
2949 /* It's an error, so it will never requeue, no need to check return. */
2950 handle_one_recv_msg(intf, msg);
2951}
2952
2912static void cleanup_smi_msgs(ipmi_smi_t intf) 2953static void cleanup_smi_msgs(ipmi_smi_t intf)
2913{ 2954{
2914 int i; 2955 int i;
2915 struct seq_table *ent; 2956 struct seq_table *ent;
2957 struct ipmi_smi_msg *msg;
2958 struct list_head *entry;
2959 struct list_head tmplist;
2960
2961 /* Clear out our transmit queues and hold the messages. */
2962 INIT_LIST_HEAD(&tmplist);
2963 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
2964 list_splice_tail(&intf->xmit_msgs, &tmplist);
2965
2966 /* Current message first, to preserve order */
2967 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
2968 /* Wait for the message to clear out. */
2969 schedule_timeout(1);
2970 }
2916 2971
2917 /* No need for locks, the interface is down. */ 2972 /* No need for locks, the interface is down. */
2973
2974 /*
2975 * Return errors for all pending messages in queue and in the
2976 * tables waiting for remote responses.
2977 */
2978 while (!list_empty(&tmplist)) {
2979 entry = tmplist.next;
2980 list_del(entry);
2981 msg = list_entry(entry, struct ipmi_smi_msg, link);
2982 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
2983 }
2984
2918 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { 2985 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2919 ent = &(intf->seq_table[i]); 2986 ent = &(intf->seq_table[i]);
2920 if (!ent->inuse) 2987 if (!ent->inuse)
@@ -2926,20 +2993,33 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
2926int ipmi_unregister_smi(ipmi_smi_t intf) 2993int ipmi_unregister_smi(ipmi_smi_t intf)
2927{ 2994{
2928 struct ipmi_smi_watcher *w; 2995 struct ipmi_smi_watcher *w;
2929 int intf_num = intf->intf_num; 2996 int intf_num = intf->intf_num;
2997 ipmi_user_t user;
2930 2998
2931 ipmi_bmc_unregister(intf); 2999 ipmi_bmc_unregister(intf);
2932 3000
2933 mutex_lock(&smi_watchers_mutex); 3001 mutex_lock(&smi_watchers_mutex);
2934 mutex_lock(&ipmi_interfaces_mutex); 3002 mutex_lock(&ipmi_interfaces_mutex);
2935 intf->intf_num = -1; 3003 intf->intf_num = -1;
2936 intf->handlers = NULL; 3004 intf->in_shutdown = true;
2937 list_del_rcu(&intf->link); 3005 list_del_rcu(&intf->link);
2938 mutex_unlock(&ipmi_interfaces_mutex); 3006 mutex_unlock(&ipmi_interfaces_mutex);
2939 synchronize_rcu(); 3007 synchronize_rcu();
2940 3008
2941 cleanup_smi_msgs(intf); 3009 cleanup_smi_msgs(intf);
2942 3010
3011 /* Clean up the effects of users on the lower-level software. */
3012 mutex_lock(&ipmi_interfaces_mutex);
3013 rcu_read_lock();
3014 list_for_each_entry_rcu(user, &intf->users, link) {
3015 module_put(intf->handlers->owner);
3016 if (intf->handlers->dec_usecount)
3017 intf->handlers->dec_usecount(intf->send_info);
3018 }
3019 rcu_read_unlock();
3020 intf->handlers = NULL;
3021 mutex_unlock(&ipmi_interfaces_mutex);
3022
2943 remove_proc_entries(intf); 3023 remove_proc_entries(intf);
2944 3024
2945 /* 3025 /*
@@ -3029,7 +3109,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
3029 ipmi_user_t user = NULL; 3109 ipmi_user_t user = NULL;
3030 struct ipmi_ipmb_addr *ipmb_addr; 3110 struct ipmi_ipmb_addr *ipmb_addr;
3031 struct ipmi_recv_msg *recv_msg; 3111 struct ipmi_recv_msg *recv_msg;
3032 struct ipmi_smi_handlers *handlers;
3033 3112
3034 if (msg->rsp_size < 10) { 3113 if (msg->rsp_size < 10) {
3035 /* Message not big enough, just ignore it. */ 3114 /* Message not big enough, just ignore it. */
@@ -3083,9 +3162,8 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
3083 } 3162 }
3084#endif 3163#endif
3085 rcu_read_lock(); 3164 rcu_read_lock();
3086 handlers = intf->handlers; 3165 if (!intf->in_shutdown) {
3087 if (handlers) { 3166 smi_send(intf, intf->handlers, msg, 0);
3088 smi_send(intf, handlers, msg, 0);
3089 /* 3167 /*
3090 * We used the message, so return the value 3168 * We used the message, so return the value
3091 * that causes it to not be freed or 3169 * that causes it to not be freed or
@@ -3756,25 +3834,24 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3756 while (!list_empty(&intf->waiting_rcv_msgs)) { 3834 while (!list_empty(&intf->waiting_rcv_msgs)) {
3757 smi_msg = list_entry(intf->waiting_rcv_msgs.next, 3835 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
3758 struct ipmi_smi_msg, link); 3836 struct ipmi_smi_msg, link);
3759 list_del(&smi_msg->link);
3760 if (!run_to_completion) 3837 if (!run_to_completion)
3761 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, 3838 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
3762 flags); 3839 flags);
3763 rv = handle_one_recv_msg(intf, smi_msg); 3840 rv = handle_one_recv_msg(intf, smi_msg);
3764 if (!run_to_completion) 3841 if (!run_to_completion)
3765 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 3842 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
3766 if (rv == 0) { 3843 if (rv > 0) {
3767 /* Message handled */
3768 ipmi_free_smi_msg(smi_msg);
3769 } else if (rv < 0) {
3770 /* Fatal error on the message, del but don't free. */
3771 } else {
3772 /* 3844 /*
3773 * To preserve message order, quit if we 3845 * To preserve message order, quit if we
3774 * can't handle a message. 3846 * can't handle a message.
3775 */ 3847 */
3776 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
3777 break; 3848 break;
3849 } else {
3850 list_del(&smi_msg->link);
3851 if (rv == 0)
3852 /* Message handled */
3853 ipmi_free_smi_msg(smi_msg);
3854 /* If rv < 0, fatal error, del but don't free. */
3778 } 3855 }
3779 } 3856 }
3780 if (!run_to_completion) 3857 if (!run_to_completion)
@@ -3799,7 +3876,41 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
3799 3876
3800static void smi_recv_tasklet(unsigned long val) 3877static void smi_recv_tasklet(unsigned long val)
3801{ 3878{
3802 handle_new_recv_msgs((ipmi_smi_t) val); 3879 unsigned long flags = 0; /* keep us warning-free. */
3880 ipmi_smi_t intf = (ipmi_smi_t) val;
3881 int run_to_completion = intf->run_to_completion;
3882 struct ipmi_smi_msg *newmsg = NULL;
3883
3884 /*
3885 * Start the next message if available.
3886 *
3887 * Do this here, not in the actual receiver, because we may deadlock
3888 * because the lower layer is allowed to hold locks while calling
3889 * message delivery.
3890 */
3891 if (!run_to_completion)
3892 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
3893 if (intf->curr_msg == NULL && !intf->in_shutdown) {
3894 struct list_head *entry = NULL;
3895
3896 /* Pick the high priority queue first. */
3897 if (!list_empty(&intf->hp_xmit_msgs))
3898 entry = intf->hp_xmit_msgs.next;
3899 else if (!list_empty(&intf->xmit_msgs))
3900 entry = intf->xmit_msgs.next;
3901
3902 if (entry) {
3903 list_del(entry);
3904 newmsg = list_entry(entry, struct ipmi_smi_msg, link);
3905 intf->curr_msg = newmsg;
3906 }
3907 }
3908 if (!run_to_completion)
3909 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
3910 if (newmsg)
3911 intf->handlers->sender(intf->send_info, newmsg, 0);
3912
3913 handle_new_recv_msgs(intf);
3803} 3914}
3804 3915
3805/* Handle a new message from the lower layer. */ 3916/* Handle a new message from the lower layer. */
@@ -3807,13 +3918,16 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
3807 struct ipmi_smi_msg *msg) 3918 struct ipmi_smi_msg *msg)
3808{ 3919{
3809 unsigned long flags = 0; /* keep us warning-free. */ 3920 unsigned long flags = 0; /* keep us warning-free. */
3810 int run_to_completion; 3921 int run_to_completion = intf->run_to_completion;
3811
3812 3922
3813 if ((msg->data_size >= 2) 3923 if ((msg->data_size >= 2)
3814 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) 3924 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3815 && (msg->data[1] == IPMI_SEND_MSG_CMD) 3925 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3816 && (msg->user_data == NULL)) { 3926 && (msg->user_data == NULL)) {
3927
3928 if (intf->in_shutdown)
3929 goto free_msg;
3930
3817 /* 3931 /*
3818 * This is the local response to a command send, start 3932 * This is the local response to a command send, start
3819 * the timer for these. The user_data will not be 3933 * the timer for these. The user_data will not be
@@ -3849,29 +3963,40 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
3849 /* The message was sent, start the timer. */ 3963 /* The message was sent, start the timer. */
3850 intf_start_seq_timer(intf, msg->msgid); 3964 intf_start_seq_timer(intf, msg->msgid);
3851 3965
3966free_msg:
3852 ipmi_free_smi_msg(msg); 3967 ipmi_free_smi_msg(msg);
3853 goto out; 3968 } else {
3969 /*
3970 * To preserve message order, we keep a queue and deliver from
3971 * a tasklet.
3972 */
3973 if (!run_to_completion)
3974 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
3975 list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
3976 if (!run_to_completion)
3977 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
3978 flags);
3854 } 3979 }
3855 3980
3856 /*
3857 * To preserve message order, if the list is not empty, we
3858 * tack this message onto the end of the list.
3859 */
3860 run_to_completion = intf->run_to_completion;
3861 if (!run_to_completion) 3981 if (!run_to_completion)
3862 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); 3982 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
3863 list_add_tail(&msg->link, &intf->waiting_rcv_msgs); 3983 if (msg == intf->curr_msg)
3984 intf->curr_msg = NULL;
3864 if (!run_to_completion) 3985 if (!run_to_completion)
3865 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags); 3986 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
3866 3987
3867 tasklet_schedule(&intf->recv_tasklet); 3988 if (run_to_completion)
3868 out: 3989 smi_recv_tasklet((unsigned long) intf);
3869 return; 3990 else
3991 tasklet_schedule(&intf->recv_tasklet);
3870} 3992}
3871EXPORT_SYMBOL(ipmi_smi_msg_received); 3993EXPORT_SYMBOL(ipmi_smi_msg_received);
3872 3994
3873void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) 3995void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3874{ 3996{
3997 if (intf->in_shutdown)
3998 return;
3999
3875 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1); 4000 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
3876 tasklet_schedule(&intf->recv_tasklet); 4001 tasklet_schedule(&intf->recv_tasklet);
3877} 4002}
@@ -3913,7 +4038,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3913 struct ipmi_recv_msg *msg; 4038 struct ipmi_recv_msg *msg;
3914 struct ipmi_smi_handlers *handlers; 4039 struct ipmi_smi_handlers *handlers;
3915 4040
3916 if (intf->intf_num == -1) 4041 if (intf->in_shutdown)
3917 return; 4042 return;
3918 4043
3919 if (!ent->inuse) 4044 if (!ent->inuse)
@@ -4040,15 +4165,12 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
4040 4165
4041static void ipmi_request_event(ipmi_smi_t intf) 4166static void ipmi_request_event(ipmi_smi_t intf)
4042{ 4167{
4043 struct ipmi_smi_handlers *handlers;
4044
4045 /* No event requests when in maintenance mode. */ 4168 /* No event requests when in maintenance mode. */
4046 if (intf->maintenance_mode_enable) 4169 if (intf->maintenance_mode_enable)
4047 return; 4170 return;
4048 4171
4049 handlers = intf->handlers; 4172 if (!intf->in_shutdown)
4050 if (handlers) 4173 intf->handlers->request_events(intf->send_info);
4051 handlers->request_events(intf->send_info);
4052} 4174}
4053 4175
4054static struct timer_list ipmi_timer; 4176static struct timer_list ipmi_timer;