author     Corey Minyard <cminyard@mvista.com>            2012-03-28 17:42:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2012-03-28 20:14:36 -0400
commit     7adf579c8babf62026e6aab1dee85e6b104d9936 (patch)
tree       23b3c94b6f97ecee2b7f6eea3f10871d737506c2 /drivers/char
parent     828dc9da50f9632bbc5bc9dfa510619d13135015 (diff)
ipmi: use a tasklet for handling received messages
The IPMI driver would release a lock, deliver a message, then relock.
This is obviously ugly, and this patch converts the message handler
interface to use a tasklet to schedule work. This lets the receive
handler be called from an interrupt handler with interrupts enabled.
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
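
For context, the mechanism the patch adopts looks roughly like the sketch below. This is an illustrative example only, using the classic tasklet_init()/tasklet_schedule() API of that era; the names (demo_intf, demo_recv_tasklet, demo_irq_handler) are hypothetical and not part of the patch. The hard-IRQ handler just queues the received message and schedules the tasklet; the tasklet then runs in softirq context with interrupts enabled, so delivery to the upper layer no longer needs the unlock/deliver/relock dance.

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_intf {
	spinlock_t            msgs_lock;
	struct list_head      msgs;            /* messages queued by the IRQ */
	struct tasklet_struct recv_tasklet;
};

/* Softirq context: interrupts are enabled here, so no unlock/relock is
   needed around delivery to the upper layer. */
static void demo_recv_tasklet(unsigned long data)
{
	struct demo_intf *intf = (struct demo_intf *) data;
	unsigned long flags;
	LIST_HEAD(todo);

	/* Grab everything queued so far, then process it unlocked. */
	spin_lock_irqsave(&intf->msgs_lock, flags);
	list_splice_init(&intf->msgs, &todo);
	spin_unlock_irqrestore(&intf->msgs_lock, flags);

	/* ... walk 'todo' and deliver each message to the upper layer ... */
}

/* Hard-IRQ context: queue the message and defer the heavy lifting. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	struct demo_intf *intf = dev_id;

	/* ... add the received message to intf->msgs under msgs_lock ... */
	tasklet_schedule(&intf->recv_tasklet);
	return IRQ_HANDLED;
}

static void demo_intf_init(struct demo_intf *intf)
{
	spin_lock_init(&intf->msgs_lock);
	INIT_LIST_HEAD(&intf->msgs);
	tasklet_init(&intf->recv_tasklet, demo_recv_tasklet,
		     (unsigned long) intf);
}

static void demo_intf_teardown(struct demo_intf *intf)
{
	tasklet_kill(&intf->recv_tasklet);     /* wait out any running instance */
}

In the patch itself, ipmi_smi_msg_received() plays the queue-and-schedule role, smi_recv_tasklet()/handle_new_recv_msgs() do the deferred delivery, and tasklet_kill() is added to clean_up_interface_data().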
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  141
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c      14
2 files changed, 88 insertions(+), 67 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 58c0e6387cf7..289ab506b79b 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -46,6 +46,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
+#include <linux/interrupt.h>
 
 #define PFX "IPMI message handler: "
 
@@ -53,6 +54,8 @@
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
 
 static int initialized;
 
@@ -355,12 +358,15 @@ struct ipmi_smi {
 	int curr_seq;
 
 	/*
-	 * Messages that were delayed for some reason (out of memory,
-	 * for instance), will go in here to be processed later in a
-	 * periodic timer interrupt.
+	 * Messages queued for delivery. If delivery fails (out of memory
+	 * for instance), They will stay in here to be processed later in a
+	 * periodic timer interrupt. The tasklet is for handling received
+	 * messages directly from the handler.
 	 */
 	spinlock_t waiting_msgs_lock;
 	struct list_head waiting_msgs;
+	atomic_t watchdog_pretimeouts_to_deliver;
+	struct tasklet_struct recv_tasklet;
 
 	/*
 	 * The list of command receivers that are registered for commands
@@ -493,6 +499,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
 	struct cmd_rcvr *rcvr, *rcvr2;
 	struct list_head list;
 
+	tasklet_kill(&intf->recv_tasklet);
+
 	free_smi_msg_list(&intf->waiting_msgs);
 	free_recv_msg_list(&intf->waiting_events);
 
@@ -2792,6 +2800,9 @@ void ipmi_poll_interface(ipmi_user_t user)
 
 	if (intf->handlers->poll)
 		intf->handlers->poll(intf->send_info);
+
+	/* In case something came in */
+	handle_new_recv_msgs(intf);
 }
 EXPORT_SYMBOL(ipmi_poll_interface);
 
@@ -2860,6 +2871,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 #endif
 	spin_lock_init(&intf->waiting_msgs_lock);
 	INIT_LIST_HEAD(&intf->waiting_msgs);
+	tasklet_init(&intf->recv_tasklet,
+		     smi_recv_tasklet,
+		     (unsigned long) intf);
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->events_lock);
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
@@ -3622,11 +3637,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
 }
 
 /*
- * Handle a new message. Return 1 if the message should be requeued,
+ * Handle a received message. Return 1 if the message should be requeued,
  * 0 if the message should be freed, or -1 if the message should not
  * be freed or requeued.
  */
-static int handle_new_recv_msg(ipmi_smi_t intf,
+static int handle_one_recv_msg(ipmi_smi_t intf,
 			       struct ipmi_smi_msg *msg)
 {
 	int requeue;
@@ -3784,12 +3799,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
 	return requeue;
 }
 
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(ipmi_smi_t intf)
+{
+	struct ipmi_smi_msg *smi_msg;
+	unsigned long flags = 0;
+	int rv;
+	int run_to_completion = intf->run_to_completion;
+
+	/* See if any waiting messages need to be processed. */
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+	while (!list_empty(&intf->waiting_msgs)) {
+		smi_msg = list_entry(intf->waiting_msgs.next,
+				     struct ipmi_smi_msg, link);
+		list_del(&smi_msg->link);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		rv = handle_one_recv_msg(intf, smi_msg);
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+		if (rv == 0) {
+			/* Message handled */
+			ipmi_free_smi_msg(smi_msg);
+		} else if (rv < 0) {
+			/* Fatal error on the message, del but don't free. */
+		} else {
+			/*
+			 * To preserve message order, quit if we
+			 * can't handle a message.
+			 */
+			list_add(&smi_msg->link, &intf->waiting_msgs);
+			break;
+		}
+	}
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+	/*
+	 * If the pretimout count is non-zero, decrement one from it and
+	 * deliver pretimeouts to all the users.
+	 */
+	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+		ipmi_user_t user;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(user, &intf->users, link) {
+			if (user->handler->ipmi_watchdog_pretimeout)
+				user->handler->ipmi_watchdog_pretimeout(
+					user->handler_data);
+		}
+		rcu_read_unlock();
+	}
+}
+
+static void smi_recv_tasklet(unsigned long val)
+{
+	handle_new_recv_msgs((ipmi_smi_t) val);
+}
+
 /* Handle a new message from the lower layer. */
 void ipmi_smi_msg_received(ipmi_smi_t intf,
 			   struct ipmi_smi_msg *msg)
 {
 	unsigned long flags = 0; /* keep us warning-free. */
-	int rv;
 	int run_to_completion;
 
 
@@ -3843,31 +3918,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 	run_to_completion = intf->run_to_completion;
 	if (!run_to_completion)
 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-	if (!list_empty(&intf->waiting_msgs)) {
-		list_add_tail(&msg->link, &intf->waiting_msgs);
-		if (!run_to_completion)
-			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-		goto out;
-	}
+	list_add_tail(&msg->link, &intf->waiting_msgs);
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
 
-	rv = handle_new_recv_msg(intf, msg);
-	if (rv > 0) {
-		/*
-		 * Could not handle the message now, just add it to a
-		 * list to handle later.
-		 */
-		run_to_completion = intf->run_to_completion;
-		if (!run_to_completion)
-			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-		list_add_tail(&msg->link, &intf->waiting_msgs);
-		if (!run_to_completion)
-			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-	} else if (rv == 0) {
-		ipmi_free_smi_msg(msg);
-	}
-
+	tasklet_schedule(&intf->recv_tasklet);
 out:
 	return;
 }
@@ -3875,16 +3930,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
 
 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
 {
-	ipmi_user_t user;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(user, &intf->users, link) {
-		if (!user->handler->ipmi_watchdog_pretimeout)
-			continue;
-
-		user->handler->ipmi_watchdog_pretimeout(user->handler_data);
-	}
-	rcu_read_unlock();
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+	tasklet_schedule(&intf->recv_tasklet);
 }
 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
 
@@ -3998,28 +4045,12 @@ static void ipmi_timeout_handler(long timeout_period)
 	ipmi_smi_t intf;
 	struct list_head timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
-	struct ipmi_smi_msg *smi_msg, *smi_msg2;
 	unsigned long flags;
 	int i;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		/* See if any waiting messages need to be processed. */
-		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-		list_for_each_entry_safe(smi_msg, smi_msg2,
-					 &intf->waiting_msgs, link) {
-			if (!handle_new_recv_msg(intf, smi_msg)) {
-				list_del(&smi_msg->link);
-				ipmi_free_smi_msg(smi_msg);
-			} else {
-				/*
-				 * To preserve message order, quit if we
-				 * can't handle a message.
-				 */
-				break;
-			}
-		}
-		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		tasklet_schedule(&intf->recv_tasklet);
 
 		/*
 		 * Go through the seq table and find any messages that
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 73ebbb1a3269..01e53cd105dd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -320,16 +320,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
 static void deliver_recv_msg(struct smi_info *smi_info,
 			     struct ipmi_smi_msg *msg)
 {
-	/* Deliver the message to the upper layer with the lock
-	   released. */
-
-	if (smi_info->run_to_completion) {
-		ipmi_smi_msg_received(smi_info->intf, msg);
-	} else {
-		spin_unlock(&(smi_info->si_lock));
-		ipmi_smi_msg_received(smi_info->intf, msg);
-		spin_lock(&(smi_info->si_lock));
-	}
+	/* Deliver the message to the upper layer. */
+	ipmi_smi_msg_received(smi_info->intf, msg);
 }
 
 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -481,9 +473,7 @@ static void handle_flags(struct smi_info *smi_info)
 
 		start_clear_flags(smi_info);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
-		spin_unlock(&(smi_info->si_lock));
 		ipmi_smi_watchdog_pretimeout(smi_info->intf);
-		spin_lock(&(smi_info->si_lock));
 	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
 		/* Messages available. */
 		smi_info->curr_msg = ipmi_alloc_smi_msg();