author    Dean Nelson <dcn@sgi.com>       2006-11-22 09:25:00 -0500
committer Tony Luck <tony.luck@intel.com> 2006-12-12 14:48:53 -0500
commit    a460ef8d0a98ac9ef6b829ae292c9b6c13bc0120
tree      da52a77a9fee80f98a8a82567814f33d2341234b /arch/ia64/sn/kernel/xpc_channel.c
parent    1cf24bdbbbd2eb5439796dc399ab1649d150ed1d
[IA64] fix possible XPC deadlock when disconnecting
This patch eliminates a potential deadlock that is possible when XPC disconnects
a channel to a partition that has gone down. This deadlock will occur if at least
one of the kthreads created by XPC for the purpose of making callouts to the
channel's registerer is detained in the registerer and will not be returning back
to XPC until some registerer request occurs on the now downed partition. The
potential for a deadlock is removed by ensuring that there always is a kthread
available to make the channel disconnecting callout to the registerer.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
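The idea is easier to see outside the kernel. Below is a minimal userspace sketch
of the same pattern, assuming a hypothetical channel structure and callout
function (none of these names come from the XPC code): when no idle worker is
left and the disconnecting callout is still owed, a fresh thread is created
solely to deliver it, so a worker detained in the registerer can no longer stall
the disconnect.

/* Illustrative sketch only -- not the XPC implementation. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct channel {
	atomic_int kthreads_idle;          /* workers currently waiting for work */
	int connected_callout_made;        /* "connected" callout already delivered */
	int disconnecting_callout_made;    /* "disconnecting" callout already delivered */
};

/* Deliver the disconnecting callout; in XPC this would call the registerer. */
static void *disconnecting_callout(void *arg)
{
	struct channel *ch = arg;

	printf("disconnecting callout for channel %p\n", (void *)ch);
	ch->disconnecting_callout_made = 1;
	return NULL;
}

static void disconnect_channel(struct channel *ch)
{
	pthread_t tid;

	if (atomic_load(&ch->kthreads_idle) > 0)
		return;    /* an idle worker exists and can be woken to do it */

	if (ch->connected_callout_made && !ch->disconnecting_callout_made) {
		/*
		 * No idle worker, and busy ones may be detained in the
		 * registerer indefinitely: spawn a dedicated thread so the
		 * disconnecting callout is guaranteed to be delivered.
		 */
		pthread_create(&tid, NULL, disconnecting_callout, ch);
		pthread_join(tid, NULL);
	}
}

int main(void)
{
	struct channel ch = { .connected_callout_made = 1 };

	disconnect_channel(&ch);    /* build with: cc -pthread sketch.c */
	return 0;
}

The kernel patch below takes the analogous step via xpc_create_kthreads(); judging
from the call sites in this diff, its new third argument appears to distinguish the
one guaranteed disconnect-callout kthread (1) from ordinary kthread creation (0).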
Diffstat (limited to 'arch/ia64/sn/kernel/xpc_channel.c')
-rw-r--r--   arch/ia64/sn/kernel/xpc_channel.c   15
1 files changed, 10 insertions, 5 deletions
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 1f3540826e68..c08db9c2375d 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -632,7 +632,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 			ch->number, ch->partid);
 
 	spin_unlock_irqrestore(&ch->lock, *irq_flags);
-	xpc_create_kthreads(ch, 1);
+	xpc_create_kthreads(ch, 1, 0);
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
@@ -754,12 +754,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	/* make sure all activity has settled down first */
 
-	if (atomic_read(&ch->references) > 0 ||
-			((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
+	if (atomic_read(&ch->kthreads_assigned) > 0 ||
+			atomic_read(&ch->references) > 0) {
 		return;
 	}
-	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
 
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		/* can't proceed until the other side disengages from us */
@@ -1651,6 +1651,11 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 	/* wake all idle kthreads so they can exit */
 	if (atomic_read(&ch->kthreads_idle) > 0) {
 		wake_up_all(&ch->idle_wq);
+
+	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+		/* start a kthread that will do the xpcDisconnecting callout */
+		xpc_create_kthreads(ch, 1, 1);
 	}
 
 	/* wake those waiting to allocate an entry from the local msg queue */