author      Dean Nelson <dcn@sgi.com>          2006-11-22 09:25:00 -0500
committer   Tony Luck <tony.luck@intel.com>    2006-12-12 14:48:53 -0500
commit      a460ef8d0a98ac9ef6b829ae292c9b6c13bc0120 (patch)
tree        da52a77a9fee80f98a8a82567814f33d2341234b /arch
parent      1cf24bdbbbd2eb5439796dc399ab1649d150ed1d (diff)
[IA64] fix possible XPC deadlock when disconnecting
This patch eliminates a potential deadlock that is possible when XPC disconnects a channel to a partition that has gone down. The deadlock occurs if at least one of the kthreads created by XPC to make callouts to the channel's registerer is detained in the registerer and will not return to XPC until some registerer request occurs on the now downed partition. The potential for deadlock is removed by ensuring that there is always a kthread available to make the channel-disconnecting callout to the registerer.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
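For illustration, here is a minimal userspace sketch of the reservation pattern the fix relies on, written with C11 atomics. It models the atomic_inc_not_zero() gate that xpc_create_kthreads() gains in the diff below; the type, flag, and function names in the sketch are invented for this example and are not XPC code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the channel flags used by the patch. */
#define C_DISCONNECTING                 0x1
#define C_DISCONNECTINGCALLOUT_MADE     0x2

struct channel {
        atomic_int kthreads_assigned;   /* models ch->kthreads_assigned */
        unsigned long flags;            /* models ch->flags */
};

/* Take a reference only while the count is still nonzero: the
 * userspace equivalent of the kernel's atomic_inc_not_zero(). */
static bool inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0)
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        return false;
}

/* Models the branch the patch adds to xpc_create_kthreads(): the
 * disconnect path may only piggyback on a still-live kthread count,
 * while normal callers back off once disconnecting has begun. */
static bool may_create_kthread(struct channel *ch, bool ignore_disconnecting)
{
        if (ignore_disconnecting)
                return inc_not_zero(&ch->kthreads_assigned);
        if (ch->flags & C_DISCONNECTING)
                return false;
        atomic_fetch_add(&ch->kthreads_assigned, 1);
        return true;
}

int main(void)
{
        struct channel ch = { .kthreads_assigned = 1,
                              .flags = C_DISCONNECTING };

        /* a normal caller must not add kthreads while disconnecting... */
        printf("normal caller:  %d\n", may_create_kthread(&ch, false));
        /* ...but the disconnecting-callout kthread may still be created */
        printf("callout caller: %d\n", may_create_kthread(&ch, true));
        return 0;
}

The ordering is the point: once the kthread count has dropped to zero, the disconnecting callout must already have been made, which is exactly what the BUG_ON() in the real patch asserts.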
Diffstat (limited to 'arch')
-rw-r--r--   arch/ia64/sn/kernel/xpc_channel.c   15
-rw-r--r--   arch/ia64/sn/kernel/xpc_main.c      64
2 files changed, 53 insertions(+), 26 deletions(-)
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 1f3540826e68..c08db9c2375d 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -632,7 +632,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
                 ch->number, ch->partid);
 
         spin_unlock_irqrestore(&ch->lock, *irq_flags);
-        xpc_create_kthreads(ch, 1);
+        xpc_create_kthreads(ch, 1, 0);
         spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
@@ -754,12 +754,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
         /* make sure all activity has settled down first */
 
-        if (atomic_read(&ch->references) > 0 ||
-                        ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-                        !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE))) {
+        if (atomic_read(&ch->kthreads_assigned) > 0 ||
+                        atomic_read(&ch->references) > 0) {
                 return;
         }
-        DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
+        DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+                !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));
 
         if (part->act_state == XPC_P_DEACTIVATING) {
                 /* can't proceed until the other side disengages from us */
@@ -1651,6 +1651,11 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
         /* wake all idle kthreads so they can exit */
         if (atomic_read(&ch->kthreads_idle) > 0) {
                 wake_up_all(&ch->idle_wq);
+
+        } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+                        !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+                /* start a kthread that will do the xpcDisconnecting callout */
+                xpc_create_kthreads(ch, 1, 1);
         }
 
         /* wake those waiting to allocate an entry from the local msg queue */
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index fa96dfc0e1aa..7a387d237363 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -681,7 +681,7 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
         dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
                 needed, ch->partid, ch->number);
 
-        xpc_create_kthreads(ch, needed);
+        xpc_create_kthreads(ch, needed, 0);
 }
 
 
@@ -775,26 +775,28 @@ xpc_daemonize_kthread(void *args)
                 xpc_kthread_waitmsgs(part, ch);
         }
 
-        if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
-                spin_lock_irqsave(&ch->lock, irq_flags);
-                if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-                                !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
-                        ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
-                        spin_unlock_irqrestore(&ch->lock, irq_flags);
+        /* let registerer know that connection is disconnecting */
 
-                        xpc_disconnect_callout(ch, xpcDisconnecting);
-
-                        spin_lock_irqsave(&ch->lock, irq_flags);
-                        ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
-                }
+        spin_lock_irqsave(&ch->lock, irq_flags);
+        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
+                        !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                 spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+                xpc_disconnect_callout(ch, xpcDisconnecting);
+
+                spin_lock_irqsave(&ch->lock, irq_flags);
+                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
+        }
+        spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+        if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
                 if (atomic_dec_return(&part->nchannels_engaged) == 0) {
                         xpc_mark_partition_disengaged(part);
                         xpc_IPI_send_disengage(part);
                 }
         }
 
-
         xpc_msgqueue_deref(ch);
 
         dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
@@ -818,7 +820,8 @@ xpc_daemonize_kthread(void *args)
  * partition.
  */
 void
-xpc_create_kthreads(struct xpc_channel *ch, int needed)
+xpc_create_kthreads(struct xpc_channel *ch, int needed,
+                        int ignore_disconnecting)
 {
         unsigned long irq_flags;
         pid_t pid;
@@ -833,16 +836,38 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
                  * kthread. That kthread is responsible for doing the
                  * counterpart to the following before it exits.
                  */
+                if (ignore_disconnecting) {
+                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
+                                /* kthreads assigned had gone to zero */
+                                BUG_ON(!(ch->flags &
+                                        XPC_C_DISCONNECTINGCALLOUT_MADE));
+                                break;
+                        }
+
+                } else if (ch->flags & XPC_C_DISCONNECTING) {
+                        break;
+
+                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
+                        if (atomic_inc_return(&part->nchannels_engaged) == 1)
+                                xpc_mark_partition_engaged(part);
+                }
                 (void) xpc_part_ref(part);
                 xpc_msgqueue_ref(ch);
-                if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
-                    atomic_inc_return(&part->nchannels_engaged) == 1) {
-                        xpc_mark_partition_engaged(part);
-                }
 
                 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
                 if (pid < 0) {
                         /* the fork failed */
+
+                        /*
+                         * NOTE: if (ignore_disconnecting &&
+                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
+                         * then we'll deadlock if all other kthreads assigned
+                         * to this channel are blocked in the channel's
+                         * registerer, because the only thing that will unblock
+                         * them is the xpcDisconnecting callout that this
+                         * failed kernel_thread would have made.
+                         */
+
                         if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                             atomic_dec_return(&part->nchannels_engaged) == 0) {
                                 xpc_mark_partition_disengaged(part);
@@ -857,9 +882,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
                          * Flag this as an error only if we have an
                          * insufficient #of kthreads for the channel
                          * to function.
-                         *
-                         * No xpc_msgqueue_ref() is needed here since
-                         * the channel mgr is doing this.
                          */
                         spin_lock_irqsave(&ch->lock, irq_flags);
                         XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
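As a closing note on the interface change: every caller now states explicitly whether a disconnecting channel may still get a kthread. Below is a hedged, self-contained recap of the call patterns visible in the hunks above; the stub body is a placeholder so the snippet compiles on its own (the real function lives in xpc_main.c).

struct xpc_channel;     /* opaque here; defined in the XPC headers */

/* placeholder with the post-patch signature */
static void xpc_create_kthreads(struct xpc_channel *ch, int needed,
                                int ignore_disconnecting)
{
        (void) ch; (void) needed; (void) ignore_disconnecting;
}

int main(void)
{
        struct xpc_channel *ch = 0;     /* stand-in channel for the example */
        int needed = 1;

        /* xpc_process_connect() and xpc_activate_kthreads(): normal
         * paths pass 0 and back off once the channel is disconnecting */
        xpc_create_kthreads(ch, needed, 0);

        /* xpc_disconnect_channel(): passes 1 so the one kthread that
         * makes the xpcDisconnecting callout is still created */
        xpc_create_kthreads(ch, 1, 1);

        return 0;
}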