Diffstat (limited to 'arch/ia64/sn/kernel/xpc_channel.c')
-rw-r--r--   arch/ia64/sn/kernel/xpc_channel.c   216
1 file changed, 115 insertions, 101 deletions
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 94698bea7be0..195ac1b8e262 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -57,6 +57,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
 
 		spin_lock_init(&ch->lock);
 		sema_init(&ch->msg_to_pull_sema, 1);	/* mutex */
+		sema_init(&ch->wdisconnect_sema, 0);	/* event wait */
 
 		atomic_set(&ch->n_on_msg_allocate_wq, 0);
 		init_waitqueue_head(&ch->msg_allocate_wq);
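As context for the two sema_init() calls above: this file predates struct mutex and uses counting semaphores both for mutual exclusion and for event waits. The sketch below is illustrative only and not part of the patch; the identifiers are invented. It shows the convention the inline comments refer to: an initial count of 1 behaves like a mutex, an initial count of 0 behaves like an event that a later up() signals.

/* Illustrative sketch only, not part of the patch; names are invented. */
#include <asm/semaphore.h>	/* 2.6-era semaphore API (sema_init/down/up) */

static struct semaphore example_mutex;	/* initialized with count 1 */
static struct semaphore example_event;	/* initialized with count 0 */

static void example_init(void)
{
	sema_init(&example_mutex, 1);	/* count 1: acts as a mutex */
	sema_init(&example_event, 0);	/* count 0: acts as an event wait */
}

static void example_waiter(void)
{
	down(&example_event);		/* sleeps until example_signal() runs */
}

static void example_signal(void)
{
	up(&example_event);		/* wakes a thread blocked in down() */
}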
@@ -166,6 +167,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	xpc_initialize_channels(part, partid);
 
 	atomic_set(&part->nchannels_active, 0);
+	atomic_set(&part->nchannels_engaged, 0);
 
 
 	/* local_IPI_amo were set to 0 by an earlier memset() */
@@ -555,8 +557,6 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 		sema_init(&ch->notify_queue[i].sema, 0);
 	}
 
-	sema_init(&ch->teardown_sema, 0);	/* event wait */
-
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	ch->flags |= XPC_C_SETUP;
 	spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -626,6 +626,55 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 
 /*
+ * Notify those who wanted to be notified upon delivery of their message.
+ */
+static void
+xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
+{
+	struct xpc_notify *notify;
+	u8 notify_type;
+	s64 get = ch->w_remote_GP.get - 1;
+
+
+	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
+
+		notify = &ch->notify_queue[get % ch->local_nentries];
+
+		/*
+		 * See if the notify entry indicates it was associated with
+		 * a message who's sender wants to be notified. It is possible
+		 * that it is, but someone else is doing or has done the
+		 * notification.
+		 */
+		notify_type = notify->type;
+		if (notify_type == 0 ||
+				cmpxchg(&notify->type, notify_type, 0) !=
+								notify_type) {
+			continue;
+		}
+
+		DBUG_ON(notify_type != XPC_N_CALL);
+
+		atomic_dec(&ch->n_to_notify);
+
+		if (notify->func != NULL) {
+			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
+				"msg_number=%ld, partid=%d, channel=%d\n",
+				(void *) notify, get, ch->partid, ch->number);
+
+			notify->func(reason, ch->partid, ch->number,
+								notify->key);
+
+			dev_dbg(xpc_chan, "notify->func() returned, "
+				"notify=0x%p, msg_number=%ld, partid=%d, "
+				"channel=%d\n", (void *) notify, get,
+				ch->partid, ch->number);
+		}
+	}
+}
+
+
+/*
  * Free up message queues and other stuff that were allocated for the specified
  * channel.
  *
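The loop added above claims each notify entry with cmpxchg() before running the callout. A minimal stand-alone sketch of that idiom (not part of the patch; the helper name is invented) may make the intent clearer: whichever path zeroes notify->type first owns the notification, so a sender cannot be notified twice even if xpc_notify_senders() and the normal delivery path race.

/* Illustrative sketch only, not part of the patch; the helper is invented. */
static int example_claim_notify(u8 *type)
{
	u8 old_type = *type;

	/*
	 * If the entry was already cleared, or another CPU clears it between
	 * our read and the cmpxchg(), the swap fails and we skip the entry.
	 */
	if (old_type == 0 || cmpxchg(type, old_type, 0) != old_type) {
		return 0;	/* someone else did, or is doing, the notify */
	}
	return 1;		/* we now own this notification */
}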
@@ -669,9 +718,6 @@ xpc_free_msgqueues(struct xpc_channel *ch)
 		ch->remote_msgqueue = NULL;
 		kfree(ch->notify_queue);
 		ch->notify_queue = NULL;
-
-		/* in case someone is waiting for the teardown to complete */
-		up(&ch->teardown_sema);
 	}
 }
 
@@ -683,7 +729,7 @@ static void
 xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 {
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
-	u32 ch_flags = ch->flags;
+	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
 
 
 	DBUG_ON(!spin_is_locked(&ch->lock));
@@ -701,12 +747,13 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	}
 	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
 
-	/* it's now safe to free the channel's message queues */
-
-	xpc_free_msgqueues(ch);
-	DBUG_ON(ch->flags & XPC_C_SETUP);
+	if (part->act_state == XPC_P_DEACTIVATING) {
+		/* can't proceed until the other side disengages from us */
+		if (xpc_partition_engaged(1UL << ch->partid)) {
+			return;
+		}
 
-	if (part->act_state != XPC_P_DEACTIVATING) {
+	} else {
 
 		/* as long as the other side is up do the full protocol */
 
@@ -724,16 +771,33 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 		}
 	}
 
+	/* wake those waiting for notify completion */
+	if (atomic_read(&ch->n_to_notify) > 0) {
+		/* >>> we do callout while holding ch->lock */
+		xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put);
+	}
+
 	/* both sides are disconnected now */
 
-	ch->flags = XPC_C_DISCONNECTED;	/* clear all flags, but this one */
+	/* it's now safe to free the channel's message queues */
+	xpc_free_msgqueues(ch);
+
+	/* mark disconnected, clear all other flags except XPC_C_WDISCONNECT */
+	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));
 
 	atomic_dec(&part->nchannels_active);
 
-	if (ch_flags & XPC_C_WASCONNECTED) {
+	if (channel_was_connected) {
 		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
 			"reason=%d\n", ch->number, ch->partid, ch->reason);
 	}
+
+	/* wake the thread that is waiting for this channel to disconnect */
+	if (ch->flags & XPC_C_WDISCONNECT) {
+		spin_unlock_irqrestore(&ch->lock, *irq_flags);
+		up(&ch->wdisconnect_sema);
+		spin_lock_irqsave(&ch->lock, *irq_flags);
+	}
 }
 
 
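The new tail of xpc_process_disconnect() drops ch->lock around up(&ch->wdisconnect_sema) so the woken thread can run without contending for the lock. The waiter itself, xpc_disconnect_wait(), is only mentioned later in this diff, so the sketch below is a guess at its shape rather than the actual code: it blocks on wdisconnect_sema and then clears XPC_C_WDISCONNECT under the channel lock once the disconnect has completed.

/* Hypothetical sketch of the waiter side; not taken from the patch. */
static void example_disconnect_wait(struct xpc_channel *ch)
{
	unsigned long irq_flags;

	/* released by the up() in xpc_process_disconnect() above */
	down(&ch->wdisconnect_sema);

	spin_lock_irqsave(&ch->lock, irq_flags);
	ch->flags &= ~XPC_C_WDISCONNECT;	/* disconnect has completed */
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}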
@@ -764,7 +828,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 		/*
 		 * If RCLOSEREQUEST is set, we're probably waiting for
 		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
-		 * with this RCLOSEQREUQEST in the IPI_flags.
+		 * with this RCLOSEREQUEST in the IPI_flags.
 		 */
 
 		if (ch->flags & XPC_C_RCLOSEREQUEST) {
@@ -852,7 +916,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 			"channel=%d\n", args->msg_size, args->local_nentries,
 			ch->partid, ch->number);
 
-		if ((ch->flags & XPC_C_DISCONNECTING) ||
+		if ((ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) ||
 					part->act_state == XPC_P_DEACTIVATING) {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
@@ -1040,55 +1104,6 @@ xpc_connect_channel(struct xpc_channel *ch)
 
 
 /*
- * Notify those who wanted to be notified upon delivery of their message.
- */
-static void
-xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
-{
-	struct xpc_notify *notify;
-	u8 notify_type;
-	s64 get = ch->w_remote_GP.get - 1;
-
-
-	while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
-
-		notify = &ch->notify_queue[get % ch->local_nentries];
-
-		/*
-		 * See if the notify entry indicates it was associated with
-		 * a message who's sender wants to be notified. It is possible
-		 * that it is, but someone else is doing or has done the
-		 * notification.
-		 */
-		notify_type = notify->type;
-		if (notify_type == 0 ||
-				cmpxchg(&notify->type, notify_type, 0) !=
-								notify_type) {
-			continue;
-		}
-
-		DBUG_ON(notify_type != XPC_N_CALL);
-
-		atomic_dec(&ch->n_to_notify);
-
-		if (notify->func != NULL) {
-			dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
-				"msg_number=%ld, partid=%d, channel=%d\n",
-				(void *) notify, get, ch->partid, ch->number);
-
-			notify->func(reason, ch->partid, ch->number,
-								notify->key);
-
-			dev_dbg(xpc_chan, "notify->func() returned, "
-				"notify=0x%p, msg_number=%ld, partid=%d, "
-				"channel=%d\n", (void *) notify, get,
-				ch->partid, ch->number);
-		}
-	}
-}
-
-
-/*
  * Clear some of the msg flags in the local message queue.
  */
 static inline void
@@ -1240,6 +1255,7 @@ xpc_process_channel_activity(struct xpc_partition *part)
 	u64 IPI_amo, IPI_flags;
 	struct xpc_channel *ch;
 	int ch_number;
+	u32 ch_flags;
 
 
 	IPI_amo = xpc_get_IPI_flags(part);
@@ -1266,8 +1282,9 @@ xpc_process_channel_activity(struct xpc_partition *part)
 			xpc_process_openclose_IPI(part, ch_number, IPI_flags);
 		}
 
+		ch_flags = ch->flags;	/* need an atomic snapshot of flags */
 
-		if (ch->flags & XPC_C_DISCONNECTING) {
+		if (ch_flags & XPC_C_DISCONNECTING) {
 			spin_lock_irqsave(&ch->lock, irq_flags);
 			xpc_process_disconnect(ch, &irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -1278,9 +1295,9 @@ xpc_process_channel_activity(struct xpc_partition *part)
 			continue;
 		}
 
-		if (!(ch->flags & XPC_C_CONNECTED)) {
-			if (!(ch->flags & XPC_C_OPENREQUEST)) {
-				DBUG_ON(ch->flags & XPC_C_SETUP);
+		if (!(ch_flags & XPC_C_CONNECTED)) {
+			if (!(ch_flags & XPC_C_OPENREQUEST)) {
+				DBUG_ON(ch_flags & XPC_C_SETUP);
 				(void) xpc_connect_channel(ch);
 			} else {
 				spin_lock_irqsave(&ch->lock, irq_flags);
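The ch_flags snapshot introduced in the two hunks above exists because these tests run without ch->lock held: reading ch->flags once and testing the local copy guarantees that the chain of if-checks sees a single, self-consistent value even if another CPU changes the flags mid-sequence. A tiny illustration (invented name, not part of the patch):

/* Illustrative sketch only; not part of the patch. */
static void example_flag_checks(struct xpc_channel *ch)
{
	u32 ch_flags = ch->flags;	/* one unlocked read, used throughout */

	if (ch_flags & XPC_C_DISCONNECTING) {
		return;			/* consistent with the test below ... */
	}
	if (!(ch_flags & XPC_C_CONNECTED)) {
		/* ... because both tests looked at the same snapshot */
	}
}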
@@ -1305,8 +1322,8 @@ xpc_process_channel_activity(struct xpc_partition *part)
 
 
 /*
- * XPC's heartbeat code calls this function to inform XPC that a partition has
- * gone down. XPC responds by tearing down the XPartition Communication
+ * XPC's heartbeat code calls this function to inform XPC that a partition is
+ * going down. XPC responds by tearing down the XPartition Communication
  * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
@@ -1314,7 +1331,7 @@ xpc_process_channel_activity(struct xpc_partition *part)
  * at the same time.
  */
 void
-xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason)
+xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
 {
 	unsigned long irq_flags;
 	int ch_number;
@@ -1330,12 +1347,11 @@ xpc_partition_down(struct xpc_partition *part, enum xpc_retval reason)
 	}
 
 
-	/* disconnect all channels associated with the downed partition */
+	/* disconnect channels associated with the partition going down */
 
 	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
 		ch = &part->channels[ch_number];
 
-
 		xpc_msgqueue_ref(ch);
 		spin_lock_irqsave(&ch->lock, irq_flags);
 
@@ -1370,6 +1386,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 	 * this partition.
 	 */
 
+	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
 	DBUG_ON(atomic_read(&part->nchannels_active) != 0);
 	DBUG_ON(part->setup_state != XPC_P_SETUP);
 	part->setup_state = XPC_P_WTEARDOWN;
@@ -1506,8 +1523,12 @@ xpc_initiate_disconnect(int ch_number)
 
 			spin_lock_irqsave(&ch->lock, irq_flags);
 
-			XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
-						&irq_flags);
+			if (!(ch->flags & XPC_C_DISCONNECTED)) {
+				ch->flags |= XPC_C_WDISCONNECT;
+
+				XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
+								&irq_flags);
+			}
 
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 
@@ -1523,8 +1544,9 @@ xpc_initiate_disconnect(int ch_number)
 /*
  * To disconnect a channel, and reflect it back to all who may be waiting.
  *
- * >>> An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
- * >>> xpc_free_msgqueues().
+ * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
+ * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
+ * xpc_disconnect_wait().
  *
  * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
  */
@@ -1532,7 +1554,7 @@ void
 xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 		enum xpc_retval reason, unsigned long *irq_flags)
 {
-	u32 flags;
+	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
 
 
 	DBUG_ON(!spin_is_locked(&ch->lock));
@@ -1547,61 +1569,53 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 
 	XPC_SET_REASON(ch, reason, line);
 
-	flags = ch->flags;
+	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
 	/* some of these may not have been set */
 	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
 			XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
 			XPC_C_CONNECTING | XPC_C_CONNECTED);
 
-	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
 	xpc_IPI_send_closerequest(ch, irq_flags);
 
-	if (flags & XPC_C_CONNECTED) {
+	if (channel_was_connected) {
 		ch->flags |= XPC_C_WASCONNECTED;
 	}
 
+	spin_unlock_irqrestore(&ch->lock, *irq_flags);
+
+	/* wake all idle kthreads so they can exit */
 	if (atomic_read(&ch->kthreads_idle) > 0) {
-		/* wake all idle kthreads so they can exit */
 		wake_up_all(&ch->idle_wq);
 	}
 
-	spin_unlock_irqrestore(&ch->lock, *irq_flags);
-
-
 	/* wake those waiting to allocate an entry from the local msg queue */
-
 	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) {
 		wake_up(&ch->msg_allocate_wq);
 	}
 
-	/* wake those waiting for notify completion */
-
-	if (atomic_read(&ch->n_to_notify) > 0) {
-		xpc_notify_senders(ch, reason, ch->w_local_GP.put);
-	}
-
 	spin_lock_irqsave(&ch->lock, *irq_flags);
 }
 
 
 void
-xpc_disconnected_callout(struct xpc_channel *ch)
+xpc_disconnecting_callout(struct xpc_channel *ch)
 {
 	/*
-	 * Let the channel's registerer know that the channel is now
+	 * Let the channel's registerer know that the channel is being
 	 * disconnected. We don't want to do this if the registerer was never
-	 * informed of a connection being made, unless the disconnect was for
-	 * abnormal reasons.
+	 * informed of a connection being made.
 	 */
 
 	if (ch->func != NULL) {
-		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
-			"channel=%d\n", ch->reason, ch->partid, ch->number);
+		dev_dbg(xpc_chan, "ch->func() called, reason=xpcDisconnecting,"
+			" partid=%d, channel=%d\n", ch->partid, ch->number);
 
-		ch->func(ch->reason, ch->partid, ch->number, NULL, ch->key);
+		ch->func(xpcDisconnecting, ch->partid, ch->number, NULL,
+								ch->key);
 
-		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
-			"channel=%d\n", ch->reason, ch->partid, ch->number);
+		dev_dbg(xpc_chan, "ch->func() returned, reason="
+			"xpcDisconnecting, partid=%d, channel=%d\n",
+			ch->partid, ch->number);
 	}
 }
 
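For reference, the renamed xpc_disconnecting_callout() now always reports xpcDisconnecting to the channel's registerer instead of ch->reason. The registerer-side function is not part of this file; the prototype below is inferred from the call sites above (ch->func(xpcDisconnecting, ch->partid, ch->number, NULL, ch->key)), so treat it as a sketch rather than the real typedef.

/* Hypothetical registerer callback; prototype inferred from the call sites. */
static void example_channel_func(enum xpc_retval reason, partid_t partid,
				int ch_number, void *data, void *key)
{
	switch (reason) {
	case xpcConnected:
		/* the channel is up; it is now legal to send messages */
		break;
	case xpcDisconnecting:
		/* stop using the channel; a disconnect is in progress */
		break;
	default:
		/* other reasons, e.g. delivery of a received message */
		break;
	}
}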
@@ -1848,7 +1862,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 		xpc_notify_func func, void *key)
 {
 	enum xpc_retval ret = xpcSuccess;
-	struct xpc_notify *notify = NULL;	// >>> to keep the compiler happy!!
+	struct xpc_notify *notify = notify;
 	s64 put, msg_number = msg->number;
 
 