Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c  278
1 file changed, 94 insertions(+), 184 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 8780d5d00f6..563aaf4a2ff 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -26,7 +26,7 @@
26 * Caveats: 26 * Caveats:
27 * 27 *
28 * . We currently have no way to determine which nasid an IPI came 28 * . We currently have no way to determine which nasid an IPI came
29 * from. Thus, xpc_IPI_send() does a remote AMO write followed by 29 * from. Thus, xpc_IPI_send() does a remote AMO write followed by
30 * an IPI. The AMO indicates where data is to be pulled from, so 30 * an IPI. The AMO indicates where data is to be pulled from, so
31 * after the IPI arrives, the remote partition checks the AMO word. 31 * after the IPI arrives, the remote partition checks the AMO word.
32 * The IPI can actually arrive before the AMO however, so other code 32 * The IPI can actually arrive before the AMO however, so other code
@@ -89,9 +89,9 @@ static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
89static int xpc_hb_check_min_interval = 10; 89static int xpc_hb_check_min_interval = 10;
90static int xpc_hb_check_max_interval = 120; 90static int xpc_hb_check_max_interval = 120;
91 91
92int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT; 92int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
93static int xpc_disengage_request_min_timelimit; /* = 0 */ 93static int xpc_disengage_min_timelimit; /* = 0 */
94static int xpc_disengage_request_max_timelimit = 120; 94static int xpc_disengage_max_timelimit = 120;
95 95
96static ctl_table xpc_sys_xpc_hb_dir[] = { 96static ctl_table xpc_sys_xpc_hb_dir[] = {
97 { 97 {
@@ -124,14 +124,14 @@ static ctl_table xpc_sys_xpc_dir[] = {
124 .child = xpc_sys_xpc_hb_dir}, 124 .child = xpc_sys_xpc_hb_dir},
125 { 125 {
126 .ctl_name = CTL_UNNUMBERED, 126 .ctl_name = CTL_UNNUMBERED,
127 .procname = "disengage_request_timelimit", 127 .procname = "disengage_timelimit",
128 .data = &xpc_disengage_request_timelimit, 128 .data = &xpc_disengage_timelimit,
129 .maxlen = sizeof(int), 129 .maxlen = sizeof(int),
130 .mode = 0644, 130 .mode = 0644,
131 .proc_handler = &proc_dointvec_minmax, 131 .proc_handler = &proc_dointvec_minmax,
132 .strategy = &sysctl_intvec, 132 .strategy = &sysctl_intvec,
133 .extra1 = &xpc_disengage_request_min_timelimit, 133 .extra1 = &xpc_disengage_min_timelimit,
134 .extra2 = &xpc_disengage_request_max_timelimit}, 134 .extra2 = &xpc_disengage_max_timelimit},
135 {} 135 {}
136}; 136};
137static ctl_table xpc_sys_dir[] = { 137static ctl_table xpc_sys_dir[] = {
@@ -144,8 +144,8 @@ static ctl_table xpc_sys_dir[] = {
144}; 144};
145static struct ctl_table_header *xpc_sysctl; 145static struct ctl_table_header *xpc_sysctl;
146 146
147/* non-zero if any remote partition disengage request was timed out */ 147/* non-zero if any remote partition disengage was timed out */
148int xpc_disengage_request_timedout; 148int xpc_disengage_timedout;
149 149
150/* #of activate IRQs received */ 150/* #of activate IRQs received */
151atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0); 151atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0);
@@ -184,38 +184,36 @@ void (*xpc_online_heartbeat) (void);
184void (*xpc_check_remote_hb) (void); 184void (*xpc_check_remote_hb) (void);
185 185
186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); 186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
187void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
187u64 (*xpc_get_IPI_flags) (struct xpc_partition *part); 188u64 (*xpc_get_IPI_flags) (struct xpc_partition *part);
189void (*xpc_process_msg_IPI) (struct xpc_partition *part, int ch_number);
190int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
188struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); 191struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
189 192
190void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp, 193void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
191 u64 remote_rp_pa, int nasid); 194 u64 remote_rp_pa, int nasid);
195void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
196void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
197void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
192 198
193void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected); 199void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected);
194enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part); 200enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
195void (*xpc_teardown_infrastructure) (struct xpc_partition *part); 201void (*xpc_teardown_infrastructure) (struct xpc_partition *part);
196 202
197void (*xpc_mark_partition_engaged) (struct xpc_partition *part); 203void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
198void (*xpc_mark_partition_disengaged) (struct xpc_partition *part); 204int (*xpc_partition_engaged) (short partid);
199void (*xpc_request_partition_disengage) (struct xpc_partition *part); 205int (*xpc_any_partition_engaged) (void);
200void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *part); 206void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
201u64 (*xpc_partition_engaged) (u64 partid_mask); 207void (*xpc_assume_partition_disengaged) (short partid);
202u64 (*xpc_partition_disengage_requested) (u64 partid_mask); 208
203void (*xpc_clear_partition_engaged) (u64 partid_mask); 209void (*xpc_send_channel_closerequest) (struct xpc_channel *ch,
204void (*xpc_clear_partition_disengage_request) (u64 partid_mask); 210 unsigned long *irq_flags);
205 211void (*xpc_send_channel_closereply) (struct xpc_channel *ch,
206void (*xpc_IPI_send_local_activate) (int from_nasid); 212 unsigned long *irq_flags);
207void (*xpc_IPI_send_activated) (struct xpc_partition *part); 213void (*xpc_send_channel_openrequest) (struct xpc_channel *ch,
208void (*xpc_IPI_send_local_reactivate) (int from_nasid); 214 unsigned long *irq_flags);
209void (*xpc_IPI_send_disengage) (struct xpc_partition *part); 215void (*xpc_send_channel_openreply) (struct xpc_channel *ch,
210 216 unsigned long *irq_flags);
211void (*xpc_IPI_send_closerequest) (struct xpc_channel *ch,
212 unsigned long *irq_flags);
213void (*xpc_IPI_send_closereply) (struct xpc_channel *ch,
214 unsigned long *irq_flags);
215void (*xpc_IPI_send_openrequest) (struct xpc_channel *ch,
216 unsigned long *irq_flags);
217void (*xpc_IPI_send_openreply) (struct xpc_channel *ch,
218 unsigned long *irq_flags);
219 217
220enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags, 218enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
221 void *payload, u16 payload_size, u8 notify_type, 219 void *payload, u16 payload_size, u8 notify_type,
@@ -223,19 +221,19 @@ enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
223void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg); 221void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);
224 222
225/* 223/*
226 * Timer function to enforce the timelimit on the partition disengage request. 224 * Timer function to enforce the timelimit on the partition disengage.
227 */ 225 */
228static void 226static void
229xpc_timeout_partition_disengage_request(unsigned long data) 227xpc_timeout_partition_disengage(unsigned long data)
230{ 228{
231 struct xpc_partition *part = (struct xpc_partition *)data; 229 struct xpc_partition *part = (struct xpc_partition *)data;
232 230
233 DBUG_ON(time_is_after_jiffies(part->disengage_request_timeout)); 231 DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
234 232
235 (void)xpc_partition_disengaged(part); 233 (void)xpc_partition_disengaged(part);
236 234
237 DBUG_ON(part->disengage_request_timeout != 0); 235 DBUG_ON(part->disengage_timeout != 0);
238 DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0); 236 DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
239} 237}
240 238
241/* 239/*
@@ -464,7 +462,7 @@ xpc_activating(void *__partid)
464 462
465 if (part->reason == xpReactivating) { 463 if (part->reason == xpReactivating) {
466 /* interrupting ourselves results in activating partition */ 464 /* interrupting ourselves results in activating partition */
467 xpc_IPI_send_local_reactivate(part->reactivate_nasid); 465 xpc_request_partition_reactivation(part);
468 } 466 }
469 467
470 return 0; 468 return 0;
@@ -496,82 +494,6 @@ xpc_activate_partition(struct xpc_partition *part)
496 } 494 }
497} 495}
498 496
499/*
500 * Check to see if there is any channel activity to/from the specified
501 * partition.
502 */
503static void
504xpc_check_for_channel_activity(struct xpc_partition *part)
505{
506 u64 IPI_amo;
507 unsigned long irq_flags;
508
509/* this needs to be uncommented, but I'm thinking this function and the */
510/* ones that call it need to be moved into xpc_sn2.c... */
511 IPI_amo = 0; /* = xpc_IPI_receive(part->local_IPI_amo_va); */
512 if (IPI_amo == 0)
513 return;
514
515 spin_lock_irqsave(&part->IPI_lock, irq_flags);
516 part->local_IPI_amo |= IPI_amo;
517 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
518
519 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
520 XPC_PARTID(part), IPI_amo);
521
522 xpc_wakeup_channel_mgr(part);
523}
524
525/*
526 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
527 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
528 * than one partition, we use an AMO_t structure per partition to indicate
529 * whether a partition has sent an IPI or not. If it has, then wake up the
530 * associated kthread to handle it.
531 *
532 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
533 * running on other partitions.
534 *
535 * Noteworthy Arguments:
536 *
537 * irq - Interrupt ReQuest number. NOT USED.
538 *
539 * dev_id - partid of IPI's potential sender.
540 */
541irqreturn_t
542xpc_notify_IRQ_handler(int irq, void *dev_id)
543{
544 short partid = (short)(u64)dev_id;
545 struct xpc_partition *part = &xpc_partitions[partid];
546
547 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
548
549 if (xpc_part_ref(part)) {
550 xpc_check_for_channel_activity(part);
551
552 xpc_part_deref(part);
553 }
554 return IRQ_HANDLED;
555}
556
557/*
558 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
559 * because the write to their associated IPI amo completed after the IRQ/IPI
560 * was received.
561 */
562void
563xpc_dropped_IPI_check(struct xpc_partition *part)
564{
565 if (xpc_part_ref(part)) {
566 xpc_check_for_channel_activity(part);
567
568 part->dropped_IPI_timer.expires = jiffies +
569 XPC_P_DROPPED_IPI_WAIT_INTERVAL;
570 add_timer(&part->dropped_IPI_timer);
571 xpc_part_deref(part);
572 }
573}
574
575void 497void
576xpc_activate_kthreads(struct xpc_channel *ch, int needed) 498xpc_activate_kthreads(struct xpc_channel *ch, int needed)
577{ 499{
@@ -616,7 +538,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
616 do { 538 do {
617 /* deliver messages to their intended recipients */ 539 /* deliver messages to their intended recipients */
618 540
619 while (ch->w_local_GP.get < ch->w_remote_GP.put && 541 while (xpc_n_of_deliverable_msgs(ch) > 0 &&
620 !(ch->flags & XPC_C_DISCONNECTING)) { 542 !(ch->flags & XPC_C_DISCONNECTING)) {
621 xpc_deliver_msg(ch); 543 xpc_deliver_msg(ch);
622 } 544 }
@@ -632,7 +554,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
632 "wait_event_interruptible_exclusive()\n"); 554 "wait_event_interruptible_exclusive()\n");
633 555
634 (void)wait_event_interruptible_exclusive(ch->idle_wq, 556 (void)wait_event_interruptible_exclusive(ch->idle_wq,
635 (ch->w_local_GP.get < ch->w_remote_GP.put || 557 (xpc_n_of_deliverable_msgs(ch) > 0 ||
636 (ch->flags & XPC_C_DISCONNECTING))); 558 (ch->flags & XPC_C_DISCONNECTING)));
637 559
638 atomic_dec(&ch->kthreads_idle); 560 atomic_dec(&ch->kthreads_idle);
@@ -677,7 +599,7 @@ xpc_kthread_start(void *args)
677 * additional kthreads to help deliver them. We only 599 * additional kthreads to help deliver them. We only
678 * need one less than total #of messages to deliver. 600 * need one less than total #of messages to deliver.
679 */ 601 */
680 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; 602 n_needed = xpc_n_of_deliverable_msgs(ch) - 1;
681 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) 603 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
682 xpc_activate_kthreads(ch, n_needed); 604 xpc_activate_kthreads(ch, n_needed);
683 605
@@ -703,11 +625,9 @@ xpc_kthread_start(void *args)
703 } 625 }
704 spin_unlock_irqrestore(&ch->lock, irq_flags); 626 spin_unlock_irqrestore(&ch->lock, irq_flags);
705 627
706 if (atomic_dec_return(&ch->kthreads_assigned) == 0) { 628 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
707 if (atomic_dec_return(&part->nchannels_engaged) == 0) { 629 atomic_dec_return(&part->nchannels_engaged) == 0) {
708 xpc_mark_partition_disengaged(part); 630 xpc_indicate_partition_disengaged(part);
709 xpc_IPI_send_disengage(part);
710 }
711 } 631 }
712 632
713 xpc_msgqueue_deref(ch); 633 xpc_msgqueue_deref(ch);
@@ -758,9 +678,9 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
758 } else if (ch->flags & XPC_C_DISCONNECTING) { 678 } else if (ch->flags & XPC_C_DISCONNECTING) {
759 break; 679 break;
760 680
761 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) { 681 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
762 if (atomic_inc_return(&part->nchannels_engaged) == 1) 682 atomic_inc_return(&part->nchannels_engaged) == 1) {
763 xpc_mark_partition_engaged(part); 683 xpc_indicate_partition_engaged(part);
764 } 684 }
765 (void)xpc_part_ref(part); 685 (void)xpc_part_ref(part);
766 xpc_msgqueue_ref(ch); 686 xpc_msgqueue_ref(ch);
@@ -782,8 +702,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
782 702
783 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && 703 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
784 atomic_dec_return(&part->nchannels_engaged) == 0) { 704 atomic_dec_return(&part->nchannels_engaged) == 0) {
785 xpc_mark_partition_disengaged(part); 705 xpc_indicate_partition_disengaged(part);
786 xpc_IPI_send_disengage(part);
787 } 706 }
788 xpc_msgqueue_deref(ch); 707 xpc_msgqueue_deref(ch);
789 xpc_part_deref(part); 708 xpc_part_deref(part);
@@ -862,7 +781,7 @@ xpc_do_exit(enum xp_retval reason)
862 short partid; 781 short partid;
863 int active_part_count, printed_waiting_msg = 0; 782 int active_part_count, printed_waiting_msg = 0;
864 struct xpc_partition *part; 783 struct xpc_partition *part;
865 unsigned long printmsg_time, disengage_request_timeout = 0; 784 unsigned long printmsg_time, disengage_timeout = 0;
866 785
867 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ 786 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
868 DBUG_ON(xpc_exiting == 1); 787 DBUG_ON(xpc_exiting == 1);
@@ -886,8 +805,8 @@ xpc_do_exit(enum xp_retval reason)
886 805
887 /* wait for all partitions to become inactive */ 806 /* wait for all partitions to become inactive */
888 807
889 printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 808 printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
890 xpc_disengage_request_timedout = 0; 809 xpc_disengage_timedout = 0;
891 810
892 do { 811 do {
893 active_part_count = 0; 812 active_part_count = 0;
@@ -904,36 +823,32 @@ xpc_do_exit(enum xp_retval reason)
904 823
905 XPC_DEACTIVATE_PARTITION(part, reason); 824 XPC_DEACTIVATE_PARTITION(part, reason);
906 825
907 if (part->disengage_request_timeout > 826 if (part->disengage_timeout > disengage_timeout)
908 disengage_request_timeout) { 827 disengage_timeout = part->disengage_timeout;
909 disengage_request_timeout =
910 part->disengage_request_timeout;
911 }
912 } 828 }
913 829
914 if (xpc_partition_engaged(-1UL)) { 830 if (xpc_any_partition_engaged()) {
915 if (time_is_before_jiffies(printmsg_time)) { 831 if (time_is_before_jiffies(printmsg_time)) {
916 dev_info(xpc_part, "waiting for remote " 832 dev_info(xpc_part, "waiting for remote "
917 "partitions to disengage, timeout in " 833 "partitions to deactivate, timeout in "
918 "%ld seconds\n", 834 "%ld seconds\n", (disengage_timeout -
919 (disengage_request_timeout - jiffies) 835 jiffies) / HZ);
920 / HZ);
921 printmsg_time = jiffies + 836 printmsg_time = jiffies +
922 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 837 (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
923 printed_waiting_msg = 1; 838 printed_waiting_msg = 1;
924 } 839 }
925 840
926 } else if (active_part_count > 0) { 841 } else if (active_part_count > 0) {
927 if (printed_waiting_msg) { 842 if (printed_waiting_msg) {
928 dev_info(xpc_part, "waiting for local partition" 843 dev_info(xpc_part, "waiting for local partition"
929 " to disengage\n"); 844 " to deactivate\n");
930 printed_waiting_msg = 0; 845 printed_waiting_msg = 0;
931 } 846 }
932 847
933 } else { 848 } else {
934 if (!xpc_disengage_request_timedout) { 849 if (!xpc_disengage_timedout) {
935 dev_info(xpc_part, "all partitions have " 850 dev_info(xpc_part, "all partitions have "
936 "disengaged\n"); 851 "deactivated\n");
937 } 852 }
938 break; 853 break;
939 } 854 }
@@ -943,7 +858,7 @@ xpc_do_exit(enum xp_retval reason)
943 858
944 } while (1); 859 } while (1);
945 860
946 DBUG_ON(xpc_partition_engaged(-1UL)); 861 DBUG_ON(xpc_any_partition_engaged());
947 DBUG_ON(xpc_any_hbs_allowed() != 0); 862 DBUG_ON(xpc_any_hbs_allowed() != 0);
948 863
949 /* indicate to others that our reserved page is uninitialized */ 864 /* indicate to others that our reserved page is uninitialized */
@@ -996,15 +911,16 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
996} 911}
997 912
998/* 913/*
999 * Notify other partitions to disengage from all references to our memory. 914 * Notify other partitions to deactivate from us by first disengaging from all
915 * references to our memory.
1000 */ 916 */
1001static void 917static void
1002xpc_die_disengage(void) 918xpc_die_deactivate(void)
1003{ 919{
1004 struct xpc_partition *part; 920 struct xpc_partition *part;
1005 short partid; 921 short partid;
1006 unsigned long engaged; 922 int any_engaged;
1007 long time, printmsg_time, disengage_request_timeout; 923 long time, printmsg_time, disengage_timeout;
1008 924
1009 /* keep xpc_hb_checker thread from doing anything (just in case) */ 925 /* keep xpc_hb_checker thread from doing anything (just in case) */
1010 xpc_exiting = 1; 926 xpc_exiting = 1;
@@ -1014,43 +930,37 @@ xpc_die_disengage(void)
1014 for (partid = 0; partid < xp_max_npartitions; partid++) { 930 for (partid = 0; partid < xp_max_npartitions; partid++) {
1015 part = &xpc_partitions[partid]; 931 part = &xpc_partitions[partid];
1016 932
1017 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 933 if (xpc_partition_engaged(partid) ||
1018 remote_vars_version)) {
1019
1020 /* just in case it was left set by an earlier XPC */
1021 xpc_clear_partition_engaged(1UL << partid);
1022 continue;
1023 }
1024
1025 if (xpc_partition_engaged(1UL << partid) ||
1026 part->act_state != XPC_P_INACTIVE) { 934 part->act_state != XPC_P_INACTIVE) {
1027 xpc_request_partition_disengage(part); 935 xpc_request_partition_deactivation(part);
1028 xpc_mark_partition_disengaged(part); 936 xpc_indicate_partition_disengaged(part);
1029 xpc_IPI_send_disengage(part);
1030 } 937 }
1031 } 938 }
1032 939
1033 time = rtc_time(); 940 time = rtc_time();
1034 printmsg_time = time + 941 printmsg_time = time +
1035 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); 942 (XPC_DEACTIVATE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
1036 disengage_request_timeout = time + 943 disengage_timeout = time +
1037 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); 944 (xpc_disengage_timelimit * sn_rtc_cycles_per_second);
1038 945
1039 /* wait for all other partitions to disengage from us */ 946 /*
947 * Though we requested that all other partitions deactivate from us,
948 * we only wait until they've all disengaged.
949 */
1040 950
1041 while (1) { 951 while (1) {
1042 engaged = xpc_partition_engaged(-1UL); 952 any_engaged = xpc_any_partition_engaged();
1043 if (!engaged) { 953 if (!any_engaged) {
1044 dev_info(xpc_part, "all partitions have disengaged\n"); 954 dev_info(xpc_part, "all partitions have deactivated\n");
1045 break; 955 break;
1046 } 956 }
1047 957
1048 time = rtc_time(); 958 time = rtc_time();
1049 if (time >= disengage_request_timeout) { 959 if (time >= disengage_timeout) {
1050 for (partid = 0; partid < xp_max_npartitions; 960 for (partid = 0; partid < xp_max_npartitions;
1051 partid++) { 961 partid++) {
1052 if (engaged & (1UL << partid)) { 962 if (xpc_partition_engaged(partid)) {
1053 dev_info(xpc_part, "disengage from " 963 dev_info(xpc_part, "deactivate from "
1054 "remote partition %d timed " 964 "remote partition %d timed "
1055 "out\n", partid); 965 "out\n", partid);
1056 } 966 }
@@ -1060,11 +970,11 @@ xpc_die_disengage(void)
1060 970
1061 if (time >= printmsg_time) { 971 if (time >= printmsg_time) {
1062 dev_info(xpc_part, "waiting for remote partitions to " 972 dev_info(xpc_part, "waiting for remote partitions to "
1063 "disengage, timeout in %ld seconds\n", 973 "deactivate, timeout in %ld seconds\n",
1064 (disengage_request_timeout - time) / 974 (disengage_timeout - time) /
1065 sn_rtc_cycles_per_second); 975 sn_rtc_cycles_per_second);
1066 printmsg_time = time + 976 printmsg_time = time +
1067 (XPC_DISENGAGE_PRINTMSG_INTERVAL * 977 (XPC_DEACTIVATE_PRINTMSG_INTERVAL *
1068 sn_rtc_cycles_per_second); 978 sn_rtc_cycles_per_second);
1069 } 979 }
1070 } 980 }
@@ -1084,7 +994,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1084 switch (event) { 994 switch (event) {
1085 case DIE_MACHINE_RESTART: 995 case DIE_MACHINE_RESTART:
1086 case DIE_MACHINE_HALT: 996 case DIE_MACHINE_HALT:
1087 xpc_die_disengage(); 997 xpc_die_deactivate();
1088 break; 998 break;
1089 999
1090 case DIE_KDEBUG_ENTER: 1000 case DIE_KDEBUG_ENTER:
@@ -1183,10 +1093,10 @@ xpc_init(void)
1183 part->act_state = XPC_P_INACTIVE; 1093 part->act_state = XPC_P_INACTIVE;
1184 XPC_SET_REASON(part, 0, 0); 1094 XPC_SET_REASON(part, 0, 0);
1185 1095
1186 init_timer(&part->disengage_request_timer); 1096 init_timer(&part->disengage_timer);
1187 part->disengage_request_timer.function = 1097 part->disengage_timer.function =
1188 xpc_timeout_partition_disengage_request; 1098 xpc_timeout_partition_disengage;
1189 part->disengage_request_timer.data = (unsigned long)part; 1099 part->disengage_timer.data = (unsigned long)part;
1190 1100
1191 part->setup_state = XPC_P_UNSET; 1101 part->setup_state = XPC_P_UNSET;
1192 init_waitqueue_head(&part->teardown_wq); 1102 init_waitqueue_head(&part->teardown_wq);
@@ -1295,9 +1205,9 @@ module_param(xpc_hb_check_interval, int, 0);
1295MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1205MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1296 "heartbeat checks."); 1206 "heartbeat checks.");
1297 1207
1298module_param(xpc_disengage_request_timelimit, int, 0); 1208module_param(xpc_disengage_timelimit, int, 0);
1299MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " 1209MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
1300 "for disengage request to complete."); 1210 "for disengage to complete.");
1301 1211
1302module_param(xpc_kdebug_ignore, int, 0); 1212module_param(xpc_kdebug_ignore, int, 0);
1303MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " 1213MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "