author		Tony Luck <tony.luck@intel.com>	2005-10-28 18:27:03 -0400
committer	Tony Luck <tony.luck@intel.com>	2005-10-28 18:27:03 -0400
commit		fac84ef26759a3725bfc53ae3abf21976360aff3 (patch)
tree		94aa9362e72a0736948adc78cb43ebf44f59580b /arch/ia64/sn/kernel/xpc_main.c
parent		d73dee6ee4b554074f88e4ebd956ea4db8552d52 (diff)
parent		279290294662d4a35d209fb7d7c46523cfa3d63d (diff)
Pull xpc-disengage into release branch
Diffstat (limited to 'arch/ia64/sn/kernel/xpc_main.c')
-rw-r--r--	arch/ia64/sn/kernel/xpc_main.c	330
1 file changed, 256 insertions(+), 74 deletions(-)
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index ed7c21586e9..cece3c7c69b 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -54,6 +54,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/reboot.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/uaccess.h>
@@ -82,11 +83,17 @@ struct device *xpc_chan = &xpc_chan_dbg_subname;
 
 /* systune related variables for /proc/sys directories */
 
-static int xpc_hb_min = 1;
-static int xpc_hb_max = 10;
+static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
+static int xpc_hb_min_interval = 1;
+static int xpc_hb_max_interval = 10;
 
-static int xpc_hb_check_min = 10;
-static int xpc_hb_check_max = 120;
+static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
+static int xpc_hb_check_min_interval = 10;
+static int xpc_hb_check_max_interval = 120;
+
+int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
+static int xpc_disengage_request_min_timelimit = 0;
+static int xpc_disengage_request_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
 	{
@@ -99,7 +106,8 @@ static ctl_table xpc_sys_xpc_hb_dir[] = {
 		&proc_dointvec_minmax,
 		&sysctl_intvec,
 		NULL,
-		&xpc_hb_min, &xpc_hb_max
+		&xpc_hb_min_interval,
+		&xpc_hb_max_interval
 	},
 	{
 		2,
@@ -111,7 +119,8 @@ static ctl_table xpc_sys_xpc_hb_dir[] = {
 		&proc_dointvec_minmax,
 		&sysctl_intvec,
 		NULL,
-		&xpc_hb_check_min, &xpc_hb_check_max
+		&xpc_hb_check_min_interval,
+		&xpc_hb_check_max_interval
 	},
 	{0}
 };
@@ -124,6 +133,19 @@ static ctl_table xpc_sys_xpc_dir[] = {
 		0555,
 		xpc_sys_xpc_hb_dir
 	},
+	{
+		2,
+		"disengage_request_timelimit",
+		&xpc_disengage_request_timelimit,
+		sizeof(int),
+		0644,
+		NULL,
+		&proc_dointvec_minmax,
+		&sysctl_intvec,
+		NULL,
+		&xpc_disengage_request_min_timelimit,
+		&xpc_disengage_request_max_timelimit
+	},
 	{0}
 };
 static ctl_table xpc_sys_dir[] = {
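The hunk above adds a bounded integer tunable to XPC's old-style binary-sysctl tables. For context, a minimal standalone sketch of the same pattern, using hypothetical ex_* names (not from this commit) and the two-argument register_sysctl_table() of 2.6-era kernels:

#include <linux/sysctl.h>
#include <linux/errno.h>

static int ex_timelimit = 60;		/* tunable exposed in /proc/sys */
static int ex_timelimit_min = 0;	/* bounds enforced by the handler */
static int ex_timelimit_max = 120;

/* positional initializer, same field order the hunks above use:
 * ctl_name, procname, data, maxlen, mode, child, proc_handler,
 * strategy, de, extra1 (min), extra2 (max) */
static ctl_table ex_dir[] = {
	{
		1,
		"ex_timelimit",
		&ex_timelimit,
		sizeof(int),
		0644,
		NULL,
		&proc_dointvec_minmax,
		&sysctl_intvec,
		NULL,
		&ex_timelimit_min,
		&ex_timelimit_max
	},
	{0}
};

static struct ctl_table_header *ex_sysctl;

static int ex_register(void)
{
	/* second argument: insert at the head of the sysctl list */
	ex_sysctl = register_sysctl_table(ex_dir, 1);
	return (ex_sysctl != NULL) ? 0 : -ENOMEM;
}

static void ex_unregister(void)
{
	if (ex_sysctl != NULL) {
		unregister_sysctl_table(ex_sysctl);
	}
}

XPC itself nests these entries under parent directory entries (xpc_sys_xpc_dir, xpc_sys_dir), as the surrounding hunks show, so the new knob appears as /proc/sys/xpc/disengage_request_timelimit.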
@@ -148,10 +170,10 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 
 static unsigned long xpc_hb_check_timeout;
 
-/* xpc_hb_checker thread exited notification */
+/* notification that the xpc_hb_checker thread has exited */
 static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
 
-/* xpc_discovery thread exited notification */
+/* notification that the xpc_discovery thread has exited */
 static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
 
 
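These two DECLARE_MUTEX_LOCKED() semaphores act as one-shot completions: a semaphore declared locked starts with a count of zero, so the first down() blocks until the exiting thread calls up(). A sketch of the idiom with hypothetical ex_* names:

#include <asm/semaphore.h>

/* count starts at 0, so down() blocks until the thread up()s it */
static DECLARE_MUTEX_LOCKED(ex_thread_exited);

static int
ex_thread(void *ignore)
{
	/* ... do work until told to exit ... */

	/* mark this thread as having exited */
	up(&ex_thread_exited);
	return 0;
}

static void
ex_wait_for_thread_exit(void)
{
	down(&ex_thread_exited);	/* sleeps until ex_thread() is done */
}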
@@ -161,6 +183,30 @@ static struct timer_list xpc_hb_timer;
 static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
 
 
+static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xpc_reboot_notifier = {
+	.notifier_call = xpc_system_reboot,
+};
+
+
+/*
+ * Timer function to enforce the timelimit on the partition disengage request.
+ */
+static void
+xpc_timeout_partition_disengage_request(unsigned long data)
+{
+	struct xpc_partition *part = (struct xpc_partition *) data;
+
+
+	DBUG_ON(jiffies < part->disengage_request_timeout);
+
+	(void) xpc_partition_disengaged(part);
+
+	DBUG_ON(part->disengage_request_timeout != 0);
+	DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
+}
+
+
 /*
  * Notify the heartbeat check thread that an IRQ has been received.
  */
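xpc_timeout_partition_disengage_request() is a callback for the pre-2008 timer API, in which the handler receives an unsigned long cookie rather than a pointer. The arming side is not in this file's hunks (in this commit it lives in xpc_partition.c); a hedged sketch of the usual arming pattern, with hypothetical ex_* names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct ex_part {
	struct timer_list timer;
	/* ... */
};

static void
ex_timeout(unsigned long data)
{
	struct ex_part *part = (struct ex_part *) data;

	/* ... enforce the timelimit on part ... */
	(void) part;
}

static void
ex_arm_timeout(struct ex_part *part, int seconds)
{
	init_timer(&part->timer);
	part->timer.function = ex_timeout;
	part->timer.data = (unsigned long) part;
	part->timer.expires = jiffies + seconds * HZ;
	add_timer(&part->timer);

	/* the disarm side should use del_timer_sync() so the handler
	 * cannot still be running on another CPU afterwards */
}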
@@ -214,12 +260,6 @@ xpc_hb_checker(void *ignore)
 
 	while (!(volatile int) xpc_exiting) {
 
-		/* wait for IRQ or timeout */
-		(void) wait_event_interruptible(xpc_act_IRQ_wq,
-			(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-			jiffies >= xpc_hb_check_timeout ||
-			(volatile int) xpc_exiting));
-
 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
 			"been received\n",
 			(int) (xpc_hb_check_timeout - jiffies),
@@ -240,6 +280,7 @@ xpc_hb_checker(void *ignore)
 		}
 
 
+		/* check for outstanding IRQs */
 		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
 		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
 			force_IRQ = 0;
@@ -257,12 +298,18 @@ xpc_hb_checker(void *ignore)
 			xpc_hb_check_timeout = jiffies +
 				(xpc_hb_check_interval * HZ);
 		}
+
+		/* wait for IRQ or timeout */
+		(void) wait_event_interruptible(xpc_act_IRQ_wq,
+			(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
+			jiffies >= xpc_hb_check_timeout ||
+			(volatile int) xpc_exiting));
 	}
 
 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
 
 
-	/* mark this thread as inactive */
+	/* mark this thread as having exited */
 	up(&xpc_hb_checker_exited);
 	return 0;
 }
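The hunks above move the wait_event_interruptible() call from the top of the heartbeat loop to the bottom, so each pass handles pending work first and only then blocks. A skeleton of the resulting loop shape, with hypothetical ex_* names:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(ex_wq);
static volatile int ex_exiting;

static int ex_work_pending(void) { return 0; /* stand-in */ }

static int
ex_checker(void *ignore)
{
	while (!ex_exiting) {
		/* 1. handle whatever is already pending */
		/* 2. recompute the next timeout */

		/* 3. only now block until more work, a timeout, or exit */
		(void) wait_event_interruptible(ex_wq,
			(ex_work_pending() || ex_exiting));
	}
	return 0;
}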
@@ -282,7 +329,7 @@ xpc_initiate_discovery(void *ignore)
 
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
 
-	/* mark this thread as inactive */
+	/* mark this thread as having exited */
 	up(&xpc_discovery_exited);
 	return 0;
 }
@@ -309,7 +356,7 @@ xpc_make_first_contact(struct xpc_partition *part)
 			"partition %d\n", XPC_PARTID(part));
 
 		/* wait a 1/4 of a second or so */
-		msleep_interruptible(250);
+		(void) msleep_interruptible(250);
 
 		if (part->act_state == XPC_P_DEACTIVATING) {
 			return part->reason;
@@ -336,7 +383,8 @@ static void
 xpc_channel_mgr(struct xpc_partition *part)
 {
 	while (part->act_state != XPC_P_DEACTIVATING ||
-			atomic_read(&part->nchannels_active) > 0) {
+			atomic_read(&part->nchannels_active) > 0 ||
+			!xpc_partition_disengaged(part)) {
 
 		xpc_process_channel_activity(part);
 
@@ -360,7 +408,8 @@ xpc_channel_mgr(struct xpc_partition *part)
 			(volatile u64) part->local_IPI_amo != 0 ||
 			((volatile u8) part->act_state ==
 				XPC_P_DEACTIVATING &&
-				atomic_read(&part->nchannels_active) == 0)));
+				atomic_read(&part->nchannels_active) == 0 &&
+				xpc_partition_disengaged(part))));
 		atomic_set(&part->channel_mgr_requests, 1);
 
 		// >>> Does it need to wakeup periodically as well? In case we
@@ -482,7 +531,7 @@ xpc_activating(void *__partid)
 		return 0;
 	}
 
-	XPC_ALLOW_HB(partid, xpc_vars);
+	xpc_allow_hb(partid, xpc_vars);
 	xpc_IPI_send_activated(part);
 
 
@@ -492,6 +541,7 @@
 	 */
 	(void) xpc_partition_up(part);
 
+	xpc_disallow_hb(partid, xpc_vars);
 	xpc_mark_partition_inactive(part);
 
 	if (part->reason == xpcReactivating) {
@@ -670,6 +720,7 @@ xpc_daemonize_kthread(void *args)
 	struct xpc_partition *part = &xpc_partitions[partid];
 	struct xpc_channel *ch;
 	int n_needed;
+	unsigned long irq_flags;
 
 
 	daemonize("xpc%02dc%d", partid, ch_number);
@@ -680,11 +731,14 @@ xpc_daemonize_kthread(void *args)
 	ch = &part->channels[ch_number];
 
 	if (!(ch->flags & XPC_C_DISCONNECTING)) {
-		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
 
 		/* let registerer know that connection has been established */
 
-		if (atomic_read(&ch->kthreads_assigned) == 1) {
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (!(ch->flags & XPC_C_CONNECTCALLOUT)) {
+			ch->flags |= XPC_C_CONNECTCALLOUT;
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+
 			xpc_connected_callout(ch);
 
 			/*
@@ -699,16 +753,28 @@
 			    !(ch->flags & XPC_C_DISCONNECTING)) {
 				xpc_activate_kthreads(ch, n_needed);
 			}
+		} else {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
 		}
 
 		xpc_kthread_waitmsgs(part, ch);
 	}
 
-	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
-		((ch->flags & XPC_C_CONNECTCALLOUT) ||
-			(ch->reason != xpcUnregistering &&
-				ch->reason != xpcOtherUnregistering))) {
-		xpc_disconnected_callout(ch);
+	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if ((ch->flags & XPC_C_CONNECTCALLOUT) &&
+				!(ch->flags & XPC_C_DISCONNECTCALLOUT)) {
+			ch->flags |= XPC_C_DISCONNECTCALLOUT;
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+			xpc_disconnecting_callout(ch);
+		} else {
+			spin_unlock_irqrestore(&ch->lock, irq_flags);
+		}
+		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
+			xpc_mark_partition_disengaged(part);
+			xpc_IPI_send_disengage(part);
+		}
 	}
 
 
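The rework above replaces the fragile kthreads_assigned == 1 heuristic with explicit CONNECTCALLOUT/DISCONNECTCALLOUT flags that are tested and set under ch->lock, so each callout is made exactly once no matter how many kthreads race through this path. The pattern in isolation, with hypothetical ex_* names; note that the lock is dropped before the callout is made, since the callout may sleep:

#include <linux/spinlock.h>

struct ex_channel {
	spinlock_t lock;
	unsigned int flags;
};

#define EX_CALLOUT_MADE	0x00000001

static void
ex_one_shot_callout(struct ex_channel *ch,
		void (*callout)(struct ex_channel *))
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (!(ch->flags & EX_CALLOUT_MADE)) {
		ch->flags |= EX_CALLOUT_MADE;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		/* only one caller ever gets here, and the spinlock is
		 * not held across the (possibly sleeping) callout */
		callout(ch);
	} else {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
	}
}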
@@ -740,12 +806,33 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 	unsigned long irq_flags;
 	pid_t pid;
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
 
 
 	while (needed-- > 0) {
+
+		/*
+		 * The following is done on behalf of the newly created
+		 * kthread. That kthread is responsible for doing the
+		 * counterpart to the following before it exits.
+		 */
+		(void) xpc_part_ref(part);
+		xpc_msgqueue_ref(ch);
+		if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
+				atomic_inc_return(&part->nchannels_engaged) == 1) {
+			xpc_mark_partition_engaged(part);
+		}
+
 		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
 		if (pid < 0) {
 			/* the fork failed */
+			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
+				atomic_dec_return(&part->nchannels_engaged) == 0) {
+				xpc_mark_partition_disengaged(part);
+				xpc_IPI_send_disengage(part);
+			}
+			xpc_msgqueue_deref(ch);
+			xpc_part_deref(part);
 
 			if (atomic_read(&ch->kthreads_assigned) <
 			    ch->kthreads_idle_limit) {
@@ -765,14 +852,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 			break;
 		}
 
-		/*
-		 * The following is done on behalf of the newly created
-		 * kthread. That kthread is responsible for doing the
-		 * counterpart to the following before it exits.
-		 */
-		(void) xpc_part_ref(&xpc_partitions[ch->partid]);
-		xpc_msgqueue_ref(ch);
-		atomic_inc(&ch->kthreads_assigned);
 		ch->kthreads_created++;	// >>> temporary debug only!!!
 	}
 }
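The two hunks above move the reference-taking to before kernel_thread(): the references are charged to the child before it exists, and unwound if the fork fails. This closes the window in which a running kthread was not yet accounted as engaged. A reduced sketch of the shape, with hypothetical ex_* names:

#include <asm/atomic.h>
#include <linux/sched.h>

static atomic_t ex_kthreads_assigned = ATOMIC_INIT(0);
static atomic_t ex_nchannels_engaged = ATOMIC_INIT(0);

static int
ex_kthread(void *ignore)
{
	/* ... the child performs the counterpart decrements on exit ... */
	return 0;
}

static int
ex_spawn(void)
{
	pid_t pid;

	/* done on behalf of the kthread we are about to create; the
	 * short-circuit means the second counter only moves when the
	 * first one made a 0 -> 1 transition */
	if (atomic_inc_return(&ex_kthreads_assigned) == 1 &&
			atomic_inc_return(&ex_nchannels_engaged) == 1) {
		/* first worker: mark ourselves engaged */
	}

	pid = kernel_thread(ex_kthread, NULL, 0);
	if (pid < 0) {
		/* fork failed: undo exactly what the child would undo */
		if (atomic_dec_return(&ex_kthreads_assigned) == 0 &&
				atomic_dec_return(&ex_nchannels_engaged) == 0) {
			/* last worker gone: mark ourselves disengaged */
		}
		return (int) pid;
	}
	return 0;
}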
@@ -781,87 +860,142 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 void
 xpc_disconnect_wait(int ch_number)
 {
+	unsigned long irq_flags;
 	partid_t partid;
 	struct xpc_partition *part;
 	struct xpc_channel *ch;
+	int wakeup_channel_mgr;
 
 
 	/* now wait for all callouts to the caller's function to cease */
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (xpc_part_ref(part)) {
-			ch = &part->channels[ch_number];
+		if (!xpc_part_ref(part)) {
+			continue;
+		}
 
-// >>> how do we keep from falling into the window between our check and going
-// >>> down and coming back up where sema is re-inited?
-			if (ch->flags & XPC_C_SETUP) {
-				(void) down(&ch->teardown_sema);
-			}
+		ch = &part->channels[ch_number];
 
+		if (!(ch->flags & XPC_C_WDISCONNECT)) {
 			xpc_part_deref(part);
+			continue;
+		}
+
+		(void) down(&ch->wdisconnect_sema);
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
+		wakeup_channel_mgr = 0;
+
+		if (ch->delayed_IPI_flags) {
+			if (part->act_state != XPC_P_DEACTIVATING) {
+				spin_lock(&part->IPI_lock);
+				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
+					ch->number, ch->delayed_IPI_flags);
+				spin_unlock(&part->IPI_lock);
+				wakeup_channel_mgr = 1;
+			}
+			ch->delayed_IPI_flags = 0;
 		}
+
+		ch->flags &= ~XPC_C_WDISCONNECT;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+
+		if (wakeup_channel_mgr) {
+			xpc_wakeup_channel_mgr(part);
+		}
+
+		xpc_part_deref(part);
 	}
 }
 
 
 static void
-xpc_do_exit(void)
+xpc_do_exit(enum xpc_retval reason)
 {
 	partid_t partid;
 	int active_part_count;
 	struct xpc_partition *part;
+	unsigned long printmsg_time;
 
 
-	/* now it's time to eliminate our heartbeat */
-	del_timer_sync(&xpc_hb_timer);
-	xpc_vars->heartbeating_to_mask = 0;
-
-	/* indicate to others that our reserved page is uninitialized */
-	xpc_rsvd_page->vars_pa = 0;
-
-	/*
-	 * Ignore all incoming interrupts. Without interupts the heartbeat
-	 * checker won't activate any new partitions that may come up.
-	 */
-	free_irq(SGI_XPC_ACTIVATE, NULL);
+	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
+	DBUG_ON(xpc_exiting == 1);
 
 	/*
-	 * Cause the heartbeat checker and the discovery threads to exit.
-	 * We don't want them attempting to activate new partitions as we
-	 * try to deactivate the existing ones.
+	 * Let the heartbeat checker thread and the discovery thread
+	 * (if one is running) know that they should exit. Also wake up
+	 * the heartbeat checker thread in case it's sleeping.
 	 */
 	xpc_exiting = 1;
 	wake_up_interruptible(&xpc_act_IRQ_wq);
 
-	/* wait for the heartbeat checker thread to mark itself inactive */
-	down(&xpc_hb_checker_exited);
+	/* ignore all incoming interrupts */
+	free_irq(SGI_XPC_ACTIVATE, NULL);
 
-	/* wait for the discovery thread to mark itself inactive */
+	/* wait for the discovery thread to exit */
 	down(&xpc_discovery_exited);
 
+	/* wait for the heartbeat checker thread to exit */
+	down(&xpc_hb_checker_exited);
 
-	msleep_interruptible(300);
+
+	/* sleep for a 1/3 of a second or so */
+	(void) msleep_interruptible(300);
 
 
 	/* wait for all partitions to become inactive */
 
+	printmsg_time = jiffies;
+
 	do {
 		active_part_count = 0;
 
 		for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 			part = &xpc_partitions[partid];
-			if (part->act_state != XPC_P_INACTIVE) {
-				active_part_count++;
 
-				XPC_DEACTIVATE_PARTITION(part, xpcUnloading);
+			if (xpc_partition_disengaged(part) &&
+					part->act_state == XPC_P_INACTIVE) {
+				continue;
 			}
+
+			active_part_count++;
+
+			XPC_DEACTIVATE_PARTITION(part, reason);
 		}
 
-		if (active_part_count)
-			msleep_interruptible(300);
-	} while (active_part_count > 0);
+		if (active_part_count == 0) {
+			break;
+		}
 
+		if (jiffies >= printmsg_time) {
+			dev_info(xpc_part, "waiting for partitions to "
+				"deactivate/disengage, active count=%d, remote "
+				"engaged=0x%lx\n", active_part_count,
+				xpc_partition_engaged(1UL << partid));
+
+			printmsg_time = jiffies +
+				(XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+		}
+
+		/* sleep for a 1/3 of a second or so */
+		(void) msleep_interruptible(300);
+
+	} while (1);
+
+	DBUG_ON(xpc_partition_engaged(-1UL));
+
+
+	/* indicate to others that our reserved page is uninitialized */
+	xpc_rsvd_page->vars_pa = 0;
+
+	/* now it's time to eliminate our heartbeat */
+	del_timer_sync(&xpc_hb_timer);
+	DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
+
+	/* take ourselves off of the reboot_notifier_list */
+	(void) unregister_reboot_notifier(&xpc_reboot_notifier);
 
 	/* close down protections for IPI operations */
 	xpc_restrict_IPI_ops();
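xpc_do_exit() now polls until every partition has deactivated and disengaged, printing a progress line at most once per XPC_DISENGAGE_PRINTMSG_INTERVAL rather than on every pass. The rate-limit idiom on its own, with hypothetical ex_* names (time_after_eq() would be the wraparound-safe form of the direct jiffies comparison the hunk uses):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/kernel.h>

#define EX_PRINT_INTERVAL	10	/* seconds between progress messages */

static int ex_active_count(void) { return 0; /* stand-in */ }

static void
ex_wait_until_idle(void)
{
	unsigned long printmsg_time = jiffies;

	do {
		if (ex_active_count() == 0) {
			break;
		}

		/* at most one message per interval, however fast we poll */
		if (jiffies >= printmsg_time) {
			printk(KERN_INFO "ex: still waiting, count=%d\n",
				ex_active_count());
			printmsg_time = jiffies +
				(EX_PRINT_INTERVAL * HZ);
		}

		/* sleep for a 1/3 of a second or so */
		(void) msleep_interruptible(300);

	} while (1);
}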
@@ -876,6 +1010,34 @@ xpc_do_exit(void)
 }
 
 
+/*
+ * This function is called when the system is being rebooted.
+ */
+static int
+xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+{
+	enum xpc_retval reason;
+
+
+	switch (event) {
+	case SYS_RESTART:
+		reason = xpcSystemReboot;
+		break;
+	case SYS_HALT:
+		reason = xpcSystemHalt;
+		break;
+	case SYS_POWER_OFF:
+		reason = xpcSystemPoweroff;
+		break;
+	default:
+		reason = xpcSystemGoingDown;
+	}
+
+	xpc_do_exit(reason);
+	return NOTIFY_DONE;
+}
+
+
 int __init
 xpc_init(void)
 {
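The new xpc_system_reboot() handler, the notifier_block declared near the top of the file, and the register/unregister calls added further down in xpc_init() and xpc_do_exit() together form the standard reboot-notifier lifecycle. Collected into one place as a sketch, with hypothetical ex_* names:

#include <linux/reboot.h>
#include <linux/notifier.h>

static int
ex_reboot_event(struct notifier_block *nb, unsigned long event, void *unused)
{
	/* event is SYS_RESTART, SYS_HALT, or SYS_POWER_OFF */
	/* ... start an orderly shutdown ... */
	return NOTIFY_DONE;
}

static struct notifier_block ex_reboot_notifier = {
	.notifier_call = ex_reboot_event,
};

static int
ex_init(void)
{
	/* add ourselves to the reboot_notifier_list */
	return register_reboot_notifier(&ex_reboot_notifier);
}

static void
ex_exit(void)
{
	/* must be undone on every teardown path, including init errors */
	(void) unregister_reboot_notifier(&ex_reboot_notifier);
}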
@@ -891,11 +1053,11 @@ xpc_init(void)
 
 	/*
 	 * xpc_remote_copy_buffer is used as a temporary buffer for bte_copy'ng
-	 * both a partition's reserved page and its XPC variables. Its size was
-	 * based on the size of a reserved page. So we need to ensure that the
-	 * XPC variables will fit as well.
+	 * various portions of a partition's reserved page. Its size is based
+	 * on the size of the reserved page header and part_nasids mask. So we
+	 * need to ensure that the other items will fit as well.
 	 */
-	if (XPC_VARS_ALIGNED_SIZE > XPC_RSVD_PAGE_ALIGNED_SIZE) {
+	if (XPC_RP_VARS_SIZE > XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES) {
 		dev_err(xpc_part, "xpc_remote_copy_buffer is not big enough\n");
 		return -EPERM;
 	}
@@ -924,6 +1086,12 @@ xpc_init(void)
 		spin_lock_init(&part->act_lock);
 		part->act_state = XPC_P_INACTIVE;
 		XPC_SET_REASON(part, 0, 0);
+
+		init_timer(&part->disengage_request_timer);
+		part->disengage_request_timer.function =
+			xpc_timeout_partition_disengage_request;
+		part->disengage_request_timer.data = (unsigned long) part;
+
 		part->setup_state = XPC_P_UNSET;
 		init_waitqueue_head(&part->teardown_wq);
 		atomic_set(&part->references, 0);
@@ -980,6 +1148,13 @@ xpc_init(void)
 
 	}
 
+	/* add ourselves to the reboot_notifier_list */
+	ret = register_reboot_notifier(&xpc_reboot_notifier);
+	if (ret != 0) {
+		dev_warn(xpc_part, "can't register reboot notifier\n");
+	}
+
+
 	/*
 	 * Set the beating to other partitions into motion. This is
 	 * the last requirement for other partitions' discovery to
@@ -1001,6 +1176,9 @@ xpc_init(void)
 		/* indicate to others that our reserved page is uninitialized */
 		xpc_rsvd_page->vars_pa = 0;
 
+		/* take ourselves off of the reboot_notifier_list */
+		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+
 		del_timer_sync(&xpc_hb_timer);
 		free_irq(SGI_XPC_ACTIVATE, NULL);
 		xpc_restrict_IPI_ops();
@@ -1024,7 +1202,7 @@ xpc_init(void)
 		/* mark this new thread as a non-starter */
 		up(&xpc_discovery_exited);
 
-		xpc_do_exit();
+		xpc_do_exit(xpcUnloading);
 		return -EBUSY;
 	}
 
@@ -1043,7 +1221,7 @@ module_init(xpc_init);
 void __exit
 xpc_exit(void)
 {
-	xpc_do_exit();
+	xpc_do_exit(xpcUnloading);
 }
 module_exit(xpc_exit);
 
@@ -1060,3 +1238,7 @@ module_param(xpc_hb_check_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
 	"heartbeat checks.");
 
+module_param(xpc_disengage_request_timelimit, int, 0);
+MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
+	"for disengage request to complete.");
+