Diffstat (limited to 'arch/ia64/sn/kernel/xpc_main.c')
-rw-r--r--	arch/ia64/sn/kernel/xpc_main.c	242
1 file changed, 179 insertions(+), 63 deletions(-)
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index bb1d5cf30440..feece200b3c3 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -54,6 +54,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/reboot.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/uaccess.h>
@@ -82,11 +83,13 @@ struct device *xpc_chan = &xpc_chan_dbg_subname;
 
 /* systune related variables for /proc/sys directories */
 
-static int xpc_hb_min = 1;
-static int xpc_hb_max = 10;
+static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
+static int xpc_hb_min_interval = 1;
+static int xpc_hb_max_interval = 10;
 
-static int xpc_hb_check_min = 10;
-static int xpc_hb_check_max = 120;
+static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
+static int xpc_hb_check_min_interval = 10;
+static int xpc_hb_check_max_interval = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
 	{
@@ -99,7 +102,8 @@ static ctl_table xpc_sys_xpc_hb_dir[] = {
 		&proc_dointvec_minmax,
 		&sysctl_intvec,
 		NULL,
-		&xpc_hb_min, &xpc_hb_max
+		&xpc_hb_min_interval,
+		&xpc_hb_max_interval
 	},
 	{
 		2,
@@ -111,7 +115,8 @@ static ctl_table xpc_sys_xpc_hb_dir[] = {
 		&proc_dointvec_minmax,
 		&sysctl_intvec,
 		NULL,
-		&xpc_hb_check_min, &xpc_hb_check_max
+		&xpc_hb_check_min_interval,
+		&xpc_hb_check_max_interval
 	},
 	{0}
 };
@@ -148,11 +153,11 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
 
 static unsigned long xpc_hb_check_timeout;
 
-/* xpc_hb_checker thread exited notification */
-static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
+/* used as an indication of when the xpc_hb_checker thread is inactive */
+static DECLARE_MUTEX_LOCKED(xpc_hb_checker_inactive);
 
-/* xpc_discovery thread exited notification */
-static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
+/* used as an indication of when the xpc_discovery thread is inactive */
+static DECLARE_MUTEX_LOCKED(xpc_discovery_inactive);
 
 
 static struct timer_list xpc_hb_timer;
@@ -161,6 +166,30 @@ static struct timer_list xpc_hb_timer;
 static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
 
 
+static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xpc_reboot_notifier = {
+	.notifier_call = xpc_system_reboot,
+};
+
+
+/*
+ * Timer function to enforce the timelimit on the partition disengage request.
+ */
+static void
+xpc_timeout_partition_disengage_request(unsigned long data)
+{
+	struct xpc_partition *part = (struct xpc_partition *) data;
+
+
+	DBUG_ON(XPC_TICKS < part->disengage_request_timeout);
+
+	(void) xpc_partition_disengaged(part);
+
+	DBUG_ON(part->disengage_request_timeout != 0);
+	DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
+}
+
+
 /*
  * Notify the heartbeat check thread that an IRQ has been received.
  */
@@ -214,12 +243,6 @@ xpc_hb_checker(void *ignore)
 
 	while (!(volatile int) xpc_exiting) {
 
-		/* wait for IRQ or timeout */
-		(void) wait_event_interruptible(xpc_act_IRQ_wq,
-			(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-				jiffies >= xpc_hb_check_timeout ||
-					(volatile int) xpc_exiting));
-
 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
 			"been received\n",
 			(int) (xpc_hb_check_timeout - jiffies),
@@ -240,6 +263,7 @@
 		}
 
 
+		/* check for outstanding IRQs */
 		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
 		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
 			force_IRQ = 0;
@@ -257,13 +281,19 @@
 			xpc_hb_check_timeout = jiffies +
 				(xpc_hb_check_interval * HZ);
 		}
+
+		/* wait for IRQ or timeout */
+		(void) wait_event_interruptible(xpc_act_IRQ_wq,
+			(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
+				jiffies >= xpc_hb_check_timeout ||
+					(volatile int) xpc_exiting));
 	}
 
 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
 
 
 	/* mark this thread as inactive */
-	up(&xpc_hb_checker_exited);
+	up(&xpc_hb_checker_inactive);
 	return 0;
 }
 
@@ -283,7 +313,7 @@ xpc_initiate_discovery(void *ignore)
 	dev_dbg(xpc_part, "discovery thread is exiting\n");
 
 	/* mark this thread as inactive */
-	up(&xpc_discovery_exited);
+	up(&xpc_discovery_inactive);
 	return 0;
 }
 
@@ -309,7 +339,7 @@ xpc_make_first_contact(struct xpc_partition *part)
 			"partition %d\n", XPC_PARTID(part));
 
 		/* wait a 1/4 of a second or so */
-		msleep_interruptible(250);
+		(void) msleep_interruptible(250);
 
 		if (part->act_state == XPC_P_DEACTIVATING) {
 			return part->reason;
@@ -336,7 +366,8 @@ static void
 xpc_channel_mgr(struct xpc_partition *part)
 {
 	while (part->act_state != XPC_P_DEACTIVATING ||
-			atomic_read(&part->nchannels_active) > 0) {
+			atomic_read(&part->nchannels_active) > 0 ||
+					!xpc_partition_disengaged(part)) {
 
 		xpc_process_channel_activity(part);
 
@@ -360,7 +391,8 @@ xpc_channel_mgr(struct xpc_partition *part)
 				(volatile u64) part->local_IPI_amo != 0 ||
 				((volatile u8) part->act_state ==
 					XPC_P_DEACTIVATING &&
-				atomic_read(&part->nchannels_active) == 0)));
+				atomic_read(&part->nchannels_active) == 0 &&
+				xpc_partition_disengaged(part))));
 		atomic_set(&part->channel_mgr_requests, 1);
 
 		// >>> Does it need to wakeup periodically as well? In case we
@@ -482,7 +514,7 @@ xpc_activating(void *__partid)
 		return 0;
 	}
 
-	XPC_ALLOW_HB(partid, xpc_vars);
+	xpc_allow_hb(partid, xpc_vars);
 	xpc_IPI_send_activated(part);
 
 
@@ -492,6 +524,7 @@
 	 */
 	(void) xpc_partition_up(part);
 
+	xpc_disallow_hb(partid, xpc_vars);
 	xpc_mark_partition_inactive(part);
 
 	if (part->reason == xpcReactivating) {
@@ -704,11 +737,14 @@ xpc_daemonize_kthread(void *args)
 		xpc_kthread_waitmsgs(part, ch);
 	}
 
-	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
-			((ch->flags & XPC_C_CONNECTCALLOUT) ||
-				(ch->reason != xpcUnregistering &&
-					ch->reason != xpcOtherUnregistering))) {
-		xpc_disconnected_callout(ch);
+	if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
+		if (ch->flags & XPC_C_CONNECTCALLOUT) {
+			xpc_disconnecting_callout(ch);
+		}
+		if (atomic_dec_return(&part->nchannels_engaged) == 0) {
+			xpc_mark_partition_disengaged(part);
+			xpc_IPI_send_disengage(part);
+		}
 	}
 
 
@@ -740,6 +776,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 	unsigned long irq_flags;
 	pid_t pid;
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
+	struct xpc_partition *part = &xpc_partitions[ch->partid];
 
 
 	while (needed-- > 0) {
@@ -770,9 +807,13 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 		 * kthread. That kthread is responsible for doing the
 		 * counterpart to the following before it exits.
 		 */
-		(void) xpc_part_ref(&xpc_partitions[ch->partid]);
+		(void) xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
-		atomic_inc(&ch->kthreads_assigned);
+		if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
+			if (atomic_inc_return(&part->nchannels_engaged) == 1) {
+				xpc_mark_partition_engaged(part);
+			}
+		}
 		ch->kthreads_created++;	// >>> temporary debug only!!!
 	}
 }
@@ -781,6 +822,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
 void
 xpc_disconnect_wait(int ch_number)
 {
+	unsigned long irq_flags;
 	partid_t partid;
 	struct xpc_partition *part;
 	struct xpc_channel *ch;
@@ -793,10 +835,13 @@ xpc_disconnect_wait(int ch_number)
 		if (xpc_part_ref(part)) {
 			ch = &part->channels[ch_number];
 
-// >>> how do we keep from falling into the window between our check and going
-// >>> down and coming back up where sema is re-inited?
-			if (ch->flags & XPC_C_SETUP) {
-				(void) down(&ch->teardown_sema);
+			if (ch->flags & XPC_C_WDISCONNECT) {
+				if (!(ch->flags & XPC_C_DISCONNECTED)) {
+					(void) down(&ch->wdisconnect_sema);
+				}
+				spin_lock_irqsave(&ch->lock, irq_flags);
+				ch->flags &= ~XPC_C_WDISCONNECT;
+				spin_unlock_irqrestore(&ch->lock, irq_flags);
 			}
 
 			xpc_part_deref(part);
@@ -806,62 +851,89 @@
 
 
 static void
-xpc_do_exit(void)
+xpc_do_exit(enum xpc_retval reason)
 {
 	partid_t partid;
 	int active_part_count;
 	struct xpc_partition *part;
+	unsigned long printmsg_time;
 
 
-	/* now it's time to eliminate our heartbeat */
-	del_timer_sync(&xpc_hb_timer);
-	xpc_vars->heartbeating_to_mask = 0;
-
-	/* indicate to others that our reserved page is uninitialized */
-	xpc_rsvd_page->vars_pa = 0;
+	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
+	DBUG_ON(xpc_exiting == 1);
 
 	/*
-	 * Ignore all incoming interrupts. Without interupts the heartbeat
-	 * checker won't activate any new partitions that may come up.
-	 */
-	free_irq(SGI_XPC_ACTIVATE, NULL);
-
-	/*
-	 * Cause the heartbeat checker and the discovery threads to exit.
-	 * We don't want them attempting to activate new partitions as we
-	 * try to deactivate the existing ones.
+	 * Let the heartbeat checker thread and the discovery thread
+	 * (if one is running) know that they should exit. Also wake up
+	 * the heartbeat checker thread in case it's sleeping.
 	 */
 	xpc_exiting = 1;
 	wake_up_interruptible(&xpc_act_IRQ_wq);
 
-	/* wait for the heartbeat checker thread to mark itself inactive */
-	down(&xpc_hb_checker_exited);
+	/* ignore all incoming interrupts */
+	free_irq(SGI_XPC_ACTIVATE, NULL);
 
 	/* wait for the discovery thread to mark itself inactive */
-	down(&xpc_discovery_exited);
+	down(&xpc_discovery_inactive);
+
+	/* wait for the heartbeat checker thread to mark itself inactive */
+	down(&xpc_hb_checker_inactive);
 
 
-	msleep_interruptible(300);
+	/* sleep for a 1/3 of a second or so */
+	(void) msleep_interruptible(300);
 
 
 	/* wait for all partitions to become inactive */
 
+	printmsg_time = jiffies;
+
 	do {
 		active_part_count = 0;
 
 		for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 			part = &xpc_partitions[partid];
-			if (part->act_state != XPC_P_INACTIVE) {
-				active_part_count++;
-
-				XPC_DEACTIVATE_PARTITION(part, xpcUnloading);
+			if (xpc_partition_disengaged(part) &&
+					part->act_state == XPC_P_INACTIVE) {
+				continue;
 			}
+
+			active_part_count++;
+
+			XPC_DEACTIVATE_PARTITION(part, reason);
+		}
+
+		if (active_part_count == 0) {
+			break;
+		}
+
+		if (jiffies >= printmsg_time) {
+			dev_info(xpc_part, "waiting for partitions to "
+				"deactivate/disengage, active count=%d, remote "
+				"engaged=0x%lx\n", active_part_count,
+				xpc_partition_engaged(1UL << partid));
+
+			printmsg_time = jiffies +
+					(XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
 		}
 
-		if (active_part_count)
-			msleep_interruptible(300);
-	} while (active_part_count > 0);
+		/* sleep for a 1/3 of a second or so */
+		(void) msleep_interruptible(300);
+
+	} while (1);
+
+	DBUG_ON(xpc_partition_engaged(-1UL));
+
+
+	/* indicate to others that our reserved page is uninitialized */
+	xpc_rsvd_page->vars_pa = 0;
+
+	/* now it's time to eliminate our heartbeat */
+	del_timer_sync(&xpc_hb_timer);
+	DBUG_ON(xpc_vars->heartbeating_to_mask == 0);
 
+	/* take ourselves off of the reboot_notifier_list */
+	(void) unregister_reboot_notifier(&xpc_reboot_notifier);
 
 	/* close down protections for IPI operations */
 	xpc_restrict_IPI_ops();
@@ -876,6 +948,34 @@ xpc_do_exit(void)
 }
 
 
+/*
+ * This function is called when the system is being rebooted.
+ */
+static int
+xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+{
+	enum xpc_retval reason;
+
+
+	switch (event) {
+	case SYS_RESTART:
+		reason = xpcSystemReboot;
+		break;
+	case SYS_HALT:
+		reason = xpcSystemHalt;
+		break;
+	case SYS_POWER_OFF:
+		reason = xpcSystemPoweroff;
+		break;
+	default:
+		reason = xpcSystemGoingDown;
+	}
+
+	xpc_do_exit(reason);
+	return NOTIFY_DONE;
+}
+
+
 int __init
 xpc_init(void)
 {
@@ -920,6 +1020,12 @@ xpc_init(void)
 		spin_lock_init(&part->act_lock);
 		part->act_state = XPC_P_INACTIVE;
 		XPC_SET_REASON(part, 0, 0);
+
+		init_timer(&part->disengage_request_timer);
+		part->disengage_request_timer.function =
+				xpc_timeout_partition_disengage_request;
+		part->disengage_request_timer.data = (unsigned long) part;
+
 		part->setup_state = XPC_P_UNSET;
 		init_waitqueue_head(&part->teardown_wq);
 		atomic_set(&part->references, 0);
@@ -976,6 +1082,13 @@ xpc_init(void)
 	}
 
 
+	/* add ourselves to the reboot_notifier_list */
+	ret = register_reboot_notifier(&xpc_reboot_notifier);
+	if (ret != 0) {
+		dev_warn(xpc_part, "can't register reboot notifier\n");
+	}
+
+
 	/*
 	 * Set the beating to other partitions into motion. This is
 	 * the last requirement for other partitions' discovery to
@@ -997,6 +1110,9 @@ xpc_init(void)
 		/* indicate to others that our reserved page is uninitialized */
 		xpc_rsvd_page->vars_pa = 0;
 
+		/* take ourselves off of the reboot_notifier_list */
+		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+
 		del_timer_sync(&xpc_hb_timer);
 		free_irq(SGI_XPC_ACTIVATE, NULL);
 		xpc_restrict_IPI_ops();
@@ -1018,9 +1134,9 @@ xpc_init(void)
 		dev_err(xpc_part, "failed while forking discovery thread\n");
 
 		/* mark this new thread as a non-starter */
-		up(&xpc_discovery_exited);
+		up(&xpc_discovery_inactive);
 
-		xpc_do_exit();
+		xpc_do_exit(xpcUnloading);
 		return -EBUSY;
 	}
 
@@ -1039,7 +1155,7 @@ module_init(xpc_init);
 void __exit
 xpc_exit(void)
 {
-	xpc_do_exit();
+	xpc_do_exit(xpcUnloading);
 }
 module_exit(xpc_exit);
 