about summary refs log tree commit diff stats
path: root/arch/ia64/sn/kernel/xpc_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/ia64/sn/kernel/xpc_main.c')
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c | 146
1 file changed, 106 insertions(+), 40 deletions(-)
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index feece200b3c3..db349c6d4c58 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -91,6 +91,10 @@ static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
91static int xpc_hb_check_min_interval = 10; 91static int xpc_hb_check_min_interval = 10;
92static int xpc_hb_check_max_interval = 120; 92static int xpc_hb_check_max_interval = 120;
93 93
94int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
95static int xpc_disengage_request_min_timelimit = 0;
96static int xpc_disengage_request_max_timelimit = 120;
97
94static ctl_table xpc_sys_xpc_hb_dir[] = { 98static ctl_table xpc_sys_xpc_hb_dir[] = {
95 { 99 {
96 1, 100 1,
@@ -129,6 +133,19 @@ static ctl_table xpc_sys_xpc_dir[] = {
129 0555, 133 0555,
130 xpc_sys_xpc_hb_dir 134 xpc_sys_xpc_hb_dir
131 }, 135 },
136 {
137 2,
138 "disengage_request_timelimit",
139 &xpc_disengage_request_timelimit,
140 sizeof(int),
141 0644,
142 NULL,
143 &proc_dointvec_minmax,
144 &sysctl_intvec,
145 NULL,
146 &xpc_disengage_request_min_timelimit,
147 &xpc_disengage_request_max_timelimit
148 },
132 {0} 149 {0}
133}; 150};
134static ctl_table xpc_sys_dir[] = { 151static ctl_table xpc_sys_dir[] = {
@@ -153,11 +170,11 @@ static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);
153 170
154static unsigned long xpc_hb_check_timeout; 171static unsigned long xpc_hb_check_timeout;
155 172
156/* used as an indication of when the xpc_hb_checker thread is inactive */ 173/* notification that the xpc_hb_checker thread has exited */
157static DECLARE_MUTEX_LOCKED(xpc_hb_checker_inactive); 174static DECLARE_MUTEX_LOCKED(xpc_hb_checker_exited);
158 175
159/* used as an indication of when the xpc_discovery thread is inactive */ 176/* notification that the xpc_discovery thread has exited */
160static DECLARE_MUTEX_LOCKED(xpc_discovery_inactive); 177static DECLARE_MUTEX_LOCKED(xpc_discovery_exited);
161 178
162 179
163static struct timer_list xpc_hb_timer; 180static struct timer_list xpc_hb_timer;
@@ -181,7 +198,7 @@ xpc_timeout_partition_disengage_request(unsigned long data)
181 struct xpc_partition *part = (struct xpc_partition *) data; 198 struct xpc_partition *part = (struct xpc_partition *) data;
182 199
183 200
184 DBUG_ON(XPC_TICKS < part->disengage_request_timeout); 201 DBUG_ON(jiffies < part->disengage_request_timeout);
185 202
186 (void) xpc_partition_disengaged(part); 203 (void) xpc_partition_disengaged(part);
187 204
@@ -292,8 +309,8 @@ xpc_hb_checker(void *ignore)
292 dev_dbg(xpc_part, "heartbeat checker is exiting\n"); 309 dev_dbg(xpc_part, "heartbeat checker is exiting\n");
293 310
294 311
295 /* mark this thread as inactive */ 312 /* mark this thread as having exited */
296 up(&xpc_hb_checker_inactive); 313 up(&xpc_hb_checker_exited);
297 return 0; 314 return 0;
298} 315}
299 316
@@ -312,8 +329,8 @@ xpc_initiate_discovery(void *ignore)
312 329
313 dev_dbg(xpc_part, "discovery thread is exiting\n"); 330 dev_dbg(xpc_part, "discovery thread is exiting\n");
314 331
315 /* mark this thread as inactive */ 332 /* mark this thread as having exited */
316 up(&xpc_discovery_inactive); 333 up(&xpc_discovery_exited);
317 return 0; 334 return 0;
318} 335}
319 336
@@ -703,6 +720,7 @@ xpc_daemonize_kthread(void *args)
703 struct xpc_partition *part = &xpc_partitions[partid]; 720 struct xpc_partition *part = &xpc_partitions[partid];
704 struct xpc_channel *ch; 721 struct xpc_channel *ch;
705 int n_needed; 722 int n_needed;
723 unsigned long irq_flags;
706 724
707 725
708 daemonize("xpc%02dc%d", partid, ch_number); 726 daemonize("xpc%02dc%d", partid, ch_number);
@@ -713,11 +731,14 @@ xpc_daemonize_kthread(void *args)
713 ch = &part->channels[ch_number]; 731 ch = &part->channels[ch_number];
714 732
715 if (!(ch->flags & XPC_C_DISCONNECTING)) { 733 if (!(ch->flags & XPC_C_DISCONNECTING)) {
716 DBUG_ON(!(ch->flags & XPC_C_CONNECTED));
717 734
718 /* let registerer know that connection has been established */ 735 /* let registerer know that connection has been established */
719 736
720 if (atomic_read(&ch->kthreads_assigned) == 1) { 737 spin_lock_irqsave(&ch->lock, irq_flags);
738 if (!(ch->flags & XPC_C_CONNECTCALLOUT)) {
739 ch->flags |= XPC_C_CONNECTCALLOUT;
740 spin_unlock_irqrestore(&ch->lock, irq_flags);
741
721 xpc_connected_callout(ch); 742 xpc_connected_callout(ch);
722 743
723 /* 744 /*
@@ -732,14 +753,23 @@ xpc_daemonize_kthread(void *args)
732 !(ch->flags & XPC_C_DISCONNECTING)) { 753 !(ch->flags & XPC_C_DISCONNECTING)) {
733 xpc_activate_kthreads(ch, n_needed); 754 xpc_activate_kthreads(ch, n_needed);
734 } 755 }
756 } else {
757 spin_unlock_irqrestore(&ch->lock, irq_flags);
735 } 758 }
736 759
737 xpc_kthread_waitmsgs(part, ch); 760 xpc_kthread_waitmsgs(part, ch);
738 } 761 }
739 762
740 if (atomic_dec_return(&ch->kthreads_assigned) == 0) { 763 if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
741 if (ch->flags & XPC_C_CONNECTCALLOUT) { 764 spin_lock_irqsave(&ch->lock, irq_flags);
765 if ((ch->flags & XPC_C_CONNECTCALLOUT) &&
766 !(ch->flags & XPC_C_DISCONNECTCALLOUT)) {
767 ch->flags |= XPC_C_DISCONNECTCALLOUT;
768 spin_unlock_irqrestore(&ch->lock, irq_flags);
769
742 xpc_disconnecting_callout(ch); 770 xpc_disconnecting_callout(ch);
771 } else {
772 spin_unlock_irqrestore(&ch->lock, irq_flags);
743 } 773 }
744 if (atomic_dec_return(&part->nchannels_engaged) == 0) { 774 if (atomic_dec_return(&part->nchannels_engaged) == 0) {
745 xpc_mark_partition_disengaged(part); 775 xpc_mark_partition_disengaged(part);
@@ -780,9 +810,29 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
780 810
781 811
782 while (needed-- > 0) { 812 while (needed-- > 0) {
813
814 /*
815 * The following is done on behalf of the newly created
816 * kthread. That kthread is responsible for doing the
817 * counterpart to the following before it exits.
818 */
819 (void) xpc_part_ref(part);
820 xpc_msgqueue_ref(ch);
821 if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
822 atomic_inc_return(&part->nchannels_engaged) == 1) {
823 xpc_mark_partition_engaged(part);
824 }
825
783 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0); 826 pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
784 if (pid < 0) { 827 if (pid < 0) {
785 /* the fork failed */ 828 /* the fork failed */
829 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
830 atomic_dec_return(&part->nchannels_engaged) == 0) {
831 xpc_mark_partition_disengaged(part);
832 xpc_IPI_send_disengage(part);
833 }
834 xpc_msgqueue_deref(ch);
835 xpc_part_deref(part);
786 836
787 if (atomic_read(&ch->kthreads_assigned) < 837 if (atomic_read(&ch->kthreads_assigned) <
788 ch->kthreads_idle_limit) { 838 ch->kthreads_idle_limit) {
@@ -802,18 +852,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed)
802 break; 852 break;
803 } 853 }
804 854
805 /*
806 * The following is done on behalf of the newly created
807 * kthread. That kthread is responsible for doing the
808 * counterpart to the following before it exits.
809 */
810 (void) xpc_part_ref(part);
811 xpc_msgqueue_ref(ch);
812 if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
813 if (atomic_inc_return(&part->nchannels_engaged) == 1) {
814 xpc_mark_partition_engaged(part);
815 }
816 }
817 ch->kthreads_created++; // >>> temporary debug only!!! 855 ch->kthreads_created++; // >>> temporary debug only!!!
818 } 856 }
819} 857}
@@ -826,26 +864,49 @@ xpc_disconnect_wait(int ch_number)
826 partid_t partid; 864 partid_t partid;
827 struct xpc_partition *part; 865 struct xpc_partition *part;
828 struct xpc_channel *ch; 866 struct xpc_channel *ch;
867 int wakeup_channel_mgr;
829 868
830 869
831 /* now wait for all callouts to the caller's function to cease */ 870 /* now wait for all callouts to the caller's function to cease */
832 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 871 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
833 part = &xpc_partitions[partid]; 872 part = &xpc_partitions[partid];
834 873
835 if (xpc_part_ref(part)) { 874 if (!xpc_part_ref(part)) {
836 ch = &part->channels[ch_number]; 875 continue;
876 }
837 877
838 if (ch->flags & XPC_C_WDISCONNECT) { 878 ch = &part->channels[ch_number];
839 if (!(ch->flags & XPC_C_DISCONNECTED)) {
840 (void) down(&ch->wdisconnect_sema);
841 }
842 spin_lock_irqsave(&ch->lock, irq_flags);
843 ch->flags &= ~XPC_C_WDISCONNECT;
844 spin_unlock_irqrestore(&ch->lock, irq_flags);
845 }
846 879
880 if (!(ch->flags & XPC_C_WDISCONNECT)) {
847 xpc_part_deref(part); 881 xpc_part_deref(part);
882 continue;
848 } 883 }
884
885 (void) down(&ch->wdisconnect_sema);
886
887 spin_lock_irqsave(&ch->lock, irq_flags);
888 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
889 wakeup_channel_mgr = 0;
890
891 if (ch->delayed_IPI_flags) {
892 if (part->act_state != XPC_P_DEACTIVATING) {
893 spin_lock(&part->IPI_lock);
894 XPC_SET_IPI_FLAGS(part->local_IPI_amo,
895 ch->number, ch->delayed_IPI_flags);
896 spin_unlock(&part->IPI_lock);
897 wakeup_channel_mgr = 1;
898 }
899 ch->delayed_IPI_flags = 0;
900 }
901
902 ch->flags &= ~XPC_C_WDISCONNECT;
903 spin_unlock_irqrestore(&ch->lock, irq_flags);
904
905 if (wakeup_channel_mgr) {
906 xpc_wakeup_channel_mgr(part);
907 }
908
909 xpc_part_deref(part);
849 } 910 }
850} 911}
851 912
@@ -873,11 +934,11 @@ xpc_do_exit(enum xpc_retval reason)
873 /* ignore all incoming interrupts */ 934 /* ignore all incoming interrupts */
874 free_irq(SGI_XPC_ACTIVATE, NULL); 935 free_irq(SGI_XPC_ACTIVATE, NULL);
875 936
876 /* wait for the discovery thread to mark itself inactive */ 937 /* wait for the discovery thread to exit */
877 down(&xpc_discovery_inactive); 938 down(&xpc_discovery_exited);
878 939
879 /* wait for the heartbeat checker thread to mark itself inactive */ 940 /* wait for the heartbeat checker thread to exit */
880 down(&xpc_hb_checker_inactive); 941 down(&xpc_hb_checker_exited);
881 942
882 943
883 /* sleep for a 1/3 of a second or so */ 944 /* sleep for a 1/3 of a second or so */
@@ -893,6 +954,7 @@ xpc_do_exit(enum xpc_retval reason)
893 954
894 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { 955 for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
895 part = &xpc_partitions[partid]; 956 part = &xpc_partitions[partid];
957
896 if (xpc_partition_disengaged(part) && 958 if (xpc_partition_disengaged(part) &&
897 part->act_state == XPC_P_INACTIVE) { 959 part->act_state == XPC_P_INACTIVE) {
898 continue; 960 continue;
@@ -930,7 +992,7 @@ xpc_do_exit(enum xpc_retval reason)
930 992
931 /* now it's time to eliminate our heartbeat */ 993 /* now it's time to eliminate our heartbeat */
932 del_timer_sync(&xpc_hb_timer); 994 del_timer_sync(&xpc_hb_timer);
933 DBUG_ON(xpc_vars->heartbeating_to_mask == 0); 995 DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
934 996
935 /* take ourselves off of the reboot_notifier_list */ 997 /* take ourselves off of the reboot_notifier_list */
936 (void) unregister_reboot_notifier(&xpc_reboot_notifier); 998 (void) unregister_reboot_notifier(&xpc_reboot_notifier);
@@ -1134,7 +1196,7 @@ xpc_init(void)
1134 dev_err(xpc_part, "failed while forking discovery thread\n"); 1196 dev_err(xpc_part, "failed while forking discovery thread\n");
1135 1197
1136 /* mark this new thread as a non-starter */ 1198 /* mark this new thread as a non-starter */
1137 up(&xpc_discovery_inactive); 1199 up(&xpc_discovery_exited);
1138 1200
1139 xpc_do_exit(xpcUnloading); 1201 xpc_do_exit(xpcUnloading);
1140 return -EBUSY; 1202 return -EBUSY;
@@ -1172,3 +1234,7 @@ module_param(xpc_hb_check_interval, int, 0);
1172MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1234MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1173 "heartbeat checks."); 1235 "heartbeat checks.");
1174 1236
1237module_param(xpc_disengage_request_timelimit, int, 0);
1238MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
1239 "for disengage request to complete.");
1240