aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/misc/sgi-xp/xpc_main.c
diff options
context:
space:
mode:
authorDean Nelson <dcn@sgi.com>2008-07-30 01:34:10 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-07-30 12:41:49 -0400
commit7fb5e59d63deda89a8eefdbd5b3c8d622076afd4 (patch)
tree4c78f9e016dd0998e8539a1da358b4ba961db8e9 /drivers/misc/sgi-xp/xpc_main.c
parenta47d5dac9d8481766382f8cf1483dd581df38b99 (diff)
sgi-xp: separate chctl_flags from XPC's notify IRQ
Tie current IPI references to either XPC's notify IRQ or channel control flags. Signed-off-by: Dean Nelson <dcn@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_main.c')
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c  59
1 file changed, 29 insertions(+), 30 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 563aaf4a2ff6..43f5b686ecf3 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -25,18 +25,18 @@
25 * 25 *
26 * Caveats: 26 * Caveats:
27 * 27 *
28 * . We currently have no way to determine which nasid an IPI came 28 * . Currently on sn2, we have no way to determine which nasid an IRQ
29 * from. Thus, >>> xpc_IPI_send() does a remote AMO write followed by 29 * came from. Thus, xpc_send_IRQ_sn2() does a remote AMO write
30 * an IPI. The AMO indicates where data is to be pulled from, so 30 * followed by an IPI. The AMO indicates where data is to be pulled
31 * after the IPI arrives, the remote partition checks the AMO word. 31 * from, so after the IPI arrives, the remote partition checks the AMO
32 * The IPI can actually arrive before the AMO however, so other code 32 * word. The IPI can actually arrive before the AMO however, so other
33 * must periodically check for this case. Also, remote AMO operations 33 * code must periodically check for this case. Also, remote AMO
34 * do not reliably time out. Thus we do a remote PIO read solely to 34 * operations do not reliably time out. Thus we do a remote PIO read
35 * know whether the remote partition is down and whether we should 35 * solely to know whether the remote partition is down and whether we
36 * stop sending IPIs to it. This remote PIO read operation is set up 36 * should stop sending IPIs to it. This remote PIO read operation is
37 * in a special nofault region so SAL knows to ignore (and cleanup) 37 * set up in a special nofault region so SAL knows to ignore (and
38 * any errors due to the remote AMO write, PIO read, and/or PIO 38 * cleanup) any errors due to the remote AMO write, PIO read, and/or
39 * write operations. 39 * PIO write operations.
40 * 40 *
41 * If/when new hardware solves this IPI problem, we should abandon 41 * If/when new hardware solves this IPI problem, we should abandon
42 * the current approach. 42 * the current approach.
@@ -185,8 +185,8 @@ void (*xpc_check_remote_hb) (void);
185 185
186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); 186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
187void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch); 187void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
188u64 (*xpc_get_IPI_flags) (struct xpc_partition *part); 188u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
189void (*xpc_process_msg_IPI) (struct xpc_partition *part, int ch_number); 189void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
190int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch); 190int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
191struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); 191struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
192 192
@@ -206,14 +206,14 @@ int (*xpc_any_partition_engaged) (void);
206void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part); 206void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
207void (*xpc_assume_partition_disengaged) (short partid); 207void (*xpc_assume_partition_disengaged) (short partid);
208 208
209void (*xpc_send_channel_closerequest) (struct xpc_channel *ch, 209void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
210 unsigned long *irq_flags);
211void (*xpc_send_channel_closereply) (struct xpc_channel *ch,
212 unsigned long *irq_flags); 210 unsigned long *irq_flags);
213void (*xpc_send_channel_openrequest) (struct xpc_channel *ch, 211void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
214 unsigned long *irq_flags); 212 unsigned long *irq_flags);
215void (*xpc_send_channel_openreply) (struct xpc_channel *ch, 213void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
216 unsigned long *irq_flags); 214 unsigned long *irq_flags);
215void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
216 unsigned long *irq_flags);
217 217
218enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags, 218enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
219 void *payload, u16 payload_size, u8 notify_type, 219 void *payload, u16 payload_size, u8 notify_type,
@@ -302,7 +302,7 @@ xpc_hb_checker(void *ignore)
302 302
303 /* 303 /*
304 * We need to periodically recheck to ensure no 304 * We need to periodically recheck to ensure no
305 * IPI/AMO pairs have been missed. That check 305 * IRQ/AMO pairs have been missed. That check
306 * must always reset xpc_hb_check_timeout. 306 * must always reset xpc_hb_check_timeout.
307 */ 307 */
308 force_IRQ = 1; 308 force_IRQ = 1;
@@ -378,7 +378,7 @@ xpc_channel_mgr(struct xpc_partition *part)
378 atomic_read(&part->nchannels_active) > 0 || 378 atomic_read(&part->nchannels_active) > 0 ||
379 !xpc_partition_disengaged(part)) { 379 !xpc_partition_disengaged(part)) {
380 380
381 xpc_process_channel_activity(part); 381 xpc_process_sent_chctl_flags(part);
382 382
383 /* 383 /*
384 * Wait until we've been requested to activate kthreads or 384 * Wait until we've been requested to activate kthreads or
@@ -396,7 +396,7 @@ xpc_channel_mgr(struct xpc_partition *part)
396 atomic_dec(&part->channel_mgr_requests); 396 atomic_dec(&part->channel_mgr_requests);
397 (void)wait_event_interruptible(part->channel_mgr_wq, 397 (void)wait_event_interruptible(part->channel_mgr_wq,
398 (atomic_read(&part->channel_mgr_requests) > 0 || 398 (atomic_read(&part->channel_mgr_requests) > 0 ||
399 part->local_IPI_amo != 0 || 399 part->chctl.all_flags != 0 ||
400 (part->act_state == XPC_P_DEACTIVATING && 400 (part->act_state == XPC_P_DEACTIVATING &&
401 atomic_read(&part->nchannels_active) == 0 && 401 atomic_read(&part->nchannels_active) == 0 &&
402 xpc_partition_disengaged(part)))); 402 xpc_partition_disengaged(part))));
@@ -753,16 +753,15 @@ xpc_disconnect_wait(int ch_number)
753 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); 753 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
754 wakeup_channel_mgr = 0; 754 wakeup_channel_mgr = 0;
755 755
756 if (ch->delayed_IPI_flags) { 756 if (ch->delayed_chctl_flags) {
757 if (part->act_state != XPC_P_DEACTIVATING) { 757 if (part->act_state != XPC_P_DEACTIVATING) {
758 spin_lock(&part->IPI_lock); 758 spin_lock(&part->chctl_lock);
759 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 759 part->chctl.flags[ch->number] |=
760 ch->number, 760 ch->delayed_chctl_flags;
761 ch->delayed_IPI_flags); 761 spin_unlock(&part->chctl_lock);
762 spin_unlock(&part->IPI_lock);
763 wakeup_channel_mgr = 1; 762 wakeup_channel_mgr = 1;
764 } 763 }
765 ch->delayed_IPI_flags = 0; 764 ch->delayed_chctl_flags = 0;
766 } 765 }
767 766
768 ch->flags &= ~XPC_C_WDISCONNECT; 767 ch->flags &= ~XPC_C_WDISCONNECT;