author		Dean Nelson <dcn@sgi.com>	2008-07-30 01:34:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:49 -0400
commit		7fb5e59d63deda89a8eefdbd5b3c8d622076afd4 (patch)
tree		4c78f9e016dd0998e8539a1da358b4ba961db8e9 /drivers
parent		a47d5dac9d8481766382f8cf1483dd581df38b99 (diff)
sgi-xp: separate chctl_flags from XPC's notify IRQ

Tie current IPI references to either XPC's notify IRQ or channel control
flags.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/misc/sgi-xp/xpc.h		124
-rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	135
-rw-r--r--	drivers/misc/sgi-xp/xpc_main.c		 59
-rw-r--r--	drivers/misc/sgi-xp/xpc_sn2.c		301
-rw-r--r--	drivers/misc/sgi-xp/xpc_uv.c		 10
5 files changed, 327 insertions(+), 302 deletions(-)
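The rename is mostly mechanical, but the representation change behind it is worth spelling out: the old code packed one byte of "IPI flags" per channel into a bare u64 AMO word with shift/mask macros (XPC_SET_IPI_FLAGS/XPC_GET_IPI_FLAGS), while the patch gives that word a type, union xpc_channel_ctl_flags, whose flags[] array addresses a channel's byte directly. A minimal stand-alone sketch of the equivalence, assuming eight channels and a little-endian machine (as on the sn2 hardware this driver targets); it is plain user-space C written for illustration, not code from the patch:

#include <assert.h>
#include <stdint.h>

#define XPC_MAX_NCHANNELS 8	/* assumption: one flags byte per channel */

/* old style: shift/mask macros over a bare 64-bit AMO word */
#define XPC_GET_IPI_FLAGS(_amo, _c)	((uint8_t)(((_amo) >> ((_c) * 8)) & 0xff))
#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	((_amo) |= ((uint64_t)(_f) << ((_c) * 8)))

/* new style: the same 64-bit word viewed as one chctl-flags byte per channel */
union xpc_channel_ctl_flags {
	uint64_t all_flags;
	uint8_t flags[XPC_MAX_NCHANNELS];
};

#define XPC_CHCTL_OPENREQUEST	0x04

int main(void)
{
	uint64_t amo = 0;
	union xpc_channel_ctl_flags chctl = { .all_flags = 0 };

	XPC_SET_IPI_FLAGS(amo, 3, XPC_CHCTL_OPENREQUEST);	/* old packing */
	chctl.flags[3] |= XPC_CHCTL_OPENREQUEST;		/* new packing */

	/* on a little-endian machine the two views of the word agree */
	assert(chctl.all_flags == amo);
	assert(XPC_GET_IPI_FLAGS(amo, 3) == chctl.flags[3]);
	return 0;
}

The xpc.h hunks below introduce exactly this union plus two inline helpers, and retire the old macros; the .c hunks then rename every producer and consumer of the flags word accordingly.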
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index b04cfbed9581..26a1725f68ad 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -186,9 +186,10 @@ struct xpc_vars_part_sn2 {
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
 
-	u64 IPI_amo_pa;		/* physical address of IPI AMO_t structure */
-	int IPI_nasid;		/* nasid of where to send IPIs */
-	int IPI_phys_cpuid;	/* physical CPU ID of where to send IPIs */
+	u64 chctl_amo_pa;	/* physical address of chctl flags' AMO_t */
+
+	int notify_IRQ_nasid;	/* nasid of where to send notify IRQs */
+	int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
 
 	u8 nchannels;		/* #of defined channels supported */
 
@@ -407,7 +408,7 @@ struct xpc_channel {
 	atomic_t n_on_msg_allocate_wq;	/* #on msg allocation wait queue */
 	wait_queue_head_t msg_allocate_wq;	/* msg allocation wait queue */
 
-	u8 delayed_IPI_flags;	/* IPI flags received, but delayed */
+	u8 delayed_chctl_flags;	/* chctl flags received, but delayed */
 				/* action until channel disconnected */
 
 	/* queue of msg senders who want to be notified when msg received */
@@ -470,6 +471,54 @@ struct xpc_channel {
 #define	XPC_C_WDISCONNECT	0x00040000  /* waiting for channel disconnect */
 
 /*
+ * The channel control flags (chctl) union consists of a 64-bit variable which
+ * is divided up into eight bytes, ordered from right to left. Byte zero
+ * pertains to channel 0, byte one to channel 1, and so on. Each channel's byte
+ * can have one or more of the chctl flags set in it.
+ */
+
+union xpc_channel_ctl_flags {
+	u64 all_flags;
+	u8 flags[XPC_MAX_NCHANNELS];
+};
+
+/* chctl flags */
+#define	XPC_CHCTL_CLOSEREQUEST	0x01
+#define	XPC_CHCTL_CLOSEREPLY	0x02
+#define	XPC_CHCTL_OPENREQUEST	0x04
+#define	XPC_CHCTL_OPENREPLY	0x08
+#define	XPC_CHCTL_MSGREQUEST	0x10
+
+#define XPC_OPENCLOSE_CHCTL_FLAGS \
+			(XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \
+			 XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY)
+#define XPC_MSG_CHCTL_FLAGS	XPC_CHCTL_MSGREQUEST
+
+static inline int
+xpc_any_openclose_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
+{
+	int ch_number;
+
+	for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
+		if (chctl->flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS)
+			return 1;
+	}
+	return 0;
+}
+
+static inline int
+xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
+{
+	int ch_number;
+
+	for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
+		if (chctl->flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
+			return 1;
+	}
+	return 0;
+}
+
+/*
  * Manages channels on a partition basis. There is one of these structures
  * for each partition (a partition will never utilize the structure that
  * represents itself).
@@ -494,12 +543,12 @@ struct xpc_partition_sn2 {
 
 	u64 remote_openclose_args_pa;	/* phys addr of remote's args */
 
-	int remote_IPI_nasid;	/* nasid of where to send IPIs */
-	int remote_IPI_phys_cpuid;	/* phys CPU ID of where to send IPIs */
-	char IPI_owner[8];	/* IPI owner's name */
+	int notify_IRQ_nasid;	/* nasid of where to send notify IRQs */
+	int notify_IRQ_phys_cpuid;	/* CPUID of where to send notify IRQs */
+	char notify_IRQ_owner[8];	/* notify IRQ's owner's name */
 
-	AMO_t *remote_IPI_amo_va;    /* address of remote IPI AMO_t structure */
-	AMO_t *local_IPI_amo_va;	/* address of IPI AMO_t structure */
+	AMO_t *remote_chctl_amo_va; /* address of remote chctl flags' AMO_t */
+	AMO_t *local_chctl_amo_va;	/* address of chctl flags' AMO_t */
 
 	struct timer_list dropped_notify_IRQ_timer;	/* dropped IRQ timer */
 };
@@ -536,7 +585,10 @@ struct xpc_partition {
 	atomic_t nchannels_engaged;  /* #of channels engaged with remote part */
 	struct xpc_channel *channels;	/* array of channel structures */
 
-	/* fields used to pass args when opening or closing a channel */
+	/* fields used for managing channel avialability and activity */
+
+	union xpc_channel_ctl_flags chctl; /* chctl flags yet to be processed */
+	spinlock_t chctl_lock;	/* chctl flags lock */
 
 	void *local_openclose_args_base;  /* base address of kmalloc'd space */
 	struct xpc_openclose_args *local_openclose_args; /* local's args */
@@ -544,11 +596,6 @@ struct xpc_partition {
 	struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
 							   /* args */
 
-	/* IPI sending, receiving and handling related fields */
-
-	u64 local_IPI_amo;	/* IPI amo flags yet to be handled */
-	spinlock_t IPI_lock;	/* IPI handler lock */
-
 	/* channel manager related fields */
 
 	atomic_t channel_mgr_requests;	/* #of requests to activate chan mgr */
@@ -580,11 +627,12 @@ struct xpc_partition {
 #define XPC_P_TORNDOWN		0x03	/* infrastructure is torndown */
 
 /*
- * struct xpc_partition IPI_timer #of seconds to wait before checking for
- * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
- * after the IPI was received.
+ * struct xpc_partition_sn2's dropped notify IRQ timer is set to wait the
+ * following interval #of seconds before checking for dropped notify IRQs.
+ * These can occur whenever an IRQ's associated amo write doesn't complete
+ * until after the IRQ was received.
  */
-#define XPC_P_DROPPED_IPI_WAIT_INTERVAL	(0.25 * HZ)
+#define XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL	(0.25 * HZ)
 
 /* number of seconds to wait for other partitions to disengage */
 #define XPC_DISENGAGE_DEFAULT_TIMELIMIT	90
@@ -617,9 +665,9 @@ extern void (*xpc_offline_heartbeat) (void);
 extern void (*xpc_online_heartbeat) (void);
 extern void (*xpc_check_remote_hb) (void);
 extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
-extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *);
+extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *);
 extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
-extern void (*xpc_process_msg_IPI) (struct xpc_partition *, int);
+extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int);
 extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *);
 extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *);
 extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, u64,
@@ -638,14 +686,13 @@ extern int (*xpc_any_partition_engaged) (void);
 extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *);
 extern void (*xpc_assume_partition_disengaged) (short);
 
-extern void (*xpc_send_channel_closerequest) (struct xpc_channel *,
-					      unsigned long *);
-extern void (*xpc_send_channel_closereply) (struct xpc_channel *,
-					    unsigned long *);
-extern void (*xpc_send_channel_openrequest) (struct xpc_channel *,
-					     unsigned long *);
-extern void (*xpc_send_channel_openreply) (struct xpc_channel *,
-					   unsigned long *);
+extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *,
+					    unsigned long *);
+extern void (*xpc_send_chctl_closereply) (struct xpc_channel *,
+					  unsigned long *);
+extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *,
+					   unsigned long *);
+extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *);
 
 extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, u32, void *, u16,
 				       u8, xpc_notify_func, void *);
@@ -689,7 +736,7 @@ extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16);
 extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16,
 					       xpc_notify_func, void *);
 extern void xpc_initiate_received(short, int, void *);
-extern void xpc_process_channel_activity(struct xpc_partition *);
+extern void xpc_process_sent_chctl_flags(struct xpc_partition *);
 extern void xpc_connected_callout(struct xpc_channel *);
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
@@ -799,25 +846,4 @@ xpc_part_ref(struct xpc_partition *part)
 		(_p)->reason_line = _line; \
 	}
 
-/*
- * The sending and receiving of IPIs includes the setting of an >>>AMO variable
- * to indicate the reason the IPI was sent. The 64-bit variable is divided
- * up into eight bytes, ordered from right to left. Byte zero pertains to
- * channel 0, byte one to channel 1, and so on. Each byte is described by
- * the following IPI flags.
- */
-
-#define	XPC_IPI_CLOSEREQUEST	0x01
-#define	XPC_IPI_CLOSEREPLY	0x02
-#define	XPC_IPI_OPENREQUEST	0x04
-#define	XPC_IPI_OPENREPLY	0x08
-#define	XPC_IPI_MSGREQUEST	0x10
-
-/* given an >>>AMO variable and a channel#, get its associated IPI flags */
-#define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
-#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))
-
-#define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo)	((_amo) & 0x0f0f0f0f0f0f0f0fUL)
-#define	XPC_ANY_MSG_IPI_FLAGS_SET(_amo)		((_amo) & 0x1010101010101010UL)
-
 #endif /* _DRIVERS_MISC_SGIXP_XPC_H */
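One detail of the header change: the removed XPC_ANY_OPENCLOSE_IPI_FLAGS_SET()/XPC_ANY_MSG_IPI_FLAGS_SET() masks and the new xpc_any_*_chctl_flags_set() helpers answer the same question, namely whether any channel's byte carries an open/close flag (low nibble, 0x0f) or the message flag (0x10). A quick user-space sketch, again illustration rather than code from the patch, showing the old whole-word mask and the new per-byte loop agree regardless of byte order, since both end up testing every byte:

#include <assert.h>
#include <stdint.h>

#define XPC_MAX_NCHANNELS		8
#define XPC_OPENCLOSE_CHCTL_FLAGS	0x0f	/* CLOSEREQUEST|CLOSEREPLY|OPENREQUEST|OPENREPLY */

/* removed macro: one mask test over the whole 64-bit AMO word */
#define XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo)	((_amo) & 0x0f0f0f0f0f0f0f0fUL)

union xpc_channel_ctl_flags {
	uint64_t all_flags;
	uint8_t flags[XPC_MAX_NCHANNELS];
};

/* new helper: the same question asked one channel byte at a time */
static int xpc_any_openclose_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
{
	int ch_number;

	for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) {
		if (chctl->flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS)
			return 1;
	}
	return 0;
}

int main(void)
{
	union xpc_channel_ctl_flags chctl = { .all_flags = 0 };

	chctl.flags[6] = 0x02;	/* e.g. XPC_CHCTL_CLOSEREPLY pending on channel 6 */

	assert(!!XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(chctl.all_flags) ==
	       xpc_any_openclose_chctl_flags_set(&chctl));
	return 0;
}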
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 48b16136305e..0d3c153d1d0b 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -201,7 +201,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
201 201
202 if (!(ch->flags & XPC_C_OPENREPLY)) { 202 if (!(ch->flags & XPC_C_OPENREPLY)) {
203 ch->flags |= XPC_C_OPENREPLY; 203 ch->flags |= XPC_C_OPENREPLY;
204 xpc_send_channel_openreply(ch, irq_flags); 204 xpc_send_chctl_openreply(ch, irq_flags);
205 } 205 }
206 206
207 if (!(ch->flags & XPC_C_ROPENREPLY)) 207 if (!(ch->flags & XPC_C_ROPENREPLY))
@@ -307,7 +307,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
307 307
308 if (!(ch->flags & XPC_C_CLOSEREPLY)) { 308 if (!(ch->flags & XPC_C_CLOSEREPLY)) {
309 ch->flags |= XPC_C_CLOSEREPLY; 309 ch->flags |= XPC_C_CLOSEREPLY;
310 xpc_send_channel_closereply(ch, irq_flags); 310 xpc_send_chctl_closereply(ch, irq_flags);
311 } 311 }
312 312
313 if (!(ch->flags & XPC_C_RCLOSEREPLY)) 313 if (!(ch->flags & XPC_C_RCLOSEREPLY))
@@ -344,15 +344,15 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
344 if (ch->flags & XPC_C_WDISCONNECT) { 344 if (ch->flags & XPC_C_WDISCONNECT) {
345 /* we won't lose the CPU since we're holding ch->lock */ 345 /* we won't lose the CPU since we're holding ch->lock */
346 complete(&ch->wdisconnect_wait); 346 complete(&ch->wdisconnect_wait);
347 } else if (ch->delayed_IPI_flags) { 347 } else if (ch->delayed_chctl_flags) {
348 if (part->act_state != XPC_P_DEACTIVATING) { 348 if (part->act_state != XPC_P_DEACTIVATING) {
349 /* time to take action on any delayed IPI flags */ 349 /* time to take action on any delayed chctl flags */
350 spin_lock(&part->IPI_lock); 350 spin_lock(&part->chctl_lock);
351 XPC_SET_IPI_FLAGS(part->local_IPI_amo, ch->number, 351 part->chctl.flags[ch->number] |=
352 ch->delayed_IPI_flags); 352 ch->delayed_chctl_flags;
353 spin_unlock(&part->IPI_lock); 353 spin_unlock(&part->chctl_lock);
354 } 354 }
355 ch->delayed_IPI_flags = 0; 355 ch->delayed_chctl_flags = 0;
356 } 356 }
357} 357}
358 358
@@ -360,8 +360,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
360 * Process a change in the channel's remote connection state. 360 * Process a change in the channel's remote connection state.
361 */ 361 */
362static void 362static void
363xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, 363xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
364 u8 IPI_flags) 364 u8 chctl_flags)
365{ 365{
366 unsigned long irq_flags; 366 unsigned long irq_flags;
367 struct xpc_openclose_args *args = 367 struct xpc_openclose_args *args =
@@ -376,24 +376,24 @@ again:
376 if ((ch->flags & XPC_C_DISCONNECTED) && 376 if ((ch->flags & XPC_C_DISCONNECTED) &&
377 (ch->flags & XPC_C_WDISCONNECT)) { 377 (ch->flags & XPC_C_WDISCONNECT)) {
378 /* 378 /*
379 * Delay processing IPI flags until thread waiting disconnect 379 * Delay processing chctl flags until thread waiting disconnect
380 * has had a chance to see that the channel is disconnected. 380 * has had a chance to see that the channel is disconnected.
381 */ 381 */
382 ch->delayed_IPI_flags |= IPI_flags; 382 ch->delayed_chctl_flags |= chctl_flags;
383 spin_unlock_irqrestore(&ch->lock, irq_flags); 383 spin_unlock_irqrestore(&ch->lock, irq_flags);
384 return; 384 return;
385 } 385 }
386 386
387 if (IPI_flags & XPC_IPI_CLOSEREQUEST) { 387 if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {
388 388
389 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREQUEST (reason=%d) received " 389 dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
390 "from partid=%d, channel=%d\n", args->reason, 390 "from partid=%d, channel=%d\n", args->reason,
391 ch->partid, ch->number); 391 ch->partid, ch->number);
392 392
393 /* 393 /*
394 * If RCLOSEREQUEST is set, we're probably waiting for 394 * If RCLOSEREQUEST is set, we're probably waiting for
395 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed 395 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
396 * with this RCLOSEREQUEST in the IPI_flags. 396 * with this RCLOSEREQUEST in the chctl_flags.
397 */ 397 */
398 398
399 if (ch->flags & XPC_C_RCLOSEREQUEST) { 399 if (ch->flags & XPC_C_RCLOSEREQUEST) {
@@ -402,8 +402,8 @@ again:
402 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); 402 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
403 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); 403 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);
404 404
405 DBUG_ON(!(IPI_flags & XPC_IPI_CLOSEREPLY)); 405 DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
406 IPI_flags &= ~XPC_IPI_CLOSEREPLY; 406 chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
407 ch->flags |= XPC_C_RCLOSEREPLY; 407 ch->flags |= XPC_C_RCLOSEREPLY;
408 408
409 /* both sides have finished disconnecting */ 409 /* both sides have finished disconnecting */
@@ -413,17 +413,15 @@ again:
413 } 413 }
414 414
415 if (ch->flags & XPC_C_DISCONNECTED) { 415 if (ch->flags & XPC_C_DISCONNECTED) {
416 if (!(IPI_flags & XPC_IPI_OPENREQUEST)) { 416 if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
417 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, 417 if (part->chctl.flags[ch_number] &
418 ch_number) & 418 XPC_CHCTL_OPENREQUEST) {
419 XPC_IPI_OPENREQUEST)) { 419
420 420 DBUG_ON(ch->delayed_chctl_flags != 0);
421 DBUG_ON(ch->delayed_IPI_flags != 0); 421 spin_lock(&part->chctl_lock);
422 spin_lock(&part->IPI_lock); 422 part->chctl.flags[ch_number] |=
423 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 423 XPC_CHCTL_CLOSEREQUEST;
424 ch_number, 424 spin_unlock(&part->chctl_lock);
425 XPC_IPI_CLOSEREQUEST);
426 spin_unlock(&part->IPI_lock);
427 } 425 }
428 spin_unlock_irqrestore(&ch->lock, irq_flags); 426 spin_unlock_irqrestore(&ch->lock, irq_flags);
429 return; 427 return;
@@ -436,7 +434,7 @@ again:
436 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); 434 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
437 } 435 }
438 436
439 IPI_flags &= ~(XPC_IPI_OPENREQUEST | XPC_IPI_OPENREPLY); 437 chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY);
440 438
441 /* 439 /*
442 * The meaningful CLOSEREQUEST connection state fields are: 440 * The meaningful CLOSEREQUEST connection state fields are:
@@ -454,7 +452,7 @@ again:
454 452
455 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); 453 XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
456 454
457 DBUG_ON(IPI_flags & XPC_IPI_CLOSEREPLY); 455 DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
458 spin_unlock_irqrestore(&ch->lock, irq_flags); 456 spin_unlock_irqrestore(&ch->lock, irq_flags);
459 return; 457 return;
460 } 458 }
@@ -462,10 +460,10 @@ again:
462 xpc_process_disconnect(ch, &irq_flags); 460 xpc_process_disconnect(ch, &irq_flags);
463 } 461 }
464 462
465 if (IPI_flags & XPC_IPI_CLOSEREPLY) { 463 if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {
466 464
467 dev_dbg(xpc_chan, "XPC_IPI_CLOSEREPLY received from partid=%d," 465 dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
468 " channel=%d\n", ch->partid, ch->number); 466 "%d, channel=%d\n", ch->partid, ch->number);
469 467
470 if (ch->flags & XPC_C_DISCONNECTED) { 468 if (ch->flags & XPC_C_DISCONNECTED) {
471 DBUG_ON(part->act_state != XPC_P_DEACTIVATING); 469 DBUG_ON(part->act_state != XPC_P_DEACTIVATING);
@@ -476,15 +474,14 @@ again:
476 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); 474 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
477 475
478 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { 476 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
479 if ((XPC_GET_IPI_FLAGS(part->local_IPI_amo, ch_number) 477 if (part->chctl.flags[ch_number] &
480 & XPC_IPI_CLOSEREQUEST)) { 478 XPC_CHCTL_CLOSEREQUEST) {
481 479
482 DBUG_ON(ch->delayed_IPI_flags != 0); 480 DBUG_ON(ch->delayed_chctl_flags != 0);
483 spin_lock(&part->IPI_lock); 481 spin_lock(&part->chctl_lock);
484 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 482 part->chctl.flags[ch_number] |=
485 ch_number, 483 XPC_CHCTL_CLOSEREPLY;
486 XPC_IPI_CLOSEREPLY); 484 spin_unlock(&part->chctl_lock);
487 spin_unlock(&part->IPI_lock);
488 } 485 }
489 spin_unlock_irqrestore(&ch->lock, irq_flags); 486 spin_unlock_irqrestore(&ch->lock, irq_flags);
490 return; 487 return;
@@ -498,9 +495,9 @@ again:
498 } 495 }
499 } 496 }
500 497
501 if (IPI_flags & XPC_IPI_OPENREQUEST) { 498 if (chctl_flags & XPC_CHCTL_OPENREQUEST) {
502 499
503 dev_dbg(xpc_chan, "XPC_IPI_OPENREQUEST (msg_size=%d, " 500 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (msg_size=%d, "
504 "local_nentries=%d) received from partid=%d, " 501 "local_nentries=%d) received from partid=%d, "
505 "channel=%d\n", args->msg_size, args->local_nentries, 502 "channel=%d\n", args->msg_size, args->local_nentries,
506 ch->partid, ch->number); 503 ch->partid, ch->number);
@@ -512,7 +509,7 @@ again:
512 } 509 }
513 510
514 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { 511 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
515 ch->delayed_IPI_flags |= XPC_IPI_OPENREQUEST; 512 ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
516 spin_unlock_irqrestore(&ch->lock, irq_flags); 513 spin_unlock_irqrestore(&ch->lock, irq_flags);
517 return; 514 return;
518 } 515 }
@@ -554,13 +551,13 @@ again:
554 xpc_process_connect(ch, &irq_flags); 551 xpc_process_connect(ch, &irq_flags);
555 } 552 }
556 553
557 if (IPI_flags & XPC_IPI_OPENREPLY) { 554 if (chctl_flags & XPC_CHCTL_OPENREPLY) {
558 555
559 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY (local_msgqueue_pa=0x%lx, " 556 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
560 "local_nentries=%d, remote_nentries=%d) received from " 557 "0x%lx, local_nentries=%d, remote_nentries=%d) "
561 "partid=%d, channel=%d\n", args->local_msgqueue_pa, 558 "received from partid=%d, channel=%d\n",
562 args->local_nentries, args->remote_nentries, 559 args->local_msgqueue_pa, args->local_nentries,
563 ch->partid, ch->number); 560 args->remote_nentries, ch->partid, ch->number);
564 561
565 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { 562 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
566 spin_unlock_irqrestore(&ch->lock, irq_flags); 563 spin_unlock_irqrestore(&ch->lock, irq_flags);
@@ -591,7 +588,7 @@ again:
591 ch->remote_msgqueue_pa = args->local_msgqueue_pa; 588 ch->remote_msgqueue_pa = args->local_msgqueue_pa;
592 589
593 if (args->local_nentries < ch->remote_nentries) { 590 if (args->local_nentries < ch->remote_nentries) {
594 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " 591 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
595 "remote_nentries=%d, old remote_nentries=%d, " 592 "remote_nentries=%d, old remote_nentries=%d, "
596 "partid=%d, channel=%d\n", 593 "partid=%d, channel=%d\n",
597 args->local_nentries, ch->remote_nentries, 594 args->local_nentries, ch->remote_nentries,
@@ -600,7 +597,7 @@ again:
600 ch->remote_nentries = args->local_nentries; 597 ch->remote_nentries = args->local_nentries;
601 } 598 }
602 if (args->remote_nentries < ch->local_nentries) { 599 if (args->remote_nentries < ch->local_nentries) {
603 dev_dbg(xpc_chan, "XPC_IPI_OPENREPLY: new " 600 dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
604 "local_nentries=%d, old local_nentries=%d, " 601 "local_nentries=%d, old local_nentries=%d, "
605 "partid=%d, channel=%d\n", 602 "partid=%d, channel=%d\n",
606 args->remote_nentries, ch->local_nentries, 603 args->remote_nentries, ch->local_nentries,
@@ -690,7 +687,7 @@ xpc_connect_channel(struct xpc_channel *ch)
690 /* initiate the connection */ 687 /* initiate the connection */
691 688
692 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); 689 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
693 xpc_send_channel_openrequest(ch, &irq_flags); 690 xpc_send_chctl_openrequest(ch, &irq_flags);
694 691
695 xpc_process_connect(ch, &irq_flags); 692 xpc_process_connect(ch, &irq_flags);
696 693
@@ -700,15 +697,15 @@ xpc_connect_channel(struct xpc_channel *ch)
700} 697}
701 698
702void 699void
703xpc_process_channel_activity(struct xpc_partition *part) 700xpc_process_sent_chctl_flags(struct xpc_partition *part)
704{ 701{
705 unsigned long irq_flags; 702 unsigned long irq_flags;
706 u64 IPI_amo, IPI_flags; 703 union xpc_channel_ctl_flags chctl;
707 struct xpc_channel *ch; 704 struct xpc_channel *ch;
708 int ch_number; 705 int ch_number;
709 u32 ch_flags; 706 u32 ch_flags;
710 707
711 IPI_amo = xpc_get_IPI_flags(part); 708 chctl.all_flags = xpc_get_chctl_all_flags(part);
712 709
713 /* 710 /*
714 * Initiate channel connections for registered channels. 711 * Initiate channel connections for registered channels.
@@ -721,14 +718,14 @@ xpc_process_channel_activity(struct xpc_partition *part)
721 ch = &part->channels[ch_number]; 718 ch = &part->channels[ch_number];
722 719
723 /* 720 /*
724 * Process any open or close related IPI flags, and then deal 721 * Process any open or close related chctl flags, and then deal
725 * with connecting or disconnecting the channel as required. 722 * with connecting or disconnecting the channel as required.
726 */ 723 */
727 724
728 IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); 725 if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
729 726 xpc_process_openclose_chctl_flags(part, ch_number,
730 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) 727 chctl.flags[ch_number]);
731 xpc_process_openclose_IPI(part, ch_number, IPI_flags); 728 }
732 729
733 ch_flags = ch->flags; /* need an atomic snapshot of flags */ 730 ch_flags = ch->flags; /* need an atomic snapshot of flags */
734 731
@@ -755,13 +752,13 @@ xpc_process_channel_activity(struct xpc_partition *part)
755 } 752 }
756 753
757 /* 754 /*
758 * Process any message related IPI flags, this may involve the 755 * Process any message related chctl flags, this may involve
759 * activation of kthreads to deliver any pending messages sent 756 * the activation of kthreads to deliver any pending messages
760 * from the other partition. 757 * sent from the other partition.
761 */ 758 */
762 759
763 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) 760 if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
764 xpc_process_msg_IPI(part, ch_number); 761 xpc_process_msg_chctl_flags(part, ch_number);
765 } 762 }
766} 763}
767 764
@@ -937,7 +934,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
937 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | 934 XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
938 XPC_C_CONNECTING | XPC_C_CONNECTED); 935 XPC_C_CONNECTING | XPC_C_CONNECTED);
939 936
940 xpc_send_channel_closerequest(ch, irq_flags); 937 xpc_send_chctl_closerequest(ch, irq_flags);
941 938
942 if (channel_was_connected) 939 if (channel_was_connected)
943 ch->flags |= XPC_C_WASCONNECTED; 940 ch->flags |= XPC_C_WASCONNECTED;
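The xpc_channel.c changes above keep the existing "delayed flags" hand-off and only rename it: chctl flags that arrive while a channel is still tearing down are parked in ch->delayed_chctl_flags and later folded back into the partition's pending set under chctl_lock (formerly IPI_lock), after which the channel manager is woken. A pared-down sketch of that hand-off; the struct names below and the mutex standing in for the kernel spinlock are stand-ins, not the driver's real types:

#include <pthread.h>
#include <stdint.h>

#define XPC_MAX_NCHANNELS 8

/* hypothetical, pared-down stand-ins for struct xpc_partition / xpc_channel */
struct partition {
	pthread_mutex_t chctl_lock;		/* stands in for spinlock_t chctl_lock */
	uint8_t chctl_flags[XPC_MAX_NCHANNELS];	/* stands in for part->chctl.flags[] */
};

struct channel {
	int number;
	int disconnecting;			/* stands in for XPC_C_DISCONNECTING */
	uint8_t delayed_chctl_flags;
};

/* flags that arrive mid-disconnect are parked on the channel... */
static void note_chctl_flags(struct channel *ch, uint8_t flags)
{
	if (ch->disconnecting)
		ch->delayed_chctl_flags |= flags;
	/* ...otherwise they would be acted on immediately */
}

/* ...and replayed into the partition's pending set once the disconnect ends */
static void replay_delayed_chctl_flags(struct partition *part, struct channel *ch)
{
	if (ch->delayed_chctl_flags == 0)
		return;

	pthread_mutex_lock(&part->chctl_lock);
	part->chctl_flags[ch->number] |= ch->delayed_chctl_flags;
	pthread_mutex_unlock(&part->chctl_lock);

	ch->delayed_chctl_flags = 0;
	/* the real driver then wakes the channel manager to process them */
}

int main(void)
{
	struct partition part = { .chctl_lock = PTHREAD_MUTEX_INITIALIZER };
	struct channel ch = { .number = 3, .disconnecting = 1 };

	note_chctl_flags(&ch, 0x04 /* e.g. XPC_CHCTL_OPENREQUEST */);
	ch.disconnecting = 0;
	replay_delayed_chctl_flags(&part, &ch);
	return 0;
}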
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 563aaf4a2ff6..43f5b686ecf3 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -25,18 +25,18 @@
25 * 25 *
26 * Caveats: 26 * Caveats:
27 * 27 *
28 * . We currently have no way to determine which nasid an IPI came 28 * . Currently on sn2, we have no way to determine which nasid an IRQ
29 * from. Thus, >>> xpc_IPI_send() does a remote AMO write followed by 29 * came from. Thus, xpc_send_IRQ_sn2() does a remote AMO write
30 * an IPI. The AMO indicates where data is to be pulled from, so 30 * followed by an IPI. The AMO indicates where data is to be pulled
31 * after the IPI arrives, the remote partition checks the AMO word. 31 * from, so after the IPI arrives, the remote partition checks the AMO
32 * The IPI can actually arrive before the AMO however, so other code 32 * word. The IPI can actually arrive before the AMO however, so other
33 * must periodically check for this case. Also, remote AMO operations 33 * code must periodically check for this case. Also, remote AMO
34 * do not reliably time out. Thus we do a remote PIO read solely to 34 * operations do not reliably time out. Thus we do a remote PIO read
35 * know whether the remote partition is down and whether we should 35 * solely to know whether the remote partition is down and whether we
36 * stop sending IPIs to it. This remote PIO read operation is set up 36 * should stop sending IPIs to it. This remote PIO read operation is
37 * in a special nofault region so SAL knows to ignore (and cleanup) 37 * set up in a special nofault region so SAL knows to ignore (and
38 * any errors due to the remote AMO write, PIO read, and/or PIO 38 * cleanup) any errors due to the remote AMO write, PIO read, and/or
39 * write operations. 39 * PIO write operations.
40 * 40 *
41 * If/when new hardware solves this IPI problem, we should abandon 41 * If/when new hardware solves this IPI problem, we should abandon
42 * the current approach. 42 * the current approach.
@@ -185,8 +185,8 @@ void (*xpc_check_remote_hb) (void);
185 185
186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); 186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
187void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch); 187void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
188u64 (*xpc_get_IPI_flags) (struct xpc_partition *part); 188u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
189void (*xpc_process_msg_IPI) (struct xpc_partition *part, int ch_number); 189void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
190int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch); 190int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
191struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); 191struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
192 192
@@ -206,14 +206,14 @@ int (*xpc_any_partition_engaged) (void);
206void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part); 206void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
207void (*xpc_assume_partition_disengaged) (short partid); 207void (*xpc_assume_partition_disengaged) (short partid);
208 208
209void (*xpc_send_channel_closerequest) (struct xpc_channel *ch, 209void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
210 unsigned long *irq_flags);
211void (*xpc_send_channel_closereply) (struct xpc_channel *ch,
212 unsigned long *irq_flags); 210 unsigned long *irq_flags);
213void (*xpc_send_channel_openrequest) (struct xpc_channel *ch, 211void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
214 unsigned long *irq_flags); 212 unsigned long *irq_flags);
215void (*xpc_send_channel_openreply) (struct xpc_channel *ch, 213void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
216 unsigned long *irq_flags); 214 unsigned long *irq_flags);
215void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
216 unsigned long *irq_flags);
217 217
218enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags, 218enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
219 void *payload, u16 payload_size, u8 notify_type, 219 void *payload, u16 payload_size, u8 notify_type,
@@ -302,7 +302,7 @@ xpc_hb_checker(void *ignore)
302 302
303 /* 303 /*
304 * We need to periodically recheck to ensure no 304 * We need to periodically recheck to ensure no
305 * IPI/AMO pairs have been missed. That check 305 * IRQ/AMO pairs have been missed. That check
306 * must always reset xpc_hb_check_timeout. 306 * must always reset xpc_hb_check_timeout.
307 */ 307 */
308 force_IRQ = 1; 308 force_IRQ = 1;
@@ -378,7 +378,7 @@ xpc_channel_mgr(struct xpc_partition *part)
378 atomic_read(&part->nchannels_active) > 0 || 378 atomic_read(&part->nchannels_active) > 0 ||
379 !xpc_partition_disengaged(part)) { 379 !xpc_partition_disengaged(part)) {
380 380
381 xpc_process_channel_activity(part); 381 xpc_process_sent_chctl_flags(part);
382 382
383 /* 383 /*
384 * Wait until we've been requested to activate kthreads or 384 * Wait until we've been requested to activate kthreads or
@@ -396,7 +396,7 @@ xpc_channel_mgr(struct xpc_partition *part)
396 atomic_dec(&part->channel_mgr_requests); 396 atomic_dec(&part->channel_mgr_requests);
397 (void)wait_event_interruptible(part->channel_mgr_wq, 397 (void)wait_event_interruptible(part->channel_mgr_wq,
398 (atomic_read(&part->channel_mgr_requests) > 0 || 398 (atomic_read(&part->channel_mgr_requests) > 0 ||
399 part->local_IPI_amo != 0 || 399 part->chctl.all_flags != 0 ||
400 (part->act_state == XPC_P_DEACTIVATING && 400 (part->act_state == XPC_P_DEACTIVATING &&
401 atomic_read(&part->nchannels_active) == 0 && 401 atomic_read(&part->nchannels_active) == 0 &&
402 xpc_partition_disengaged(part)))); 402 xpc_partition_disengaged(part))));
@@ -753,16 +753,15 @@ xpc_disconnect_wait(int ch_number)
753 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); 753 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
754 wakeup_channel_mgr = 0; 754 wakeup_channel_mgr = 0;
755 755
756 if (ch->delayed_IPI_flags) { 756 if (ch->delayed_chctl_flags) {
757 if (part->act_state != XPC_P_DEACTIVATING) { 757 if (part->act_state != XPC_P_DEACTIVATING) {
758 spin_lock(&part->IPI_lock); 758 spin_lock(&part->chctl_lock);
759 XPC_SET_IPI_FLAGS(part->local_IPI_amo, 759 part->chctl.flags[ch->number] |=
760 ch->number, 760 ch->delayed_chctl_flags;
761 ch->delayed_IPI_flags); 761 spin_unlock(&part->chctl_lock);
762 spin_unlock(&part->IPI_lock);
763 wakeup_channel_mgr = 1; 762 wakeup_channel_mgr = 1;
764 } 763 }
765 ch->delayed_IPI_flags = 0; 764 ch->delayed_chctl_flags = 0;
766 } 765 }
767 766
768 ch->flags &= ~XPC_C_WDISCONNECT; 767 ch->flags &= ~XPC_C_WDISCONNECT;
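As the rewritten Caveats comment above notes, the notify IRQ can outrun the AMO write that carries the flags, so the receive side cannot rely on the interrupt alone: in xpc_sn2.c below, the AMO word is read with an atomic fetch-and-clear from both the IRQ handler and the periodic dropped-IRQ timer, merged into part->chctl, and then walked channel by channel by xpc_process_sent_chctl_flags(). A rough user-space analogue of that snapshot-and-dispatch loop using C11 atomics; the function names and printf output here are illustrative only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define XPC_MAX_NCHANNELS	8
#define XPC_CHCTL_OPENREQUEST	0x04
#define XPC_CHCTL_MSGREQUEST	0x10

/* stand-in for the partition's chctl AMO word */
static _Atomic uint64_t chctl_amo;

/* sender side: OR this channel's flag byte into the shared word */
static void send_chctl_flag(int ch_number, uint8_t flag)
{
	atomic_fetch_or(&chctl_amo, (uint64_t)flag << (ch_number * 8));
	/* ...followed by the notify IRQ in the real driver */
}

/* receiver side: snapshot-and-clear, then dispatch per channel byte */
static void process_sent_chctl_flags(void)
{
	uint64_t all_flags = atomic_exchange(&chctl_amo, 0);
	int ch;

	for (ch = 0; ch < XPC_MAX_NCHANNELS; ch++) {
		uint8_t flags = (all_flags >> (ch * 8)) & 0xff;

		if (flags & XPC_CHCTL_OPENREQUEST)
			printf("channel %d: open request\n", ch);
		if (flags & XPC_CHCTL_MSGREQUEST)
			printf("channel %d: message request\n", ch);
	}
}

int main(void)
{
	send_chctl_flag(2, XPC_CHCTL_OPENREQUEST);
	send_chctl_flag(5, XPC_CHCTL_MSGREQUEST);
	process_sent_chctl_flags();	/* run from IRQ handler or timer alike */
	return 0;
}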
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 69d74bd56899..0fef7d86a5a2 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -104,20 +104,20 @@ xpc_disallow_IPI_ops_sn2(void)
104} 104}
105 105
106/* 106/*
107 * The following set of macros and functions are used for the sending and 107 * The following set of functions are used for the sending and receiving of
108 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, 108 * IRQs (also known as IPIs). There are two flavors of IRQs, one that is
109 * one that is associated with partition activity (SGI_XPC_ACTIVATE) and 109 * associated with partition activity (SGI_XPC_ACTIVATE) and the other that
110 * the other that is associated with channel activity (SGI_XPC_NOTIFY). 110 * is associated with channel activity (SGI_XPC_NOTIFY).
111 */ 111 */
112 112
113static u64 113static u64
114xpc_IPI_receive_sn2(AMO_t *amo) 114xpc_receive_IRQ_amo_sn2(AMO_t *amo)
115{ 115{
116 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); 116 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
117} 117}
118 118
119static enum xp_retval 119static enum xp_retval
120xpc_IPI_send_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) 120xpc_send_IRQ_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
121{ 121{
122 int ret = 0; 122 int ret = 0;
123 unsigned long irq_flags; 123 unsigned long irq_flags;
@@ -131,7 +131,7 @@ xpc_IPI_send_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
131 * We must always use the nofault function regardless of whether we 131 * We must always use the nofault function regardless of whether we
132 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 132 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
133 * didn't, we'd never know that the other partition is down and would 133 * didn't, we'd never know that the other partition is down and would
134 * keep sending IPIs and AMOs to it until the heartbeat times out. 134 * keep sending IRQs and AMOs to it until the heartbeat times out.
135 */ 135 */
136 ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable), 136 ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
137 xp_nofault_PIOR_target)); 137 xp_nofault_PIOR_target));
@@ -142,16 +142,16 @@ xpc_IPI_send_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
142} 142}
143 143
144static AMO_t * 144static AMO_t *
145xpc_IPI_init_sn2(int index) 145xpc_init_IRQ_amo_sn2(int index)
146{ 146{
147 AMO_t *amo = xpc_vars->amos_page + index; 147 AMO_t *amo = xpc_vars->amos_page + index;
148 148
149 (void)xpc_IPI_receive_sn2(amo); /* clear AMO variable */ 149 (void)xpc_receive_IRQ_amo_sn2(amo); /* clear AMO variable */
150 return amo; 150 return amo;
151} 151}
152 152
153/* 153/*
154 * IPIs associated with SGI_XPC_ACTIVATE IRQ. 154 * Functions associated with SGI_XPC_ACTIVATE IRQ.
155 */ 155 */
156 156
157/* 157/*
@@ -166,23 +166,23 @@ xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
166} 166}
167 167
168/* 168/*
169 * Flag the appropriate AMO variable and send an IPI to the specified node. 169 * Flag the appropriate AMO variable and send an IRQ to the specified node.
170 */ 170 */
171static void 171static void
172xpc_activate_IRQ_send_sn2(u64 amos_page_pa, int from_nasid, int to_nasid, 172xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
173 int to_phys_cpuid) 173 int to_phys_cpuid)
174{ 174{
175 int w_index = XPC_NASID_W_INDEX(from_nasid); 175 int w_index = XPC_NASID_W_INDEX(from_nasid);
176 int b_index = XPC_NASID_B_INDEX(from_nasid); 176 int b_index = XPC_NASID_B_INDEX(from_nasid);
177 AMO_t *amos = (AMO_t *)__va(amos_page_pa + 177 AMO_t *amos = (AMO_t *)__va(amos_page_pa +
178 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t))); 178 (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
179 179
180 (void)xpc_IPI_send_sn2(&amos[w_index], (1UL << b_index), to_nasid, 180 (void)xpc_send_IRQ_sn2(&amos[w_index], (1UL << b_index), to_nasid,
181 to_phys_cpuid, SGI_XPC_ACTIVATE); 181 to_phys_cpuid, SGI_XPC_ACTIVATE);
182} 182}
183 183
184static void 184static void
185xpc_activate_IRQ_send_local_sn2(int from_nasid) 185xpc_send_local_activate_IRQ_sn2(int from_nasid)
186{ 186{
187 int w_index = XPC_NASID_W_INDEX(from_nasid); 187 int w_index = XPC_NASID_W_INDEX(from_nasid);
188 int b_index = XPC_NASID_B_INDEX(from_nasid); 188 int b_index = XPC_NASID_B_INDEX(from_nasid);
@@ -197,29 +197,29 @@ xpc_activate_IRQ_send_local_sn2(int from_nasid)
197} 197}
198 198
199/* 199/*
200 * IPIs associated with SGI_XPC_NOTIFY IRQ. 200 * Functions associated with SGI_XPC_NOTIFY IRQ.
201 */ 201 */
202 202
203/* 203/*
204 * Check to see if there is any channel activity to/from the specified 204 * Check to see if any chctl flags were sent from the specified partition.
205 * partition.
206 */ 205 */
207static void 206static void
208xpc_check_for_channel_activity_sn2(struct xpc_partition *part) 207xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
209{ 208{
210 u64 IPI_amo; 209 union xpc_channel_ctl_flags chctl;
211 unsigned long irq_flags; 210 unsigned long irq_flags;
212 211
213 IPI_amo = xpc_IPI_receive_sn2(part->sn.sn2.local_IPI_amo_va); 212 chctl.all_flags = xpc_receive_IRQ_amo_sn2(part->sn.sn2.
214 if (IPI_amo == 0) 213 local_chctl_amo_va);
214 if (chctl.all_flags == 0)
215 return; 215 return;
216 216
217 spin_lock_irqsave(&part->IPI_lock, irq_flags); 217 spin_lock_irqsave(&part->chctl_lock, irq_flags);
218 part->local_IPI_amo |= IPI_amo; 218 part->chctl.all_flags |= chctl.all_flags;
219 spin_unlock_irqrestore(&part->IPI_lock, irq_flags); 219 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
220 220
221 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n", 221 dev_dbg(xpc_chan, "received notify IRQ from partid=%d, chctl.all_flags="
222 XPC_PARTID(part), IPI_amo); 222 "0x%lx\n", XPC_PARTID(part), chctl.all_flags);
223 223
224 xpc_wakeup_channel_mgr(part); 224 xpc_wakeup_channel_mgr(part);
225} 225}
@@ -228,17 +228,17 @@ xpc_check_for_channel_activity_sn2(struct xpc_partition *part)
228 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified 228 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
229 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more 229 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
230 * than one partition, we use an AMO_t structure per partition to indicate 230 * than one partition, we use an AMO_t structure per partition to indicate
231 * whether a partition has sent an IPI or not. If it has, then wake up the 231 * whether a partition has sent an IRQ or not. If it has, then wake up the
232 * associated kthread to handle it. 232 * associated kthread to handle it.
233 * 233 *
234 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC 234 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IRQs sent by XPC
235 * running on other partitions. 235 * running on other partitions.
236 * 236 *
237 * Noteworthy Arguments: 237 * Noteworthy Arguments:
238 * 238 *
239 * irq - Interrupt ReQuest number. NOT USED. 239 * irq - Interrupt ReQuest number. NOT USED.
240 * 240 *
241 * dev_id - partid of IPI's potential sender. 241 * dev_id - partid of IRQ's potential sender.
242 */ 242 */
243static irqreturn_t 243static irqreturn_t
244xpc_handle_notify_IRQ_sn2(int irq, void *dev_id) 244xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
@@ -249,7 +249,7 @@ xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
249 DBUG_ON(partid < 0 || partid >= xp_max_npartitions); 249 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
250 250
251 if (xpc_part_ref(part)) { 251 if (xpc_part_ref(part)) {
252 xpc_check_for_channel_activity_sn2(part); 252 xpc_check_for_sent_chctl_flags_sn2(part);
253 253
254 xpc_part_deref(part); 254 xpc_part_deref(part);
255 } 255 }
@@ -257,45 +257,47 @@ xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
257} 257}
258 258
259/* 259/*
260 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IPIs on the floor 260 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IRQs on the floor
261 * because the write to their associated IPI amo completed after the IRQ/IPI 261 * because the write to their associated amo variable completed after the IRQ
262 * was received. 262 * was received.
263 */ 263 */
264static void 264static void
265xpc_dropped_notify_IRQ_check_sn2(struct xpc_partition *part) 265xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part)
266{ 266{
267 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; 267 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
268 268
269 if (xpc_part_ref(part)) { 269 if (xpc_part_ref(part)) {
270 xpc_check_for_channel_activity_sn2(part); 270 xpc_check_for_sent_chctl_flags_sn2(part);
271 271
272 part_sn2->dropped_notify_IRQ_timer.expires = jiffies + 272 part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
273 XPC_P_DROPPED_IPI_WAIT_INTERVAL; 273 XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
274 add_timer(&part_sn2->dropped_notify_IRQ_timer); 274 add_timer(&part_sn2->dropped_notify_IRQ_timer);
275 xpc_part_deref(part); 275 xpc_part_deref(part);
276 } 276 }
277} 277}
278 278
279/* 279/*
280 * Send an IPI to the remote partition that is associated with the 280 * Send a notify IRQ to the remote partition that is associated with the
281 * specified channel. 281 * specified channel.
282 */ 282 */
283static void 283static void
284xpc_notify_IRQ_send_sn2(struct xpc_channel *ch, u8 ipi_flag, 284xpc_send_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
285 char *ipi_flag_string, unsigned long *irq_flags) 285 char *chctl_flag_string, unsigned long *irq_flags)
286{ 286{
287 struct xpc_partition *part = &xpc_partitions[ch->partid]; 287 struct xpc_partition *part = &xpc_partitions[ch->partid];
288 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; 288 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
289 union xpc_channel_ctl_flags chctl = { 0 };
289 enum xp_retval ret; 290 enum xp_retval ret;
290 291
291 if (likely(part->act_state != XPC_P_DEACTIVATING)) { 292 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
292 ret = xpc_IPI_send_sn2(part_sn2->remote_IPI_amo_va, 293 chctl.flags[ch->number] = chctl_flag;
293 (u64)ipi_flag << (ch->number * 8), 294 ret = xpc_send_IRQ_sn2(part_sn2->remote_chctl_amo_va,
294 part_sn2->remote_IPI_nasid, 295 chctl.all_flags,
295 part_sn2->remote_IPI_phys_cpuid, 296 part_sn2->notify_IRQ_nasid,
297 part_sn2->notify_IRQ_phys_cpuid,
296 SGI_XPC_NOTIFY); 298 SGI_XPC_NOTIFY);
297 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", 299 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
298 ipi_flag_string, ch->partid, ch->number, ret); 300 chctl_flag_string, ch->partid, ch->number, ret);
299 if (unlikely(ret != xpSuccess)) { 301 if (unlikely(ret != xpSuccess)) {
300 if (irq_flags != NULL) 302 if (irq_flags != NULL)
301 spin_unlock_irqrestore(&ch->lock, *irq_flags); 303 spin_unlock_irqrestore(&ch->lock, *irq_flags);
@@ -306,78 +308,78 @@ xpc_notify_IRQ_send_sn2(struct xpc_channel *ch, u8 ipi_flag,
306 } 308 }
307} 309}
308 310
309#define XPC_NOTIFY_IRQ_SEND_SN2(_ch, _ipi_f, _irq_f) \ 311#define XPC_SEND_NOTIFY_IRQ_SN2(_ch, _ipi_f, _irq_f) \
310 xpc_notify_IRQ_send_sn2(_ch, _ipi_f, #_ipi_f, _irq_f) 312 xpc_send_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f, _irq_f)
311 313
312/* 314/*
313 * Make it look like the remote partition, which is associated with the 315 * Make it look like the remote partition, which is associated with the
314 * specified channel, sent us an IPI. This faked IPI will be handled 316 * specified channel, sent us a notify IRQ. This faked IRQ will be handled
315 * by xpc_dropped_notify_IRQ_check_sn2(). 317 * by xpc_check_for_dropped_notify_IRQ_sn2().
316 */ 318 */
317static void 319static void
318xpc_notify_IRQ_send_local_sn2(struct xpc_channel *ch, u8 ipi_flag, 320xpc_send_local_notify_IRQ_sn2(struct xpc_channel *ch, u8 chctl_flag,
319 char *ipi_flag_string) 321 char *chctl_flag_string)
320{ 322{
321 struct xpc_partition *part = &xpc_partitions[ch->partid]; 323 struct xpc_partition *part = &xpc_partitions[ch->partid];
324 union xpc_channel_ctl_flags chctl = { 0 };
322 325
323 FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_IPI_amo_va->variable), 326 chctl.flags[ch->number] = chctl_flag;
324 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8))); 327 FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_chctl_amo_va->
328 variable), FETCHOP_OR, chctl.all_flags);
325 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", 329 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
326 ipi_flag_string, ch->partid, ch->number); 330 chctl_flag_string, ch->partid, ch->number);
327} 331}
328 332
329#define XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(_ch, _ipi_f) \ 333#define XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(_ch, _ipi_f) \
330 xpc_notify_IRQ_send_local_sn2(_ch, _ipi_f, #_ipi_f) 334 xpc_send_local_notify_IRQ_sn2(_ch, _ipi_f, #_ipi_f)
331 335
332static void 336static void
333xpc_send_channel_closerequest_sn2(struct xpc_channel *ch, 337xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch,
334 unsigned long *irq_flags) 338 unsigned long *irq_flags)
335{ 339{
336 struct xpc_openclose_args *args = ch->local_openclose_args; 340 struct xpc_openclose_args *args = ch->local_openclose_args;
337 341
338 args->reason = ch->reason; 342 args->reason = ch->reason;
339 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREQUEST, irq_flags); 343 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags);
340} 344}
341 345
342static void 346static void
343xpc_send_channel_closereply_sn2(struct xpc_channel *ch, 347xpc_send_chctl_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
344 unsigned long *irq_flags)
345{ 348{
346 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREPLY, irq_flags); 349 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREPLY, irq_flags);
347} 350}
348 351
349static void 352static void
350xpc_send_channel_openrequest_sn2(struct xpc_channel *ch, 353xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
351 unsigned long *irq_flags)
352{ 354{
353 struct xpc_openclose_args *args = ch->local_openclose_args; 355 struct xpc_openclose_args *args = ch->local_openclose_args;
354 356
355 args->msg_size = ch->msg_size; 357 args->msg_size = ch->msg_size;
356 args->local_nentries = ch->local_nentries; 358 args->local_nentries = ch->local_nentries;
357 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_OPENREQUEST, irq_flags); 359 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags);
358} 360}
359 361
360static void 362static void
361xpc_send_channel_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) 363xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
362{ 364{
363 struct xpc_openclose_args *args = ch->local_openclose_args; 365 struct xpc_openclose_args *args = ch->local_openclose_args;
364 366
365 args->remote_nentries = ch->remote_nentries; 367 args->remote_nentries = ch->remote_nentries;
366 args->local_nentries = ch->local_nentries; 368 args->local_nentries = ch->local_nentries;
367 args->local_msgqueue_pa = __pa(ch->local_msgqueue); 369 args->local_msgqueue_pa = __pa(ch->local_msgqueue);
368 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_OPENREPLY, irq_flags); 370 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
369} 371}
370 372
371static void 373static void
372xpc_send_channel_msgrequest_sn2(struct xpc_channel *ch) 374xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch)
373{ 375{
374 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_MSGREQUEST, NULL); 376 XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL);
375} 377}
376 378
377static void 379static void
378xpc_send_channel_local_msgrequest_sn2(struct xpc_channel *ch) 380xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch)
379{ 381{
380 XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(ch, XPC_IPI_MSGREQUEST); 382 XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST);
381} 383}
382 384
383/* 385/*
@@ -402,7 +404,7 @@ xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
402 * We must always use the nofault function regardless of whether we 404 * We must always use the nofault function regardless of whether we
403 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 405 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
404 * didn't, we'd never know that the other partition is down and would 406 * didn't, we'd never know that the other partition is down and would
405 * keep sending IPIs and AMOs to it until the heartbeat times out. 407 * keep sending IRQs and AMOs to it until the heartbeat times out.
406 */ 408 */
407 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 409 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
408 variable), 410 variable),
@@ -429,7 +431,7 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
429 * We must always use the nofault function regardless of whether we 431 * We must always use the nofault function regardless of whether we
430 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 432 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
431 * didn't, we'd never know that the other partition is down and would 433 * didn't, we'd never know that the other partition is down and would
432 * keep sending IPIs and AMOs to it until the heartbeat times out. 434 * keep sending IRQs and AMOs to it until the heartbeat times out.
433 */ 435 */
434 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 436 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
435 variable), 437 variable),
@@ -441,7 +443,7 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
441 * Send activate IRQ to get other side to see that we've cleared our 443 * Send activate IRQ to get other side to see that we've cleared our
442 * bit in their engaged partitions AMO. 444 * bit in their engaged partitions AMO.
443 */ 445 */
444 xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa, 446 xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
445 cnodeid_to_nasid(0), 447 cnodeid_to_nasid(0),
446 part_sn2->activate_IRQ_nasid, 448 part_sn2->activate_IRQ_nasid,
447 part_sn2->activate_IRQ_phys_cpuid); 449 part_sn2->activate_IRQ_phys_cpuid);
@@ -595,11 +597,11 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
595 597
596 /* initialize the activate IRQ related AMO variables */ 598 /* initialize the activate IRQ related AMO variables */
597 for (i = 0; i < xp_nasid_mask_words; i++) 599 for (i = 0; i < xp_nasid_mask_words; i++)
598 (void)xpc_IPI_init_sn2(XPC_ACTIVATE_IRQ_AMOS + i); 600 (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS + i);
599 601
600 /* initialize the engaged remote partitions related AMO variables */ 602 /* initialize the engaged remote partitions related AMO variables */
601 (void)xpc_IPI_init_sn2(XPC_ENGAGED_PARTITIONS_AMO); 603 (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO);
602 (void)xpc_IPI_init_sn2(XPC_DEACTIVATE_REQUEST_AMO); 604 (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO);
603 605
604 return xpSuccess; 606 return xpSuccess;
605} 607}
@@ -729,13 +731,13 @@ static void
729xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp, 731xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
730 u64 remote_rp_pa, int nasid) 732 u64 remote_rp_pa, int nasid)
731{ 733{
732 xpc_activate_IRQ_send_local_sn2(nasid); 734 xpc_send_local_activate_IRQ_sn2(nasid);
733} 735}
734 736
735static void 737static void
736xpc_request_partition_reactivation_sn2(struct xpc_partition *part) 738xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
737{ 739{
738 xpc_activate_IRQ_send_local_sn2(part->sn.sn2.activate_IRQ_nasid); 740 xpc_send_local_activate_IRQ_sn2(part->sn.sn2.activate_IRQ_nasid);
739} 741}
740 742
741static void 743static void
@@ -755,7 +757,7 @@ xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
755 * We must always use the nofault function regardless of whether we 757 * We must always use the nofault function regardless of whether we
756 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 758 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
757 * didn't, we'd never know that the other partition is down and would 759 * didn't, we'd never know that the other partition is down and would
758 * keep sending IPIs and AMOs to it until the heartbeat times out. 760 * keep sending IRQs and AMOs to it until the heartbeat times out.
759 */ 761 */
760 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 762 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
761 variable), 763 variable),
@@ -767,7 +769,7 @@ xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
767 * Send activate IRQ to get other side to see that we've set our 769 * Send activate IRQ to get other side to see that we've set our
768 * bit in their deactivate request AMO. 770 * bit in their deactivate request AMO.
769 */ 771 */
770 xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa, 772 xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
771 cnodeid_to_nasid(0), 773 cnodeid_to_nasid(0),
772 part_sn2->activate_IRQ_nasid, 774 part_sn2->activate_IRQ_nasid,
773 part_sn2->activate_IRQ_phys_cpuid); 775 part_sn2->activate_IRQ_phys_cpuid);
@@ -789,7 +791,7 @@ xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
789 * We must always use the nofault function regardless of whether we 791 * We must always use the nofault function regardless of whether we
790 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 792 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
791 * didn't, we'd never know that the other partition is down and would 793 * didn't, we'd never know that the other partition is down and would
792 * keep sending IPIs and AMOs to it until the heartbeat times out. 794 * keep sending IRQs and AMOs to it until the heartbeat times out.
793 */ 795 */
794 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 796 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
795 variable), 797 variable),
@@ -861,11 +863,11 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
861} 863}
862 864
863/* 865/*
864 * Prior code has determined the nasid which generated an IPI. Inspect 866 * Prior code has determined the nasid which generated a activate IRQ.
865 * that nasid to determine if its partition needs to be activated or 867 * Inspect that nasid to determine if its partition needs to be activated
866 * deactivated. 868 * or deactivated.
867 * 869 *
868 * A partition is consider "awaiting activation" if our partition 870 * A partition is considered "awaiting activation" if our partition
869 * flags indicate it is not active and it has a heartbeat. A 871 * flags indicate it is not active and it has a heartbeat. A
870 * partition is considered "awaiting deactivation" if our partition 872 * partition is considered "awaiting deactivation" if our partition
871 * flags indicate it is active but it has no heartbeat or it is not 873 * flags indicate it is active but it has no heartbeat or it is not
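
Spelled out as code, the "awaiting activation" test described above is a conjunction of two local observations. A hedged sketch (both predicates are illustrative names, not functions from this driver):

    /* Sketch only: partition_marked_active() and remote_heartbeat_seen() stand
     * in for the driver's partition-flag and heartbeat checks. */
    static int
    sketch_awaiting_activation(struct xpc_partition *part)
    {
            /* we do not yet consider it active, but its heartbeat is ticking */
            return !partition_marked_active(part) && remote_heartbeat_seen(part);
    }

The "awaiting deactivation" test, per the comment above, starts from the opposite premise (our flags say the partition is active) and triggers when that heartbeat disappears, among other conditions.
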
@@ -997,7 +999,7 @@ xpc_identify_activate_IRQ_sender_sn2(void)
997 if (xpc_exiting) 999 if (xpc_exiting)
998 break; 1000 break;
999 1001
1000 nasid_mask = xpc_IPI_receive_sn2(&act_amos[word]); 1002 nasid_mask = xpc_receive_IRQ_amo_sn2(&act_amos[word]);
1001 if (nasid_mask == 0) { 1003 if (nasid_mask == 0) {
1002 /* no IRQs from nasids in this variable */ 1004 /* no IRQs from nasids in this variable */
1003 continue; 1005 continue;
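
xpc_receive_IRQ_amo_sn2() fetches (and, presumably, clears) one word of the activate AMOs, so each set bit in the returned mask marks a nasid that raised an activate IRQ since the last scan, and a zero word is skipped as shown above. In outline, consuming such a word looks like the sketch below, with the bit-to-nasid conversion left as a hypothetical helper:

    /* Sketch only: nasid_from_word_bit() and handle_activate_IRQ_from() stand
     * in for the driver's bit-to-nasid mapping and per-nasid handling. */
    static void
    sketch_consume_nasid_mask(u64 nasid_mask, int word)
    {
            int bit;

            for (bit = 0; bit < 64; bit++) {
                    if (!(nasid_mask & (1UL << bit)))
                            continue;
                    /* this nasid raised an activate IRQ since the last scan */
                    handle_activate_IRQ_from(nasid_from_word_bit(word, bit));
            }
    }
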
@@ -1117,20 +1119,20 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1117 1119
1118 part_sn2->remote_openclose_args_pa = 0; 1120 part_sn2->remote_openclose_args_pa = 0;
1119 1121
1120 part_sn2->local_IPI_amo_va = xpc_IPI_init_sn2(partid); 1122 part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid);
1121 part->local_IPI_amo = 0; 1123 part->chctl.all_flags = 0;
1122 spin_lock_init(&part->IPI_lock); 1124 spin_lock_init(&part->chctl_lock);
1123 1125
1124 part_sn2->remote_IPI_nasid = 0; 1126 part_sn2->notify_IRQ_nasid = 0;
1125 part_sn2->remote_IPI_phys_cpuid = 0; 1127 part_sn2->notify_IRQ_phys_cpuid = 0;
1126 part_sn2->remote_IPI_amo_va = NULL; 1128 part_sn2->remote_chctl_amo_va = NULL;
1127 1129
1128 atomic_set(&part->channel_mgr_requests, 1); 1130 atomic_set(&part->channel_mgr_requests, 1);
1129 init_waitqueue_head(&part->channel_mgr_wq); 1131 init_waitqueue_head(&part->channel_mgr_wq);
1130 1132
1131 sprintf(part_sn2->IPI_owner, "xpc%02d", partid); 1133 sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid);
1132 ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2, 1134 ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
1133 IRQF_SHARED, part_sn2->IPI_owner, 1135 IRQF_SHARED, part_sn2->notify_IRQ_owner,
1134 (void *)(u64)partid); 1136 (void *)(u64)partid);
1135 if (ret != 0) { 1137 if (ret != 0) {
1136 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " 1138 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
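
Because every partition shares the single SGI_XPC_NOTIFY vector, the partid is passed through the request_irq() dev_id cookie and recovered in the handler. A hedged sketch of the receiving side, assuming the xpc_partitions[] lookup used elsewhere in the driver:

    /* Sketch only: the real xpc_handle_notify_IRQ_sn2() goes on to read the
     * sender's chctl AMO; only the partid recovery is shown here. */
    static irqreturn_t
    sketch_handle_notify_IRQ(int irq, void *dev_id)
    {
            short partid = (short)(u64)dev_id;      /* cast back out of the cookie */
            struct xpc_partition *part = &xpc_partitions[partid];

            /* ... record the peer's chctl flags for this part and wake the
             *     channel manager ... */
            return IRQ_HANDLED;
    }
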
@@ -1139,13 +1141,13 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1139 goto out_5; 1141 goto out_5;
1140 } 1142 }
1141 1143
1142 /* Setup a timer to check for dropped IPIs */ 1144 /* Setup a timer to check for dropped notify IRQs */
1143 timer = &part_sn2->dropped_notify_IRQ_timer; 1145 timer = &part_sn2->dropped_notify_IRQ_timer;
1144 init_timer(timer); 1146 init_timer(timer);
1145 timer->function = 1147 timer->function =
1146 (void (*)(unsigned long))xpc_dropped_notify_IRQ_check_sn2; 1148 (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2;
1147 timer->data = (unsigned long)part; 1149 timer->data = (unsigned long)part;
1148 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT_INTERVAL; 1150 timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
1149 add_timer(timer); 1151 add_timer(timer);
1150 1152
1151 part->nchannels = XPC_MAX_NCHANNELS; 1153 part->nchannels = XPC_MAX_NCHANNELS;
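
Notify IRQs can be lost (the AMO write and the interrupt are separate operations), so the timer set up above periodically re-checks the chctl AMO as if an IRQ had just arrived. A minimal sketch of such a self-rearming check, using the init_timer()/add_timer() API of this era and a hypothetical recheck helper:

    /* Sketch only: recheck_chctl_amo() stands in for the dropped-IRQ check the
     * timer performs; the rearm pattern mirrors the setup shown above. */
    static void
    sketch_check_for_dropped_notify_IRQ(unsigned long data)
    {
            struct xpc_partition *part = (struct xpc_partition *)data;
            struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;

            recheck_chctl_amo(part);    /* behave as if a notify IRQ had arrived */

            /* rearm: a lost interrupt only delays progress, it never stalls it */
            part_sn2->dropped_notify_IRQ_timer.expires =
                jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL;
            add_timer(&part_sn2->dropped_notify_IRQ_timer);
    }
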
@@ -1196,10 +1198,10 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1196 xpc_vars_part[partid].GPs_pa = __pa(part_sn2->local_GPs); 1198 xpc_vars_part[partid].GPs_pa = __pa(part_sn2->local_GPs);
1197 xpc_vars_part[partid].openclose_args_pa = 1199 xpc_vars_part[partid].openclose_args_pa =
1198 __pa(part->local_openclose_args); 1200 __pa(part->local_openclose_args);
1199 xpc_vars_part[partid].IPI_amo_pa = __pa(part_sn2->local_IPI_amo_va); 1201 xpc_vars_part[partid].chctl_amo_pa = __pa(part_sn2->local_chctl_amo_va);
1200 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ 1202 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
1201 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); 1203 xpc_vars_part[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
1202 xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid); 1204 xpc_vars_part[partid].notify_IRQ_phys_cpuid = cpu_physical_id(cpuid);
1203 xpc_vars_part[partid].nchannels = part->nchannels; 1205 xpc_vars_part[partid].nchannels = part->nchannels;
1204 xpc_vars_part[partid].magic = XPC_VP_MAGIC1; 1206 xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
1205 1207
@@ -1239,7 +1241,7 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
1239 * processes by marking it as no longer setup. Then we make it 1241 * processes by marking it as no longer setup. Then we make it
1240 * inaccessible to remote processes by clearing the XPC per partition 1242 * inaccessible to remote processes by clearing the XPC per partition
1241 * specific variable's magic # (which indicates that these variables 1243 * specific variable's magic # (which indicates that these variables
1242 * are no longer valid) and by ignoring all XPC notify IPIs sent to 1244 * are no longer valid) and by ignoring all XPC notify IRQs sent to
1243 * this partition. 1245 * this partition.
1244 */ 1246 */
1245 1247
@@ -1275,7 +1277,7 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
1275 part_sn2->local_GPs = NULL; 1277 part_sn2->local_GPs = NULL;
1276 kfree(part->channels); 1278 kfree(part->channels);
1277 part->channels = NULL; 1279 part->channels = NULL;
1278 part_sn2->local_IPI_amo_va = NULL; 1280 part_sn2->local_chctl_amo_va = NULL;
1279} 1281}
1280 1282
1281/* 1283/*
@@ -1370,7 +1372,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1370 1372
1371 if (pulled_entry->GPs_pa == 0 || 1373 if (pulled_entry->GPs_pa == 0 ||
1372 pulled_entry->openclose_args_pa == 0 || 1374 pulled_entry->openclose_args_pa == 0 ||
1373 pulled_entry->IPI_amo_pa == 0) { 1375 pulled_entry->chctl_amo_pa == 0) {
1374 1376
1375 dev_err(xpc_chan, "partition %d's XPC vars_part for " 1377 dev_err(xpc_chan, "partition %d's XPC vars_part for "
1376 "partition %d are not valid\n", partid, 1378 "partition %d are not valid\n", partid,
@@ -1383,10 +1385,11 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1383 part_sn2->remote_GPs_pa = pulled_entry->GPs_pa; 1385 part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
1384 part_sn2->remote_openclose_args_pa = 1386 part_sn2->remote_openclose_args_pa =
1385 pulled_entry->openclose_args_pa; 1387 pulled_entry->openclose_args_pa;
1386 part_sn2->remote_IPI_amo_va = 1388 part_sn2->remote_chctl_amo_va =
1387 (AMO_t *)__va(pulled_entry->IPI_amo_pa); 1389 (AMO_t *)__va(pulled_entry->chctl_amo_pa);
1388 part_sn2->remote_IPI_nasid = pulled_entry->IPI_nasid; 1390 part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
1389 part_sn2->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; 1391 part_sn2->notify_IRQ_phys_cpuid =
1392 pulled_entry->notify_IRQ_phys_cpuid;
1390 1393
1391 if (part->nchannels > pulled_entry->nchannels) 1394 if (part->nchannels > pulled_entry->nchannels)
1392 part->nchannels = pulled_entry->nchannels; 1395 part->nchannels = pulled_entry->nchannels;
@@ -1437,7 +1440,7 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
1437 * Send activate IRQ to get other side to activate if they've not 1440 * Send activate IRQ to get other side to activate if they've not
1438 * already begun to do so. 1441 * already begun to do so.
1439 */ 1442 */
1440 xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa, 1443 xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
1441 cnodeid_to_nasid(0), 1444 cnodeid_to_nasid(0),
1442 part_sn2->activate_IRQ_nasid, 1445 part_sn2->activate_IRQ_nasid,
1443 part_sn2->activate_IRQ_phys_cpuid); 1446 part_sn2->activate_IRQ_phys_cpuid);
@@ -1462,28 +1465,28 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
1462} 1465}
1463 1466
1464/* 1467/*
1465 * Get the IPI flags and pull the openclose args and/or remote GPs as needed. 1468 * Get the chctl flags and pull the openclose args and/or remote GPs as needed.
1466 */ 1469 */
1467static u64 1470static u64
1468xpc_get_IPI_flags_sn2(struct xpc_partition *part) 1471xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
1469{ 1472{
1470 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; 1473 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1471 unsigned long irq_flags; 1474 unsigned long irq_flags;
1472 u64 IPI_amo; 1475 union xpc_channel_ctl_flags chctl;
1473 enum xp_retval ret; 1476 enum xp_retval ret;
1474 1477
1475 /* 1478 /*
1476 * See if there are any IPI flags to be handled. 1479 * See if there are any chctl flags to be handled.
1477 */ 1480 */
1478 1481
1479 spin_lock_irqsave(&part->IPI_lock, irq_flags); 1482 spin_lock_irqsave(&part->chctl_lock, irq_flags);
1480 IPI_amo = part->local_IPI_amo; 1483 chctl = part->chctl;
1481 if (IPI_amo != 0) 1484 if (chctl.all_flags != 0)
1482 part->local_IPI_amo = 0; 1485 part->chctl.all_flags = 0;
1483 1486
1484 spin_unlock_irqrestore(&part->IPI_lock, irq_flags); 1487 spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
1485 1488
1486 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { 1489 if (xpc_any_openclose_chctl_flags_set(&chctl)) {
1487 ret = xpc_pull_remote_cachelines_sn2(part, part-> 1490 ret = xpc_pull_remote_cachelines_sn2(part, part->
1488 remote_openclose_args, 1491 remote_openclose_args,
1489 (void *)part_sn2-> 1492 (void *)part_sn2->
@@ -1496,12 +1499,12 @@ xpc_get_IPI_flags_sn2(struct xpc_partition *part)
1496 "partition %d, ret=%d\n", XPC_PARTID(part), 1499 "partition %d, ret=%d\n", XPC_PARTID(part),
1497 ret); 1500 ret);
1498 1501
1499 /* don't bother processing IPIs anymore */ 1502 /* don't bother processing chctl flags anymore */
1500 IPI_amo = 0; 1503 chctl.all_flags = 0;
1501 } 1504 }
1502 } 1505 }
1503 1506
1504 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { 1507 if (xpc_any_msg_chctl_flags_set(&chctl)) {
1505 ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs, 1508 ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
1506 (void *)part_sn2->remote_GPs_pa, 1509 (void *)part_sn2->remote_GPs_pa,
1507 XPC_GP_SIZE); 1510 XPC_GP_SIZE);
@@ -1511,12 +1514,12 @@ xpc_get_IPI_flags_sn2(struct xpc_partition *part)
1511 dev_dbg(xpc_chan, "failed to pull GPs from partition " 1514 dev_dbg(xpc_chan, "failed to pull GPs from partition "
1512 "%d, ret=%d\n", XPC_PARTID(part), ret); 1515 "%d, ret=%d\n", XPC_PARTID(part), ret);
1513 1516
1514 /* don't bother processing IPIs anymore */ 1517 /* don't bother processing chctl flags anymore */
1515 IPI_amo = 0; 1518 chctl.all_flags = 0;
1516 } 1519 }
1517 } 1520 }
1518 1521
1519 return IPI_amo; 1522 return chctl.all_flags;
1520} 1523}
1521 1524
1522/* 1525/*
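
The locking pattern above is worth calling out: the accumulated chctl flags are snapshotted and cleared atomically under chctl_lock, and only the snapshot is processed, so flags posted by an IRQ that lands mid-processing are kept for the next pass rather than lost. Reduced to its essentials (a sketch, with the cacheline pulls elided):

    /* Sketch only: the essence of xpc_get_chctl_all_flags_sn2()'s locking. */
    static u64
    sketch_grab_and_clear_chctl(struct xpc_partition *part)
    {
            union xpc_channel_ctl_flags chctl;
            unsigned long irq_flags;

            spin_lock_irqsave(&part->chctl_lock, irq_flags);
            chctl = part->chctl;                    /* snapshot everything posted so far */
            if (chctl.all_flags != 0)
                    part->chctl.all_flags = 0;      /* later flags accumulate afresh */
            spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

            return chctl.all_flags;                 /* caller works on the snapshot only */
    }
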
@@ -1610,7 +1613,7 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
1610} 1613}
1611 1614
1612static void 1615static void
1613xpc_process_msg_IPI_sn2(struct xpc_partition *part, int ch_number) 1616xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number)
1614{ 1617{
1615 struct xpc_channel *ch = &part->channels[ch_number]; 1618 struct xpc_channel *ch = &part->channels[ch_number];
1616 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; 1619 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
@@ -1827,8 +1830,8 @@ xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
1827 1830
1828/* 1831/*
1829 * Now we actually send the messages that are ready to be sent by advancing 1832 * Now we actually send the messages that are ready to be sent by advancing
1830 * the local message queue's Put value and then send an IPI to the recipient 1833 * the local message queue's Put value and then send a chctl msgrequest to the
1831 * partition. 1834 * recipient partition.
1832 */ 1835 */
1833static void 1836static void
1834xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) 1837xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
@@ -1836,7 +1839,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1836 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; 1839 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1837 struct xpc_msg *msg; 1840 struct xpc_msg *msg;
1838 s64 put = initial_put + 1; 1841 s64 put = initial_put + 1;
1839 int send_IPI = 0; 1842 int send_msgrequest = 0;
1840 1843
1841 while (1) { 1844 while (1) {
1842 1845
@@ -1871,7 +1874,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1871 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, " 1874 dev_dbg(xpc_chan, "local_GP->put changed to %ld, partid=%d, "
1872 "channel=%d\n", put, ch->partid, ch->number); 1875 "channel=%d\n", put, ch->partid, ch->number);
1873 1876
1874 send_IPI = 1; 1877 send_msgrequest = 1;
1875 1878
1876 /* 1879 /*
1877 * We need to ensure that the message referenced by 1880 * We need to ensure that the message referenced by
@@ -1881,8 +1884,8 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1881 initial_put = put; 1884 initial_put = put;
1882 } 1885 }
1883 1886
1884 if (send_IPI) 1887 if (send_msgrequest)
1885 xpc_send_channel_msgrequest_sn2(ch); 1888 xpc_send_chctl_msgrequest_sn2(ch);
1886} 1889}
1887 1890
1888/* 1891/*
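
Stripped of the ring-buffer bookkeeping, the pattern in xpc_send_msgs_sn2() is: walk forward from the current Put over messages flagged ready, publish each new Put value so the peer can see it, and poke the peer only once at the end. A hedged sketch (helper names are illustrative, not the driver's):

    /* Sketch only: msg_ready() and publish_put() are illustrative helpers for
     * "next slot fully written" and "advance the GP Put value". */
    static void
    sketch_send_ready_msgs(struct xpc_channel *ch, s64 put)
    {
            int send_msgrequest = 0;

            while (msg_ready(ch, put + 1)) {
                    put++;
                    publish_put(ch, put);   /* make the message visible to the peer */
                    send_msgrequest = 1;
            }

            if (send_msgrequest)            /* one msgrequest covers the whole batch */
                    xpc_send_chctl_msgrequest_sn2(ch);
    }
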
@@ -1929,13 +1932,13 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
1929 * There aren't any available msg entries at this time. 1932 * There aren't any available msg entries at this time.
1930 * 1933 *
1931 * In waiting for a message entry to become available, 1934 * In waiting for a message entry to become available,
1932 * we set a timeout in case the other side is not 1935 * we set a timeout in case the other side is not sending
1933 * sending completion IPIs. This lets us fake an IPI 1936 * completion interrupts. This lets us fake a notify IRQ
1934 * that will cause the IPI handler to fetch the latest 1937 * that will cause the notify IRQ handler to fetch the latest
1935 * GP values as if an IPI was sent by the other side. 1938 * GP values as if an interrupt was sent by the other side.
1936 */ 1939 */
1937 if (ret == xpTimeout) 1940 if (ret == xpTimeout)
1938 xpc_send_channel_local_msgrequest_sn2(ch); 1941 xpc_send_chctl_local_msgrequest_sn2(ch);
1939 1942
1940 if (flags & XPC_NOWAIT) 1943 if (flags & XPC_NOWAIT)
1941 return xpNoWait; 1944 return xpNoWait;
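
The timeout trick above is a liveness guard: if the peer's completion interrupt never arrives, the waiter times out and sends itself a local msgrequest, which runs the same path as a real notify IRQ and re-fetches the remote GP values. A sketch of the wait loop, with the slot-allocation details elided:

    /* Sketch only: msg_slot_available() and wait_for_msg_slot() are illustrative
     * wrappers around the channel's msg_allocate_wq wait; the timeout constant
     * is an assumption. */
    static enum xp_retval
    sketch_allocate_msg_slot(struct xpc_channel *ch, u32 flags)
    {
            enum xp_retval ret;

            while (!msg_slot_available(ch)) {
                    if (flags & XPC_NOWAIT)
                            return xpNoWait;

                    ret = wait_for_msg_slot(ch, XPC_MSG_ALLOC_TIMEOUT);
                    if (ret == xpTimeout)   /* completion interrupt lost? */
                            xpc_send_chctl_local_msgrequest_sn2(ch); /* fake one locally */
            }
            return xpSuccess;
    }
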
@@ -1962,8 +1965,8 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
1962 1965
1963/* 1966/*
1964 * Common code that does the actual sending of the message by advancing the 1967 * Common code that does the actual sending of the message by advancing the
1965 * local message queue's Put value and sends an IPI to the partition the 1968 * local message queue's Put value and sends a chctl msgrequest to the
1966 * message is being sent to. 1969 * partition the message is being sent to.
1967 */ 1970 */
1968static enum xp_retval 1971static enum xp_retval
1969xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, 1972xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
@@ -2055,7 +2058,7 @@ out_1:
2055/* 2058/*
2056 * Now we actually acknowledge the messages that have been delivered and ack'd 2059 * Now we actually acknowledge the messages that have been delivered and ack'd
2057 * by advancing the cached remote message queue's Get value and if requested 2060 * by advancing the cached remote message queue's Get value and if requested
2058 * send an IPI to the message sender's partition. 2061 * send a chctl msgrequest to the message sender's partition.
2059 */ 2062 */
2060static void 2063static void
2061xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) 2064xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
@@ -2063,7 +2066,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2063 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; 2066 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
2064 struct xpc_msg *msg; 2067 struct xpc_msg *msg;
2065 s64 get = initial_get + 1; 2068 s64 get = initial_get + 1;
2066 int send_IPI = 0; 2069 int send_msgrequest = 0;
2067 2070
2068 while (1) { 2071 while (1) {
2069 2072
@@ -2099,7 +2102,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2099 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " 2102 dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, "
2100 "channel=%d\n", get, ch->partid, ch->number); 2103 "channel=%d\n", get, ch->partid, ch->number);
2101 2104
2102 send_IPI = (msg_flags & XPC_M_INTERRUPT); 2105 send_msgrequest = (msg_flags & XPC_M_INTERRUPT);
2103 2106
2104 /* 2107 /*
2105 * We need to ensure that the message referenced by 2108 * We need to ensure that the message referenced by
@@ -2109,8 +2112,8 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
2109 initial_get = get; 2112 initial_get = get;
2110 } 2113 }
2111 2114
2112 if (send_IPI) 2115 if (send_msgrequest)
2113 xpc_send_channel_msgrequest_sn2(ch); 2116 xpc_send_chctl_msgrequest_sn2(ch);
2114} 2117}
2115 2118
2116static void 2119static void
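
The receive side mirrors the send side: the cached remote Get is advanced past every message already delivered and acknowledged, and a chctl msgrequest is sent back only when the sender asked for an interrupt (XPC_M_INTERRUPT). In outline:

    /* Sketch only: msg_acked() and publish_get() are illustrative helpers for
     * "message delivered and acknowledged" and "advance the cached Get value". */
    static void
    sketch_ack_delivered_msgs(struct xpc_channel *ch, s64 get, u8 msg_flags)
    {
            int send_msgrequest = 0;

            while (msg_acked(ch, get + 1)) {
                    get++;
                    publish_get(ch, get);   /* slot may now be reused by the sender */
                    if (msg_flags & XPC_M_INTERRUPT)
                            send_msgrequest = 1;    /* sender asked to be told */
            }

            if (send_msgrequest)
                    xpc_send_chctl_msgrequest_sn2(ch);
    }
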
@@ -2168,9 +2171,9 @@ xpc_init_sn2(void)
2168 xpc_setup_infrastructure = xpc_setup_infrastructure_sn2; 2171 xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
2169 xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2; 2172 xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
2170 xpc_make_first_contact = xpc_make_first_contact_sn2; 2173 xpc_make_first_contact = xpc_make_first_contact_sn2;
2171 xpc_get_IPI_flags = xpc_get_IPI_flags_sn2; 2174 xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2;
2172 xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; 2175 xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
2173 xpc_process_msg_IPI = xpc_process_msg_IPI_sn2; 2176 xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2;
2174 xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2; 2177 xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2;
2175 xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2; 2178 xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2;
2176 2179
@@ -2181,10 +2184,10 @@ xpc_init_sn2(void)
2181 xpc_indicate_partition_disengaged_sn2; 2184 xpc_indicate_partition_disengaged_sn2;
2182 xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; 2185 xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;
2183 2186
2184 xpc_send_channel_closerequest = xpc_send_channel_closerequest_sn2; 2187 xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2;
2185 xpc_send_channel_closereply = xpc_send_channel_closereply_sn2; 2188 xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2;
2186 xpc_send_channel_openrequest = xpc_send_channel_openrequest_sn2; 2189 xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2;
2187 xpc_send_channel_openreply = xpc_send_channel_openreply_sn2; 2190 xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2;
2188 2191
2189 xpc_send_msg = xpc_send_msg_sn2; 2192 xpc_send_msg = xpc_send_msg_sn2;
2190 xpc_received_msg = xpc_received_msg_sn2; 2193 xpc_received_msg = xpc_received_msg_sn2;
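
These assignments are the driver's dispatch layer: the generic channel code calls through globals such as xpc_get_chctl_all_flags and xpc_send_chctl_msgrequest, and xpc_init_sn2()/xpc_init_uv() point them at the hardware-specific implementations at init time. A minimal sketch of the idea, reduced to two operations and one ops struct (the real driver assigns individual function-pointer globals instead):

    /* Sketch only: a reduced model of the sn2/uv dispatch-by-function-pointer. */
    struct xpc_arch_ops_sketch {
            u64  (*get_chctl_all_flags)(struct xpc_partition *part);
            void (*send_chctl_msgrequest)(struct xpc_channel *ch);
    };

    static struct xpc_arch_ops_sketch xpc_ops_sketch;

    static void
    sketch_init_sn2(void)
    {
            xpc_ops_sketch.get_chctl_all_flags   = xpc_get_chctl_all_flags_sn2;
            xpc_ops_sketch.send_chctl_msgrequest = xpc_send_chctl_msgrequest_sn2;
    }
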
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index c53b229cb04e..1401b0f45dcb 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -26,7 +26,7 @@ static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
26static void *xpc_activate_mq; 26static void *xpc_activate_mq;
27 27
28static void 28static void
29xpc_IPI_send_local_activate_uv(struct xpc_partition *part) 29xpc_send_local_activate_IRQ_uv(struct xpc_partition *part)
30{ 30{
31 /* 31 /*
32 * >>> make our side think that the remote partition sent an activate 32 * >>> make our side think that the remote partition sent an activate
@@ -75,13 +75,13 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
75 * >>> part->sn.uv.activate_mq_gpa = remote_rp->sn.activate_mq_gpa; 75 * >>> part->sn.uv.activate_mq_gpa = remote_rp->sn.activate_mq_gpa;
76 */ 76 */
77 77
78 xpc_IPI_send_local_activate_uv(part); 78 xpc_send_local_activate_IRQ_uv(part);
79} 79}
80 80
81static void 81static void
82xpc_request_partition_reactivation_uv(struct xpc_partition *part) 82xpc_request_partition_reactivation_uv(struct xpc_partition *part)
83{ 83{
84 xpc_IPI_send_local_activate_uv(part); 84 xpc_send_local_activate_IRQ_uv(part);
85} 85}
86 86
87/* 87/*
@@ -114,7 +114,7 @@ xpc_make_first_contact_uv(struct xpc_partition *part)
114} 114}
115 115
116static u64 116static u64
117xpc_get_IPI_flags_uv(struct xpc_partition *part) 117xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
118{ 118{
119 /* >>> this function needs fleshing out */ 119 /* >>> this function needs fleshing out */
120 return 0UL; 120 return 0UL;
@@ -140,7 +140,7 @@ xpc_init_uv(void)
140 xpc_setup_infrastructure = xpc_setup_infrastructure_uv; 140 xpc_setup_infrastructure = xpc_setup_infrastructure_uv;
141 xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv; 141 xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv;
142 xpc_make_first_contact = xpc_make_first_contact_uv; 142 xpc_make_first_contact = xpc_make_first_contact_uv;
143 xpc_get_IPI_flags = xpc_get_IPI_flags_uv; 143 xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
144 xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv; 144 xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv;
145} 145}
146 146