author     Dean Nelson <dcn@sgi.com>                          2008-07-30 01:34:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-07-30 12:41:49 -0400
commit     a47d5dac9d8481766382f8cf1483dd581df38b99 (patch)
tree       d815a930f6d5f0d48b34eb7506447909c02eb3c3 /drivers
parent     6e41017aad9ed175ca51e4828eabc8c5cf5910be (diff)
sgi-xp: isolate additional sn2 specific code
Move additional sn2 specific code into xpc_sn2.c.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
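The structural pattern behind the patch shows up repeatedly in the xpc.h hunks below: fields that only exist on sn2 hardware move out of the generic structures into an sn2-only sub-structure, which sits next to a still-empty UV counterpart inside a union. The following is a condensed, self-contained sketch of that pattern, not the driver's actual definitions; the field set is trimmed and struct xpc_gp is stubbed purely for illustration.

/* Condensed sketch of the per-architecture union pattern (illustrative only). */
#include <stdint.h>

struct xpc_gp { int64_t get, put; };    /* stub of the real Get/Put pair */

struct xpc_channel_sn2 {                /* state that only sn2 uses */
        struct xpc_gp remote_GP;        /* remote Get/Put values */
        int64_t next_msg_to_pull;       /* Put value of next msg to pull */
};

struct xpc_channel_uv {                 /* UV support still to come */
};

struct xpc_channel {
        short partid;                   /* generic fields stay here */
        union {                         /* arch-specific state lives here */
                struct xpc_channel_sn2 sn2;
                struct xpc_channel_uv uv;
        } sn;
};

/* sn2-specific code then reaches its state through &ch->sn.sn2 */
static inline void xpc_reset_gp_values(struct xpc_channel *ch)
{
        struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;

        ch_sn2->remote_GP.get = 0;
        ch_sn2->remote_GP.put = 0;
        ch_sn2->next_msg_to_pull = 0;
}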
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/misc/sgi-xp/xpc.h           | 173
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c   | 214
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c      | 278
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c |  59
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c       | 851
-rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c        |  15
6 files changed, 784 insertions(+), 806 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 1edf37512de6..b04cfbed9581 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -122,9 +122,6 @@ struct xpc_rsvd_page {
 
 #define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */
 
-#define XPC_SUPPORTS_RP_STAMP(_version) \
-			(_version >= _XPC_VERSION(1, 1))
-
 /*
  * Define the structures by which XPC variables can be exported to other
  * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
@@ -144,8 +141,8 @@ struct xpc_vars_sn2 {
 	u64 heartbeat;
 	DECLARE_BITMAP(heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
 	u64 heartbeat_offline;	/* if 0, heartbeat should be changing */
-	int act_nasid;
-	int act_phys_cpuid;
+	int activate_IRQ_nasid;
+	int activate_IRQ_phys_cpuid;
 	u64 vars_part_pa;
 	u64 amos_page_pa;	/* paddr of page of AMOs from MSPEC driver */
 	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
@@ -153,9 +150,6 @@ struct xpc_vars_sn2 {
 
 #define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */
 
-#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
-			(_version >= _XPC_VERSION(3, 1))
-
 /*
  * The following pertains to ia64-sn2 only.
  *
@@ -167,14 +161,14 @@ struct xpc_vars_sn2 {
  * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
  * AMO variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
  * NOTIFY IRQs, 128 AMO variables (based on XP_NASID_MASK_WORDS) to identify
- * the senders of ACTIVATE IRQs, and 2 AMO variables to identify which remote
+ * the senders of ACTIVATE IRQs, 1 AMO variable to identify which remote
  * partitions (i.e., XPCs) consider themselves currently engaged with the
- * local XPC.
+ * local XPC and 1 AMO variable to request partition deactivation.
  */
 #define XPC_NOTIFY_IRQ_AMOS	   0
 #define XPC_ACTIVATE_IRQ_AMOS	   (XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2)
 #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
-#define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
+#define XPC_DEACTIVATE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)
 
 /*
  * The following structure describes the per partition specific variables.
@@ -369,6 +363,23 @@ struct xpc_notify {
  * new messages, by the clearing of the message flags of the acknowledged
  * messages.
  */
+struct xpc_channel_sn2 {
+
+	/* various flavors of local and remote Get/Put values */
+
+	struct xpc_gp *local_GP;	/* local Get/Put values */
+	struct xpc_gp remote_GP;	/* remote Get/Put values */
+	struct xpc_gp w_local_GP;	/* working local Get/Put values */
+	struct xpc_gp w_remote_GP;	/* working remote Get/Put values */
+	s64 next_msg_to_pull;		/* Put value of next msg to pull */
+
+	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
+};
+
+struct xpc_channel_uv {
+	/* >>> code is coming */
+};
+
 struct xpc_channel {
 	short partid;		/* ID of remote partition connected */
 	spinlock_t lock;	/* lock for updating this structure */
@@ -407,20 +418,11 @@ struct xpc_channel {
 	xpc_channel_func func;	/* user's channel function */
 	void *key;		/* pointer to user's key */
 
-	struct mutex msg_to_pull_mutex;	/* next msg to pull serialization */
 	struct completion wdisconnect_wait;	/* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
 					/* opening or closing of channel */
 
-	/* various flavors of local and remote Get/Put values */
-
-	struct xpc_gp *local_GP;	/* local Get/Put values */
-	struct xpc_gp remote_GP;	/* remote Get/Put values */
-	struct xpc_gp w_local_GP;	/* working local Get/Put values */
-	struct xpc_gp w_remote_GP;	/* working remote Get/Put values */
-	s64 next_msg_to_pull;		/* Put value of next msg to pull */
-
 	/* kthread management related fields */
 
 	atomic_t kthreads_assigned;	/* #of kthreads assigned to channel */
@@ -431,6 +433,11 @@ struct xpc_channel {
 
 	wait_queue_head_t idle_wq;	/* idle kthread wait queue */
 
+	union {
+		struct xpc_channel_sn2 sn2;
+		struct xpc_channel_uv uv;
+	} sn;
+
 } ____cacheline_aligned;
 
 /* struct xpc_channel flags */
@@ -467,6 +474,40 @@ struct xpc_channel {
  * for each partition (a partition will never utilize the structure that
  * represents itself).
  */
+
+struct xpc_partition_sn2 {
+	u64 remote_amos_page_pa; /* phys addr of partition's amos page */
+	int activate_IRQ_nasid;	/* active partition's act/deact nasid */
+	int activate_IRQ_phys_cpuid;	/* active part's act/deact phys cpuid */
+
+	u64 remote_vars_pa;	/* phys addr of partition's vars */
+	u64 remote_vars_part_pa; /* phys addr of partition's vars part */
+	u8 remote_vars_version;	/* version# of partition's vars */
+
+	void *local_GPs_base;	/* base address of kmalloc'd space */
+	struct xpc_gp *local_GPs;	/* local Get/Put values */
+	void *remote_GPs_base;	/* base address of kmalloc'd space */
+	struct xpc_gp *remote_GPs;	/* copy of remote partition's local */
+					/* Get/Put values */
+	u64 remote_GPs_pa;	/* phys address of remote partition's local */
+				/* Get/Put values */
+
+	u64 remote_openclose_args_pa;	/* phys addr of remote's args */
+
+	int remote_IPI_nasid;	/* nasid of where to send IPIs */
+	int remote_IPI_phys_cpuid;	/* phys CPU ID of where to send IPIs */
+	char IPI_owner[8];	/* IPI owner's name */
+
+	AMO_t *remote_IPI_amo_va;    /* address of remote IPI AMO_t structure */
+	AMO_t *local_IPI_amo_va;	/* address of IPI AMO_t structure */
+
+	struct timer_list dropped_notify_IRQ_timer;	/* dropped IRQ timer */
+};
+
+struct xpc_partition_uv {
+	/* >>> code is coming */
+};
+
 struct xpc_partition {
 
 	/* XPC HB infrastructure */
@@ -474,22 +515,15 @@ struct xpc_partition {
 	u8 remote_rp_version;	/* version# of partition's rsvd pg */
 	unsigned long remote_rp_stamp; /* time when rsvd pg was initialized */
 	u64 remote_rp_pa;	/* phys addr of partition's rsvd pg */
-	u64 remote_vars_pa;	/* phys addr of partition's vars */
-	u64 remote_vars_part_pa; /* phys addr of partition's vars part */
 	u64 last_heartbeat;	/* HB at last read */
-	u64 remote_amos_page_pa; /* phys addr of partition's amos page */
-	int remote_act_nasid;	/* active part's act/deact nasid */
-	int remote_act_phys_cpuid; /* active part's act/deact phys cpuid */
 	u32 activate_IRQ_rcvd;	/* IRQs since activation */
 	spinlock_t act_lock;	/* protect updating of act_state */
 	u8 act_state;		/* from XPC HB viewpoint */
-	u8 remote_vars_version;	/* version# of partition's vars */
 	enum xp_retval reason;	/* reason partition is deactivating */
 	int reason_line;	/* line# deactivation initiated from */
-	int reactivate_nasid;	/* nasid in partition to reactivate */
 
-	unsigned long disengage_request_timeout; /* timeout in jiffies */
-	struct timer_list disengage_request_timer;
+	unsigned long disengage_timeout;	/* timeout in jiffies */
+	struct timer_list disengage_timer;
 
 	/* XPC infrastructure referencing and teardown control */
 
@@ -502,14 +536,6 @@ struct xpc_partition {
 	atomic_t nchannels_engaged;  /* #of channels engaged with remote part */
 	struct xpc_channel *channels;	/* array of channel structures */
 
-	void *local_GPs_base;	/* base address of kmalloc'd space */
-	struct xpc_gp *local_GPs;	/* local Get/Put values */
-	void *remote_GPs_base;	/* base address of kmalloc'd space */
-	struct xpc_gp *remote_GPs;	/* copy of remote partition's local */
-					/* Get/Put values */
-	u64 remote_GPs_pa;	/* phys address of remote partition's local */
-				/* Get/Put values */
-
 	/* fields used to pass args when opening or closing a channel */
 
 	void *local_openclose_args_base;  /* base address of kmalloc'd space */
@@ -517,19 +543,10 @@ struct xpc_partition {
 	void *remote_openclose_args_base; /* base address of kmalloc'd space */
 	struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
 							   /* args */
-	u64 remote_openclose_args_pa;	/* phys addr of remote's args */
 
 	/* IPI sending, receiving and handling related fields */
 
-	int remote_IPI_nasid;	/* nasid of where to send IPIs */
-	int remote_IPI_phys_cpuid;	/* phys CPU ID of where to send IPIs */
-	AMO_t *remote_IPI_amo_va;    /* address of remote IPI AMO_t structure */
-
-	AMO_t *local_IPI_amo_va;	/* address of IPI AMO_t structure */
 	u64 local_IPI_amo;	/* IPI amo flags yet to be handled */
-	char IPI_owner[8];	/* IPI owner's name */
-	struct timer_list dropped_IPI_timer;	/* dropped IPI timer */
-
 	spinlock_t IPI_lock;	/* IPI handler lock */
 
 	/* channel manager related fields */
@@ -537,6 +554,11 @@ struct xpc_partition {
 	atomic_t channel_mgr_requests;	/* #of requests to activate chan mgr */
 	wait_queue_head_t channel_mgr_wq;	/* channel mgr's wait queue */
 
+	union {
+		struct xpc_partition_sn2 sn2;
+		struct xpc_partition_uv uv;
+	} sn;
+
 } ____cacheline_aligned;
 
 /* struct xpc_partition act_state values (for XPC HB) */
@@ -565,10 +587,10 @@ struct xpc_partition {
 #define XPC_P_DROPPED_IPI_WAIT_INTERVAL	(0.25 * HZ)
 
 /* number of seconds to wait for other partitions to disengage */
-#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT	90
+#define XPC_DISENGAGE_DEFAULT_TIMELIMIT		90
 
-/* interval in seconds to print 'waiting disengagement' messages */
-#define XPC_DISENGAGE_PRINTMSG_INTERVAL	10
+/* interval in seconds to print 'waiting deactivation' messages */
+#define XPC_DEACTIVATE_PRINTMSG_INTERVAL	10
 
 #define XPC_PARTID(_p)	((short)((_p) - &xpc_partitions[0]))
 
@@ -578,13 +600,11 @@ extern struct xpc_registration xpc_registrations[];
 /* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
-extern int xpc_disengage_request_timelimit;
-extern int xpc_disengage_request_timedout;
+extern int xpc_disengage_timelimit;
+extern int xpc_disengage_timedout;
 extern atomic_t xpc_activate_IRQ_rcvd;
 extern wait_queue_head_t xpc_activate_IRQ_wq;
 extern void *xpc_heartbeating_to_mask;
-extern irqreturn_t xpc_notify_IRQ_handler(int, void *);
-extern void xpc_dropped_IPI_check(struct xpc_partition *);
 extern void xpc_activate_partition(struct xpc_partition *);
 extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int, int);
@@ -598,31 +618,34 @@ extern void (*xpc_online_heartbeat) (void);
 extern void (*xpc_check_remote_hb) (void);
 extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *);
 extern u64 (*xpc_get_IPI_flags) (struct xpc_partition *);
+extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
+extern void (*xpc_process_msg_IPI) (struct xpc_partition *, int);
+extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *);
 extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *);
-extern void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *, u64,
-						  int);
+extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, u64,
+						 int);
+extern void (*xpc_request_partition_reactivation) (struct xpc_partition *);
+extern void (*xpc_request_partition_deactivation) (struct xpc_partition *);
+extern void (*xpc_cancel_partition_deactivation_request) (
+						struct xpc_partition *);
 extern void (*xpc_process_activate_IRQ_rcvd) (int);
 extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *);
 extern void (*xpc_teardown_infrastructure) (struct xpc_partition *);
-extern void (*xpc_mark_partition_engaged) (struct xpc_partition *);
-extern void (*xpc_mark_partition_disengaged) (struct xpc_partition *);
-extern void (*xpc_request_partition_disengage) (struct xpc_partition *);
-extern void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *);
-extern u64 (*xpc_partition_engaged) (u64);
-extern u64 (*xpc_partition_disengage_requested) (u64);;
-extern void (*xpc_clear_partition_engaged) (u64);
-extern void (*xpc_clear_partition_disengage_request) (u64);
-
-extern void (*xpc_IPI_send_local_activate) (int);
-extern void (*xpc_IPI_send_activated) (struct xpc_partition *);
-extern void (*xpc_IPI_send_local_reactivate) (int);
-extern void (*xpc_IPI_send_disengage) (struct xpc_partition *);
-
-extern void (*xpc_IPI_send_closerequest) (struct xpc_channel *,
-					  unsigned long *);
-extern void (*xpc_IPI_send_closereply) (struct xpc_channel *, unsigned long *);
-extern void (*xpc_IPI_send_openrequest) (struct xpc_channel *, unsigned long *);
-extern void (*xpc_IPI_send_openreply) (struct xpc_channel *, unsigned long *);
+
+extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *);
+extern int (*xpc_partition_engaged) (short);
+extern int (*xpc_any_partition_engaged) (void);
+extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *);
+extern void (*xpc_assume_partition_disengaged) (short);
+
+extern void (*xpc_send_channel_closerequest) (struct xpc_channel *,
+					      unsigned long *);
+extern void (*xpc_send_channel_closereply) (struct xpc_channel *,
+					    unsigned long *);
+extern void (*xpc_send_channel_openrequest) (struct xpc_channel *,
+					     unsigned long *);
+extern void (*xpc_send_channel_openreply) (struct xpc_channel *,
+					   unsigned long *);
 
 extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, u32, void *, u16,
 					u8, xpc_notify_func, void *);
@@ -646,8 +669,6 @@ extern char *xpc_remote_copy_buffer;
 extern void *xpc_remote_copy_buffer_base;
 extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **);
 extern struct xpc_rsvd_page *xpc_setup_rsvd_page(void);
-extern void xpc_allow_IPI_ops(void);
-extern void xpc_restrict_IPI_ops(void);
 extern int xpc_identify_activate_IRQ_sender(void);
 extern int xpc_partition_disengaged(struct xpc_partition *);
 extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
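The xpc.h hunk above only renames and re-declares the per-architecture hook pointers; their sn2 implementations live in xpc_sn2.c, whose 851-line diff is not reproduced on this page. A minimal sketch of how such hooks are typically bound at init time follows; the init function name and the stub bodies are illustrative assumptions, not the actual xpc_sn2.c code.

/*
 * Sketch only: binding the renamed hook pointers from sn2-specific code.
 * Everything except the hook-pointer names is hypothetical.
 */
struct xpc_partition;

/* hook pointers (declared in xpc.h, defined in xpc_main.c) */
int (*xpc_partition_engaged) (short partid);
int (*xpc_any_partition_engaged) (void);
void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);

/* sn2-side implementations (stubbed here) */
static int xpc_partition_engaged_sn2(short partid) { return 0; }
static int xpc_any_partition_engaged_sn2(void) { return 0; }
static void xpc_indicate_partition_engaged_sn2(struct xpc_partition *part) { }

void xpc_init_sn2(void)         /* hypothetical init entry point */
{
        xpc_partition_engaged = xpc_partition_engaged_sn2;
        xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
        xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
}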
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 55182c8dd32a..48b16136305e 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -201,7 +201,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	if (!(ch->flags & XPC_C_OPENREPLY)) {
 		ch->flags |= XPC_C_OPENREPLY;
-		xpc_IPI_send_openreply(ch, irq_flags);
+		xpc_send_channel_openreply(ch, irq_flags);
 	}
 
 	if (!(ch->flags & XPC_C_ROPENREPLY))
@@ -220,52 +220,6 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
220} 220}
221 221
222/* 222/*
223 * Notify those who wanted to be notified upon delivery of their message.
224 */
225static void
226xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put)
227{
228 struct xpc_notify *notify;
229 u8 notify_type;
230 s64 get = ch->w_remote_GP.get - 1;
231
232 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
233
234 notify = &ch->notify_queue[get % ch->local_nentries];
235
236 /*
237 * See if the notify entry indicates it was associated with
238 * a message who's sender wants to be notified. It is possible
239 * that it is, but someone else is doing or has done the
240 * notification.
241 */
242 notify_type = notify->type;
243 if (notify_type == 0 ||
244 cmpxchg(&notify->type, notify_type, 0) != notify_type) {
245 continue;
246 }
247
248 DBUG_ON(notify_type != XPC_N_CALL);
249
250 atomic_dec(&ch->n_to_notify);
251
252 if (notify->func != NULL) {
253 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
254 "msg_number=%ld, partid=%d, channel=%d\n",
255 (void *)notify, get, ch->partid, ch->number);
256
257 notify->func(reason, ch->partid, ch->number,
258 notify->key);
259
260 dev_dbg(xpc_chan, "notify->func() returned, "
261 "notify=0x%p, msg_number=%ld, partid=%d, "
262 "channel=%d\n", (void *)notify, get,
263 ch->partid, ch->number);
264 }
265 }
266}
267
268/*
269 * Free up message queues and other stuff that were allocated for the specified 223 * Free up message queues and other stuff that were allocated for the specified
270 * channel. 224 * channel.
271 * 225 *
@@ -275,6 +229,8 @@ xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put)
 static void
 xpc_free_msgqueues(struct xpc_channel *ch)
 {
+	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
+
 	DBUG_ON(!spin_is_locked(&ch->lock));
 	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);
 
@@ -287,15 +243,15 @@ xpc_free_msgqueues(struct xpc_channel *ch)
 	ch->kthreads_assigned_limit = 0;
 	ch->kthreads_idle_limit = 0;
 
-	ch->local_GP->get = 0;
-	ch->local_GP->put = 0;
-	ch->remote_GP.get = 0;
-	ch->remote_GP.put = 0;
-	ch->w_local_GP.get = 0;
-	ch->w_local_GP.put = 0;
-	ch->w_remote_GP.get = 0;
-	ch->w_remote_GP.put = 0;
-	ch->next_msg_to_pull = 0;
+	ch_sn2->local_GP->get = 0;
+	ch_sn2->local_GP->put = 0;
+	ch_sn2->remote_GP.get = 0;
+	ch_sn2->remote_GP.put = 0;
+	ch_sn2->w_local_GP.get = 0;
+	ch_sn2->w_local_GP.put = 0;
+	ch_sn2->w_remote_GP.get = 0;
+	ch_sn2->w_remote_GP.put = 0;
+	ch_sn2->next_msg_to_pull = 0;
 
 	if (ch->flags & XPC_C_SETUP) {
 		ch->flags &= ~XPC_C_SETUP;
@@ -339,7 +295,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	if (part->act_state == XPC_P_DEACTIVATING) {
 		/* can't proceed until the other side disengages from us */
-		if (xpc_partition_engaged(1UL << ch->partid))
+		if (xpc_partition_engaged(ch->partid))
 			return;
 
 	} else {
@@ -351,7 +307,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	if (!(ch->flags & XPC_C_CLOSEREPLY)) {
 		ch->flags |= XPC_C_CLOSEREPLY;
-		xpc_IPI_send_closereply(ch, irq_flags);
+		xpc_send_channel_closereply(ch, irq_flags);
 	}
 
 	if (!(ch->flags & XPC_C_RCLOSEREPLY))
@@ -361,7 +317,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 	/* wake those waiting for notify completion */
 	if (atomic_read(&ch->n_to_notify) > 0) {
 		/* >>> we do callout while holding ch->lock */
-		xpc_notify_senders(ch, ch->reason, ch->w_local_GP.put);
+		xpc_notify_senders_of_disconnect(ch);
 	}
 
 	/* both sides are disconnected now */
@@ -734,7 +690,7 @@ xpc_connect_channel(struct xpc_channel *ch)
 	/* initiate the connection */
 
 	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
-	xpc_IPI_send_openrequest(ch, &irq_flags);
+	xpc_send_channel_openrequest(ch, &irq_flags);
 
 	xpc_process_connect(ch, &irq_flags);
 
@@ -743,142 +699,6 @@ xpc_connect_channel(struct xpc_channel *ch)
743 return xpSuccess; 699 return xpSuccess;
744} 700}
745 701
746/*
747 * Clear some of the msg flags in the local message queue.
748 */
749static inline void
750xpc_clear_local_msgqueue_flags(struct xpc_channel *ch)
751{
752 struct xpc_msg *msg;
753 s64 get;
754
755 get = ch->w_remote_GP.get;
756 do {
757 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
758 (get % ch->local_nentries) *
759 ch->msg_size);
760 msg->flags = 0;
761 } while (++get < ch->remote_GP.get);
762}
763
764/*
765 * Clear some of the msg flags in the remote message queue.
766 */
767static inline void
768xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch)
769{
770 struct xpc_msg *msg;
771 s64 put;
772
773 put = ch->w_remote_GP.put;
774 do {
775 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
776 (put % ch->remote_nentries) *
777 ch->msg_size);
778 msg->flags = 0;
779 } while (++put < ch->remote_GP.put);
780}
781
782static void
783xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
784{
785 struct xpc_channel *ch = &part->channels[ch_number];
786 int nmsgs_sent;
787
788 ch->remote_GP = part->remote_GPs[ch_number];
789
790 /* See what, if anything, has changed for each connected channel */
791
792 xpc_msgqueue_ref(ch);
793
794 if (ch->w_remote_GP.get == ch->remote_GP.get &&
795 ch->w_remote_GP.put == ch->remote_GP.put) {
796 /* nothing changed since GPs were last pulled */
797 xpc_msgqueue_deref(ch);
798 return;
799 }
800
801 if (!(ch->flags & XPC_C_CONNECTED)) {
802 xpc_msgqueue_deref(ch);
803 return;
804 }
805
806 /*
807 * First check to see if messages recently sent by us have been
808 * received by the other side. (The remote GET value will have
809 * changed since we last looked at it.)
810 */
811
812 if (ch->w_remote_GP.get != ch->remote_GP.get) {
813
814 /*
815 * We need to notify any senders that want to be notified
816 * that their sent messages have been received by their
817 * intended recipients. We need to do this before updating
818 * w_remote_GP.get so that we don't allocate the same message
819 * queue entries prematurely (see xpc_allocate_msg()).
820 */
821 if (atomic_read(&ch->n_to_notify) > 0) {
822 /*
823 * Notify senders that messages sent have been
824 * received and delivered by the other side.
825 */
826 xpc_notify_senders(ch, xpMsgDelivered,
827 ch->remote_GP.get);
828 }
829
830 /*
831 * Clear msg->flags in previously sent messages, so that
832 * they're ready for xpc_allocate_msg().
833 */
834 xpc_clear_local_msgqueue_flags(ch);
835
836 ch->w_remote_GP.get = ch->remote_GP.get;
837
838 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
839 "channel=%d\n", ch->w_remote_GP.get, ch->partid,
840 ch->number);
841
842 /*
843 * If anyone was waiting for message queue entries to become
844 * available, wake them up.
845 */
846 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
847 wake_up(&ch->msg_allocate_wq);
848 }
849
850 /*
851 * Now check for newly sent messages by the other side. (The remote
852 * PUT value will have changed since we last looked at it.)
853 */
854
855 if (ch->w_remote_GP.put != ch->remote_GP.put) {
856 /*
857 * Clear msg->flags in previously received messages, so that
858 * they're ready for xpc_get_deliverable_msg().
859 */
860 xpc_clear_remote_msgqueue_flags(ch);
861
862 ch->w_remote_GP.put = ch->remote_GP.put;
863
864 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
865 "channel=%d\n", ch->w_remote_GP.put, ch->partid,
866 ch->number);
867
868 nmsgs_sent = ch->w_remote_GP.put - ch->w_local_GP.get;
869 if (nmsgs_sent > 0) {
870 dev_dbg(xpc_chan, "msgs waiting to be copied and "
871 "delivered=%d, partid=%d, channel=%d\n",
872 nmsgs_sent, ch->partid, ch->number);
873
874 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
875 xpc_activate_kthreads(ch, nmsgs_sent);
876 }
877 }
878
879 xpc_msgqueue_deref(ch);
880}
881
882void 702void
883xpc_process_channel_activity(struct xpc_partition *part) 703xpc_process_channel_activity(struct xpc_partition *part)
884{ 704{
@@ -1117,7 +937,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 			       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
 			       XPC_C_CONNECTING | XPC_C_CONNECTED);
 
-	xpc_IPI_send_closerequest(ch, irq_flags);
+	xpc_send_channel_closerequest(ch, irq_flags);
 
 	if (channel_was_connected)
 		ch->flags |= XPC_C_WASCONNECTED;
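In xpc_main.c below, the open-coded Get/Put arithmetic (ch->w_remote_GP.put - ch->w_local_GP.get) disappears in favor of a new hook, xpc_n_of_deliverable_msgs(), because the working GP values are now sn2-private. A plausible sn2-side implementation, sketched directly from the arithmetic it replaces (the helper shown here is an assumption, not a quote from xpc_sn2.c):

#include <stdint.h>

struct xpc_gp { int64_t get, put; };    /* stub of the real Get/Put pair */

struct xpc_channel_sn2 {
        struct xpc_gp w_local_GP;       /* working local Get/Put values */
        struct xpc_gp w_remote_GP;      /* working remote Get/Put values */
};

struct xpc_channel {
        union { struct xpc_channel_sn2 sn2; } sn;
};

/*
 * Number of messages the remote side has put that we have not yet
 * gotten -- the same expression the generic code used to open-code.
 */
static int xpc_n_of_deliverable_msgs_sn2(struct xpc_channel *ch)
{
        return (int)(ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get);
}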
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 8780d5d00f62..563aaf4a2ff6 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -26,7 +26,7 @@
  * Caveats:
  *
  *   . We currently have no way to determine which nasid an IPI came
- *     from. Thus, xpc_IPI_send() does a remote AMO write followed by
+ *     from. Thus, >>> xpc_IPI_send() does a remote AMO write followed by
  *     an IPI. The AMO indicates where data is to be pulled from, so
  *     after the IPI arrives, the remote partition checks the AMO word.
  *     The IPI can actually arrive before the AMO however, so other code
@@ -89,9 +89,9 @@ static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
 static int xpc_hb_check_min_interval = 10;
 static int xpc_hb_check_max_interval = 120;
 
-int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
-static int xpc_disengage_request_min_timelimit;	/* = 0 */
-static int xpc_disengage_request_max_timelimit = 120;
+int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
+static int xpc_disengage_min_timelimit;	/* = 0 */
+static int xpc_disengage_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
 	{
@@ -124,14 +124,14 @@ static ctl_table xpc_sys_xpc_dir[] = {
 	 .child = xpc_sys_xpc_hb_dir},
 	{
 	 .ctl_name = CTL_UNNUMBERED,
-	 .procname = "disengage_request_timelimit",
-	 .data = &xpc_disengage_request_timelimit,
+	 .procname = "disengage_timelimit",
+	 .data = &xpc_disengage_timelimit,
 	 .maxlen = sizeof(int),
 	 .mode = 0644,
 	 .proc_handler = &proc_dointvec_minmax,
 	 .strategy = &sysctl_intvec,
-	 .extra1 = &xpc_disengage_request_min_timelimit,
-	 .extra2 = &xpc_disengage_request_max_timelimit},
+	 .extra1 = &xpc_disengage_min_timelimit,
+	 .extra2 = &xpc_disengage_max_timelimit},
 	{}
 };
 static ctl_table xpc_sys_dir[] = {
@@ -144,8 +144,8 @@ static ctl_table xpc_sys_dir[] = {
 };
 static struct ctl_table_header *xpc_sysctl;
 
-/* non-zero if any remote partition disengage request was timed out */
-int xpc_disengage_request_timedout;
+/* non-zero if any remote partition disengage was timed out */
+int xpc_disengage_timedout;
 
 /* #of activate IRQs received */
 atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0);
@@ -184,38 +184,36 @@ void (*xpc_online_heartbeat) (void);
184void (*xpc_check_remote_hb) (void); 184void (*xpc_check_remote_hb) (void);
185 185
186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); 186enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
187void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
187u64 (*xpc_get_IPI_flags) (struct xpc_partition *part); 188u64 (*xpc_get_IPI_flags) (struct xpc_partition *part);
189void (*xpc_process_msg_IPI) (struct xpc_partition *part, int ch_number);
190int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
188struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); 191struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
189 192
190void (*xpc_initiate_partition_activation) (struct xpc_rsvd_page *remote_rp, 193void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
191 u64 remote_rp_pa, int nasid); 194 u64 remote_rp_pa, int nasid);
195void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
196void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
197void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
192 198
193void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected); 199void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected);
194enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part); 200enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
195void (*xpc_teardown_infrastructure) (struct xpc_partition *part); 201void (*xpc_teardown_infrastructure) (struct xpc_partition *part);
196 202
197void (*xpc_mark_partition_engaged) (struct xpc_partition *part); 203void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
198void (*xpc_mark_partition_disengaged) (struct xpc_partition *part); 204int (*xpc_partition_engaged) (short partid);
199void (*xpc_request_partition_disengage) (struct xpc_partition *part); 205int (*xpc_any_partition_engaged) (void);
200void (*xpc_cancel_partition_disengage_request) (struct xpc_partition *part); 206void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
201u64 (*xpc_partition_engaged) (u64 partid_mask); 207void (*xpc_assume_partition_disengaged) (short partid);
202u64 (*xpc_partition_disengage_requested) (u64 partid_mask); 208
203void (*xpc_clear_partition_engaged) (u64 partid_mask); 209void (*xpc_send_channel_closerequest) (struct xpc_channel *ch,
204void (*xpc_clear_partition_disengage_request) (u64 partid_mask); 210 unsigned long *irq_flags);
205 211void (*xpc_send_channel_closereply) (struct xpc_channel *ch,
206void (*xpc_IPI_send_local_activate) (int from_nasid); 212 unsigned long *irq_flags);
207void (*xpc_IPI_send_activated) (struct xpc_partition *part); 213void (*xpc_send_channel_openrequest) (struct xpc_channel *ch,
208void (*xpc_IPI_send_local_reactivate) (int from_nasid); 214 unsigned long *irq_flags);
209void (*xpc_IPI_send_disengage) (struct xpc_partition *part); 215void (*xpc_send_channel_openreply) (struct xpc_channel *ch,
210 216 unsigned long *irq_flags);
211void (*xpc_IPI_send_closerequest) (struct xpc_channel *ch,
212 unsigned long *irq_flags);
213void (*xpc_IPI_send_closereply) (struct xpc_channel *ch,
214 unsigned long *irq_flags);
215void (*xpc_IPI_send_openrequest) (struct xpc_channel *ch,
216 unsigned long *irq_flags);
217void (*xpc_IPI_send_openreply) (struct xpc_channel *ch,
218 unsigned long *irq_flags);
219 217
220enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags, 218enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
221 void *payload, u16 payload_size, u8 notify_type, 219 void *payload, u16 payload_size, u8 notify_type,
@@ -223,19 +221,19 @@ enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
 void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);
 
 /*
- * Timer function to enforce the timelimit on the partition disengage request.
+ * Timer function to enforce the timelimit on the partition disengage.
  */
 static void
-xpc_timeout_partition_disengage_request(unsigned long data)
+xpc_timeout_partition_disengage(unsigned long data)
 {
 	struct xpc_partition *part = (struct xpc_partition *)data;
 
-	DBUG_ON(time_is_after_jiffies(part->disengage_request_timeout));
+	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));
 
 	(void)xpc_partition_disengaged(part);
 
-	DBUG_ON(part->disengage_request_timeout != 0);
-	DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
+	DBUG_ON(part->disengage_timeout != 0);
+	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
 }
@@ -464,7 +462,7 @@ xpc_activating(void *__partid)
464 462
465 if (part->reason == xpReactivating) { 463 if (part->reason == xpReactivating) {
466 /* interrupting ourselves results in activating partition */ 464 /* interrupting ourselves results in activating partition */
467 xpc_IPI_send_local_reactivate(part->reactivate_nasid); 465 xpc_request_partition_reactivation(part);
468 } 466 }
469 467
470 return 0; 468 return 0;
@@ -496,82 +494,6 @@ xpc_activate_partition(struct xpc_partition *part)
496 } 494 }
497} 495}
498 496
499/*
500 * Check to see if there is any channel activity to/from the specified
501 * partition.
502 */
503static void
504xpc_check_for_channel_activity(struct xpc_partition *part)
505{
506 u64 IPI_amo;
507 unsigned long irq_flags;
508
509/* this needs to be uncommented, but I'm thinking this function and the */
510/* ones that call it need to be moved into xpc_sn2.c... */
511 IPI_amo = 0; /* = xpc_IPI_receive(part->local_IPI_amo_va); */
512 if (IPI_amo == 0)
513 return;
514
515 spin_lock_irqsave(&part->IPI_lock, irq_flags);
516 part->local_IPI_amo |= IPI_amo;
517 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
518
519 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
520 XPC_PARTID(part), IPI_amo);
521
522 xpc_wakeup_channel_mgr(part);
523}
524
525/*
526 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
527 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
528 * than one partition, we use an AMO_t structure per partition to indicate
529 * whether a partition has sent an IPI or not. If it has, then wake up the
530 * associated kthread to handle it.
531 *
532 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
533 * running on other partitions.
534 *
535 * Noteworthy Arguments:
536 *
537 * irq - Interrupt ReQuest number. NOT USED.
538 *
539 * dev_id - partid of IPI's potential sender.
540 */
541irqreturn_t
542xpc_notify_IRQ_handler(int irq, void *dev_id)
543{
544 short partid = (short)(u64)dev_id;
545 struct xpc_partition *part = &xpc_partitions[partid];
546
547 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
548
549 if (xpc_part_ref(part)) {
550 xpc_check_for_channel_activity(part);
551
552 xpc_part_deref(part);
553 }
554 return IRQ_HANDLED;
555}
556
557/*
558 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
559 * because the write to their associated IPI amo completed after the IRQ/IPI
560 * was received.
561 */
562void
563xpc_dropped_IPI_check(struct xpc_partition *part)
564{
565 if (xpc_part_ref(part)) {
566 xpc_check_for_channel_activity(part);
567
568 part->dropped_IPI_timer.expires = jiffies +
569 XPC_P_DROPPED_IPI_WAIT_INTERVAL;
570 add_timer(&part->dropped_IPI_timer);
571 xpc_part_deref(part);
572 }
573}
574
575void 497void
576xpc_activate_kthreads(struct xpc_channel *ch, int needed) 498xpc_activate_kthreads(struct xpc_channel *ch, int needed)
577{ 499{
@@ -616,7 +538,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
616 do { 538 do {
617 /* deliver messages to their intended recipients */ 539 /* deliver messages to their intended recipients */
618 540
619 while (ch->w_local_GP.get < ch->w_remote_GP.put && 541 while (xpc_n_of_deliverable_msgs(ch) > 0 &&
620 !(ch->flags & XPC_C_DISCONNECTING)) { 542 !(ch->flags & XPC_C_DISCONNECTING)) {
621 xpc_deliver_msg(ch); 543 xpc_deliver_msg(ch);
622 } 544 }
@@ -632,7 +554,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
632 "wait_event_interruptible_exclusive()\n"); 554 "wait_event_interruptible_exclusive()\n");
633 555
634 (void)wait_event_interruptible_exclusive(ch->idle_wq, 556 (void)wait_event_interruptible_exclusive(ch->idle_wq,
635 (ch->w_local_GP.get < ch->w_remote_GP.put || 557 (xpc_n_of_deliverable_msgs(ch) > 0 ||
636 (ch->flags & XPC_C_DISCONNECTING))); 558 (ch->flags & XPC_C_DISCONNECTING)));
637 559
638 atomic_dec(&ch->kthreads_idle); 560 atomic_dec(&ch->kthreads_idle);
@@ -677,7 +599,7 @@ xpc_kthread_start(void *args)
677 * additional kthreads to help deliver them. We only 599 * additional kthreads to help deliver them. We only
678 * need one less than total #of messages to deliver. 600 * need one less than total #of messages to deliver.
679 */ 601 */
680 n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1; 602 n_needed = xpc_n_of_deliverable_msgs(ch) - 1;
681 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) 603 if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
682 xpc_activate_kthreads(ch, n_needed); 604 xpc_activate_kthreads(ch, n_needed);
683 605
@@ -703,11 +625,9 @@ xpc_kthread_start(void *args)
703 } 625 }
704 spin_unlock_irqrestore(&ch->lock, irq_flags); 626 spin_unlock_irqrestore(&ch->lock, irq_flags);
705 627
706 if (atomic_dec_return(&ch->kthreads_assigned) == 0) { 628 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
707 if (atomic_dec_return(&part->nchannels_engaged) == 0) { 629 atomic_dec_return(&part->nchannels_engaged) == 0) {
708 xpc_mark_partition_disengaged(part); 630 xpc_indicate_partition_disengaged(part);
709 xpc_IPI_send_disengage(part);
710 }
711 } 631 }
712 632
713 xpc_msgqueue_deref(ch); 633 xpc_msgqueue_deref(ch);
@@ -758,9 +678,9 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
758 } else if (ch->flags & XPC_C_DISCONNECTING) { 678 } else if (ch->flags & XPC_C_DISCONNECTING) {
759 break; 679 break;
760 680
761 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) { 681 } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
762 if (atomic_inc_return(&part->nchannels_engaged) == 1) 682 atomic_inc_return(&part->nchannels_engaged) == 1) {
763 xpc_mark_partition_engaged(part); 683 xpc_indicate_partition_engaged(part);
764 } 684 }
765 (void)xpc_part_ref(part); 685 (void)xpc_part_ref(part);
766 xpc_msgqueue_ref(ch); 686 xpc_msgqueue_ref(ch);
@@ -782,8 +702,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
782 702
783 if (atomic_dec_return(&ch->kthreads_assigned) == 0 && 703 if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
784 atomic_dec_return(&part->nchannels_engaged) == 0) { 704 atomic_dec_return(&part->nchannels_engaged) == 0) {
785 xpc_mark_partition_disengaged(part); 705 xpc_indicate_partition_disengaged(part);
786 xpc_IPI_send_disengage(part);
787 } 706 }
788 xpc_msgqueue_deref(ch); 707 xpc_msgqueue_deref(ch);
789 xpc_part_deref(part); 708 xpc_part_deref(part);
@@ -862,7 +781,7 @@ xpc_do_exit(enum xp_retval reason)
862 short partid; 781 short partid;
863 int active_part_count, printed_waiting_msg = 0; 782 int active_part_count, printed_waiting_msg = 0;
864 struct xpc_partition *part; 783 struct xpc_partition *part;
865 unsigned long printmsg_time, disengage_request_timeout = 0; 784 unsigned long printmsg_time, disengage_timeout = 0;
866 785
867 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */ 786 /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
868 DBUG_ON(xpc_exiting == 1); 787 DBUG_ON(xpc_exiting == 1);
@@ -886,8 +805,8 @@ xpc_do_exit(enum xp_retval reason)
886 805
887 /* wait for all partitions to become inactive */ 806 /* wait for all partitions to become inactive */
888 807
889 printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 808 printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
890 xpc_disengage_request_timedout = 0; 809 xpc_disengage_timedout = 0;
891 810
892 do { 811 do {
893 active_part_count = 0; 812 active_part_count = 0;
@@ -904,36 +823,32 @@ xpc_do_exit(enum xp_retval reason)
904 823
905 XPC_DEACTIVATE_PARTITION(part, reason); 824 XPC_DEACTIVATE_PARTITION(part, reason);
906 825
907 if (part->disengage_request_timeout > 826 if (part->disengage_timeout > disengage_timeout)
908 disengage_request_timeout) { 827 disengage_timeout = part->disengage_timeout;
909 disengage_request_timeout =
910 part->disengage_request_timeout;
911 }
912 } 828 }
913 829
914 if (xpc_partition_engaged(-1UL)) { 830 if (xpc_any_partition_engaged()) {
915 if (time_is_before_jiffies(printmsg_time)) { 831 if (time_is_before_jiffies(printmsg_time)) {
916 dev_info(xpc_part, "waiting for remote " 832 dev_info(xpc_part, "waiting for remote "
917 "partitions to disengage, timeout in " 833 "partitions to deactivate, timeout in "
918 "%ld seconds\n", 834 "%ld seconds\n", (disengage_timeout -
919 (disengage_request_timeout - jiffies) 835 jiffies) / HZ);
920 / HZ);
921 printmsg_time = jiffies + 836 printmsg_time = jiffies +
922 (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ); 837 (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
923 printed_waiting_msg = 1; 838 printed_waiting_msg = 1;
924 } 839 }
925 840
926 } else if (active_part_count > 0) { 841 } else if (active_part_count > 0) {
927 if (printed_waiting_msg) { 842 if (printed_waiting_msg) {
928 dev_info(xpc_part, "waiting for local partition" 843 dev_info(xpc_part, "waiting for local partition"
929 " to disengage\n"); 844 " to deactivate\n");
930 printed_waiting_msg = 0; 845 printed_waiting_msg = 0;
931 } 846 }
932 847
933 } else { 848 } else {
934 if (!xpc_disengage_request_timedout) { 849 if (!xpc_disengage_timedout) {
935 dev_info(xpc_part, "all partitions have " 850 dev_info(xpc_part, "all partitions have "
936 "disengaged\n"); 851 "deactivated\n");
937 } 852 }
938 break; 853 break;
939 } 854 }
@@ -943,7 +858,7 @@ xpc_do_exit(enum xp_retval reason)
943 858
944 } while (1); 859 } while (1);
945 860
946 DBUG_ON(xpc_partition_engaged(-1UL)); 861 DBUG_ON(xpc_any_partition_engaged());
947 DBUG_ON(xpc_any_hbs_allowed() != 0); 862 DBUG_ON(xpc_any_hbs_allowed() != 0);
948 863
949 /* indicate to others that our reserved page is uninitialized */ 864 /* indicate to others that our reserved page is uninitialized */
@@ -996,15 +911,16 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
996} 911}
997 912
998/* 913/*
999 * Notify other partitions to disengage from all references to our memory. 914 * Notify other partitions to deactivate from us by first disengaging from all
915 * references to our memory.
1000 */ 916 */
1001static void 917static void
1002xpc_die_disengage(void) 918xpc_die_deactivate(void)
1003{ 919{
1004 struct xpc_partition *part; 920 struct xpc_partition *part;
1005 short partid; 921 short partid;
1006 unsigned long engaged; 922 int any_engaged;
1007 long time, printmsg_time, disengage_request_timeout; 923 long time, printmsg_time, disengage_timeout;
1008 924
1009 /* keep xpc_hb_checker thread from doing anything (just in case) */ 925 /* keep xpc_hb_checker thread from doing anything (just in case) */
1010 xpc_exiting = 1; 926 xpc_exiting = 1;
@@ -1014,43 +930,37 @@ xpc_die_disengage(void)
1014 for (partid = 0; partid < xp_max_npartitions; partid++) { 930 for (partid = 0; partid < xp_max_npartitions; partid++) {
1015 part = &xpc_partitions[partid]; 931 part = &xpc_partitions[partid];
1016 932
1017 if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> 933 if (xpc_partition_engaged(partid) ||
1018 remote_vars_version)) {
1019
1020 /* just in case it was left set by an earlier XPC */
1021 xpc_clear_partition_engaged(1UL << partid);
1022 continue;
1023 }
1024
1025 if (xpc_partition_engaged(1UL << partid) ||
1026 part->act_state != XPC_P_INACTIVE) { 934 part->act_state != XPC_P_INACTIVE) {
1027 xpc_request_partition_disengage(part); 935 xpc_request_partition_deactivation(part);
1028 xpc_mark_partition_disengaged(part); 936 xpc_indicate_partition_disengaged(part);
1029 xpc_IPI_send_disengage(part);
1030 } 937 }
1031 } 938 }
1032 939
1033 time = rtc_time(); 940 time = rtc_time();
1034 printmsg_time = time + 941 printmsg_time = time +
1035 (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); 942 (XPC_DEACTIVATE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
1036 disengage_request_timeout = time + 943 disengage_timeout = time +
1037 (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second); 944 (xpc_disengage_timelimit * sn_rtc_cycles_per_second);
1038 945
1039 /* wait for all other partitions to disengage from us */ 946 /*
947 * Though we requested that all other partitions deactivate from us,
948 * we only wait until they've all disengaged.
949 */
1040 950
1041 while (1) { 951 while (1) {
1042 engaged = xpc_partition_engaged(-1UL); 952 any_engaged = xpc_any_partition_engaged();
1043 if (!engaged) { 953 if (!any_engaged) {
1044 dev_info(xpc_part, "all partitions have disengaged\n"); 954 dev_info(xpc_part, "all partitions have deactivated\n");
1045 break; 955 break;
1046 } 956 }
1047 957
1048 time = rtc_time(); 958 time = rtc_time();
1049 if (time >= disengage_request_timeout) { 959 if (time >= disengage_timeout) {
1050 for (partid = 0; partid < xp_max_npartitions; 960 for (partid = 0; partid < xp_max_npartitions;
1051 partid++) { 961 partid++) {
1052 if (engaged & (1UL << partid)) { 962 if (xpc_partition_engaged(partid)) {
1053 dev_info(xpc_part, "disengage from " 963 dev_info(xpc_part, "deactivate from "
1054 "remote partition %d timed " 964 "remote partition %d timed "
1055 "out\n", partid); 965 "out\n", partid);
1056 } 966 }
@@ -1060,11 +970,11 @@ xpc_die_disengage(void)
1060 970
1061 if (time >= printmsg_time) { 971 if (time >= printmsg_time) {
1062 dev_info(xpc_part, "waiting for remote partitions to " 972 dev_info(xpc_part, "waiting for remote partitions to "
1063 "disengage, timeout in %ld seconds\n", 973 "deactivate, timeout in %ld seconds\n",
1064 (disengage_request_timeout - time) / 974 (disengage_timeout - time) /
1065 sn_rtc_cycles_per_second); 975 sn_rtc_cycles_per_second);
1066 printmsg_time = time + 976 printmsg_time = time +
1067 (XPC_DISENGAGE_PRINTMSG_INTERVAL * 977 (XPC_DEACTIVATE_PRINTMSG_INTERVAL *
1068 sn_rtc_cycles_per_second); 978 sn_rtc_cycles_per_second);
1069 } 979 }
1070 } 980 }
@@ -1084,7 +994,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
1084 switch (event) { 994 switch (event) {
1085 case DIE_MACHINE_RESTART: 995 case DIE_MACHINE_RESTART:
1086 case DIE_MACHINE_HALT: 996 case DIE_MACHINE_HALT:
1087 xpc_die_disengage(); 997 xpc_die_deactivate();
1088 break; 998 break;
1089 999
1090 case DIE_KDEBUG_ENTER: 1000 case DIE_KDEBUG_ENTER:
@@ -1183,10 +1093,10 @@ xpc_init(void)
1183 part->act_state = XPC_P_INACTIVE; 1093 part->act_state = XPC_P_INACTIVE;
1184 XPC_SET_REASON(part, 0, 0); 1094 XPC_SET_REASON(part, 0, 0);
1185 1095
1186 init_timer(&part->disengage_request_timer); 1096 init_timer(&part->disengage_timer);
1187 part->disengage_request_timer.function = 1097 part->disengage_timer.function =
1188 xpc_timeout_partition_disengage_request; 1098 xpc_timeout_partition_disengage;
1189 part->disengage_request_timer.data = (unsigned long)part; 1099 part->disengage_timer.data = (unsigned long)part;
1190 1100
1191 part->setup_state = XPC_P_UNSET; 1101 part->setup_state = XPC_P_UNSET;
1192 init_waitqueue_head(&part->teardown_wq); 1102 init_waitqueue_head(&part->teardown_wq);
@@ -1295,9 +1205,9 @@ module_param(xpc_hb_check_interval, int, 0);
1295MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between " 1205MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
1296 "heartbeat checks."); 1206 "heartbeat checks.");
1297 1207
1298module_param(xpc_disengage_request_timelimit, int, 0); 1208module_param(xpc_disengage_timelimit, int, 0);
1299MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait " 1209MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
1300 "for disengage request to complete."); 1210 "for disengage to complete.");
1301 1211
1302module_param(xpc_kdebug_ignore, int, 0); 1212module_param(xpc_kdebug_ignore, int, 0);
1303MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by " 1213MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index bf9b1193bd2a..c769ab8f74ef 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -242,7 +242,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
242 return xpBadVersion; 242 return xpBadVersion;
243 } 243 }
244 244
245 /* check that both local and remote partids are valid for each side */ 245 /* check that both remote and local partids are valid for each side */
246 if (remote_rp->SAL_partid < 0 || 246 if (remote_rp->SAL_partid < 0 ||
247 remote_rp->SAL_partid >= xp_max_npartitions || 247 remote_rp->SAL_partid >= xp_max_npartitions ||
248 remote_rp->max_npartitions <= sn_partition_id) { 248 remote_rp->max_npartitions <= sn_partition_id) {
@@ -256,8 +256,9 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids,
256} 256}
257 257
258/* 258/*
259 * See if the other side has responded to a partition disengage request 259 * See if the other side has responded to a partition deactivate request
260 * from us. 260 * from us. Though we requested the remote partition to deactivate with regard
261 * to us, we really only need to wait for the other side to disengage from us.
261 */ 262 */
262int 263int
263xpc_partition_disengaged(struct xpc_partition *part) 264xpc_partition_disengaged(struct xpc_partition *part)
@@ -265,41 +266,37 @@ xpc_partition_disengaged(struct xpc_partition *part)
265 short partid = XPC_PARTID(part); 266 short partid = XPC_PARTID(part);
266 int disengaged; 267 int disengaged;
267 268
268 disengaged = (xpc_partition_engaged(1UL << partid) == 0); 269 disengaged = !xpc_partition_engaged(partid);
269 if (part->disengage_request_timeout) { 270 if (part->disengage_timeout) {
270 if (!disengaged) { 271 if (!disengaged) {
271 if (time_is_after_jiffies(part-> 272 if (time_is_after_jiffies(part->disengage_timeout)) {
272 disengage_request_timeout)) {
273 /* timelimit hasn't been reached yet */ 273 /* timelimit hasn't been reached yet */
274 return 0; 274 return 0;
275 } 275 }
276 276
277 /* 277 /*
278 * Other side hasn't responded to our disengage 278 * Other side hasn't responded to our deactivate
279 * request in a timely fashion, so assume it's dead. 279 * request in a timely fashion, so assume it's dead.
280 */ 280 */
281 281
282 dev_info(xpc_part, "disengage from remote partition %d " 282 dev_info(xpc_part, "deactivate request to remote "
283 "timed out\n", partid); 283 "partition %d timed out\n", partid);
284 xpc_disengage_request_timedout = 1; 284 xpc_disengage_timedout = 1;
285 xpc_clear_partition_engaged(1UL << partid); 285 xpc_assume_partition_disengaged(partid);
286 disengaged = 1; 286 disengaged = 1;
287 } 287 }
288 part->disengage_request_timeout = 0; 288 part->disengage_timeout = 0;
289 289
290 /* cancel the timer function, provided it's not us */ 290 /* cancel the timer function, provided it's not us */
291 if (!in_interrupt()) { 291 if (!in_interrupt())
292 del_singleshot_timer_sync(&part-> 292 del_singleshot_timer_sync(&part->disengage_timer);
293 disengage_request_timer);
294 }
295 293
296 DBUG_ON(part->act_state != XPC_P_DEACTIVATING && 294 DBUG_ON(part->act_state != XPC_P_DEACTIVATING &&
297 part->act_state != XPC_P_INACTIVE); 295 part->act_state != XPC_P_INACTIVE);
298 if (part->act_state != XPC_P_INACTIVE) 296 if (part->act_state != XPC_P_INACTIVE)
299 xpc_wakeup_channel_mgr(part); 297 xpc_wakeup_channel_mgr(part);
300 298
301 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) 299 xpc_cancel_partition_deactivation_request(part);
302 xpc_cancel_partition_disengage_request(part);
303 } 300 }
304 return disengaged; 301 return disengaged;
305} 302}
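
Editor's note: the hunk above waits for the remote side's "engaged" indication to clear, bounded by a jiffies deadline; once the deadline passes the peer is assumed dead and its bit is cleared locally. A minimal standalone userspace model of that control flow is sketched below — `fake_partition`, `now_seconds()` and the use of CLOCK_MONOTONIC seconds in place of jiffies are all invented for illustration and are not part of the driver.

/*
 * Illustrative userspace model (not driver code) of the disengage
 * timeout logic: a deadline of 0 means "no deactivation in progress";
 * a non-zero deadline that has passed means the remote side is assumed
 * dead and its engaged indication is cleared on its behalf.
 */
#include <stdio.h>
#include <time.h>

struct fake_partition {
	unsigned long disengage_timeout;	/* 0 == not deactivating */
	int engaged;				/* stands in for the AMO bit */
};

static unsigned long now_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_sec;
}

/* returns 1 once the remote side is (or is assumed to be) disengaged */
static int partition_disengaged(struct fake_partition *p)
{
	int disengaged = !p->engaged;

	if (p->disengage_timeout != 0) {
		if (!disengaged) {
			if (now_seconds() < p->disengage_timeout)
				return 0;	/* still within the time limit */
			/* timed out: assume the other side is dead */
			p->engaged = 0;
			disengaged = 1;
		}
		p->disengage_timeout = 0;	/* done with this deactivation */
	}
	return disengaged;
}

int main(void)
{
	struct fake_partition p = { .disengage_timeout = now_seconds() + 1,
				    .engaged = 1 };

	printf("disengaged now: %d\n", partition_disengaged(&p));
	return 0;
}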
@@ -329,7 +326,7 @@ xpc_mark_partition_active(struct xpc_partition *part)
329} 326}
330 327
331/* 328/*
332 * Notify XPC that the partition is down. 329 * Start the process of deactivating the specified partition.
333 */ 330 */
334void 331void
335xpc_deactivate_partition(const int line, struct xpc_partition *part, 332xpc_deactivate_partition(const int line, struct xpc_partition *part,
@@ -344,7 +341,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
344 spin_unlock_irqrestore(&part->act_lock, irq_flags); 341 spin_unlock_irqrestore(&part->act_lock, irq_flags);
345 if (reason == xpReactivating) { 342 if (reason == xpReactivating) {
346 /* we interrupt ourselves to reactivate partition */ 343 /* we interrupt ourselves to reactivate partition */
347 xpc_IPI_send_local_reactivate(part->reactivate_nasid); 344 xpc_request_partition_reactivation(part);
348 } 345 }
349 return; 346 return;
350 } 347 }
@@ -362,17 +359,13 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part,
362 359
363 spin_unlock_irqrestore(&part->act_lock, irq_flags); 360 spin_unlock_irqrestore(&part->act_lock, irq_flags);
364 361
365 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 362 /* ask remote partition to deactivate with regard to us */
366 xpc_request_partition_disengage(part); 363 xpc_request_partition_deactivation(part);
367 xpc_IPI_send_disengage(part);
368 364
369 /* set a timelimit on the disengage request */ 365 /* set a timelimit on the disengage phase of the deactivation request */
370 part->disengage_request_timeout = jiffies + 366 part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ);
371 (xpc_disengage_request_timelimit * HZ); 367 part->disengage_timer.expires = part->disengage_timeout;
372 part->disengage_request_timer.expires = 368 add_timer(&part->disengage_timer);
373 part->disengage_request_timeout;
374 add_timer(&part->disengage_request_timer);
375 }
376 369
377 dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n", 370 dev_dbg(xpc_part, "bringing partition %d down, reason = %d\n",
378 XPC_PARTID(part), reason); 371 XPC_PARTID(part), reason);
@@ -505,8 +498,8 @@ xpc_discovery(void)
505 continue; 498 continue;
506 } 499 }
507 500
508 xpc_initiate_partition_activation(remote_rp, 501 xpc_request_partition_activation(remote_rp,
509 remote_rp_pa, nasid); 502 remote_rp_pa, nasid);
510 } 503 }
511 } 504 }
512 505
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 4659f6cb885e..69d74bd56899 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -196,37 +196,85 @@ xpc_activate_IRQ_send_local_sn2(int from_nasid)
196 wake_up_interruptible(&xpc_activate_IRQ_wq); 196 wake_up_interruptible(&xpc_activate_IRQ_wq);
197} 197}
198 198
199static void 199/*
200xpc_IPI_send_local_activate_sn2(int from_nasid) 200 * IPIs associated with SGI_XPC_NOTIFY IRQ.
201{ 201 */
202 xpc_activate_IRQ_send_local_sn2(from_nasid);
203}
204 202
203/*
204 * Check to see if there is any channel activity to/from the specified
205 * partition.
206 */
205static void 207static void
206xpc_IPI_send_activated_sn2(struct xpc_partition *part) 208xpc_check_for_channel_activity_sn2(struct xpc_partition *part)
207{ 209{
208 xpc_activate_IRQ_send_sn2(part->remote_amos_page_pa, 210 u64 IPI_amo;
209 cnodeid_to_nasid(0), part->remote_act_nasid, 211 unsigned long irq_flags;
210 part->remote_act_phys_cpuid);
211}
212 212
213static void 213 IPI_amo = xpc_IPI_receive_sn2(part->sn.sn2.local_IPI_amo_va);
214xpc_IPI_send_local_reactivate_sn2(int from_nasid) 214 if (IPI_amo == 0)
215{ 215 return;
216 xpc_activate_IRQ_send_local_sn2(from_nasid); 216
217 spin_lock_irqsave(&part->IPI_lock, irq_flags);
218 part->local_IPI_amo |= IPI_amo;
219 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
220
221 dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
222 XPC_PARTID(part), IPI_amo);
223
224 xpc_wakeup_channel_mgr(part);
217} 225}
218 226
219static void 227/*
220xpc_IPI_send_disengage_sn2(struct xpc_partition *part) 228 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
229 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
230 * than one partition, we use an AMO_t structure per partition to indicate
231 * whether a partition has sent an IPI or not. If it has, then wake up the
232 * associated kthread to handle it.
233 *
234 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
235 * running on other partitions.
236 *
237 * Noteworthy Arguments:
238 *
239 * irq - Interrupt ReQuest number. NOT USED.
240 *
241 * dev_id - partid of IPI's potential sender.
242 */
243static irqreturn_t
244xpc_handle_notify_IRQ_sn2(int irq, void *dev_id)
221{ 245{
222 xpc_activate_IRQ_send_sn2(part->remote_amos_page_pa, 246 short partid = (short)(u64)dev_id;
223 cnodeid_to_nasid(0), part->remote_act_nasid, 247 struct xpc_partition *part = &xpc_partitions[partid];
224 part->remote_act_phys_cpuid); 248
249 DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
250
251 if (xpc_part_ref(part)) {
252 xpc_check_for_channel_activity_sn2(part);
253
254 xpc_part_deref(part);
255 }
256 return IRQ_HANDLED;
225} 257}
226 258
227/* 259/*
228 * IPIs associated with SGI_XPC_NOTIFY IRQ. 260 * Check to see if xpc_handle_notify_IRQ_sn2() dropped any IPIs on the floor
261 * because the write to their associated IPI amo completed after the IRQ/IPI
262 * was received.
229 */ 263 */
264static void
265xpc_dropped_notify_IRQ_check_sn2(struct xpc_partition *part)
266{
267 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
268
269 if (xpc_part_ref(part)) {
270 xpc_check_for_channel_activity_sn2(part);
271
272 part_sn2->dropped_notify_IRQ_timer.expires = jiffies +
273 XPC_P_DROPPED_IPI_WAIT_INTERVAL;
274 add_timer(&part_sn2->dropped_notify_IRQ_timer);
275 xpc_part_deref(part);
276 }
277}
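
Editor's note: the notify IRQ handler and the dropped-IPI timer above both funnel into the same check, because a sender's AMO write can land after the IRQ itself was serviced. The sketch below models that pattern with C11 atomics in a single process; `ipi_amo`, `send_channel_ipi()` and `check_for_channel_activity()` are invented names, and the atomic exchange is a simplification of the driver's FETCHOP-based receive path.

/*
 * Minimal userspace sketch of the pattern: senders OR per-channel flag
 * bits into one 64-bit word per partition; the IRQ handler and a
 * periodic "dropped IPI" check both drain it, so a flag written after
 * the IRQ fired is still picked up on the next timer pass.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t ipi_amo;	/* stands in for the partition's AMO */

static void send_channel_ipi(int channel, uint64_t flag)
{
	/* sender side: set the flag bits for this channel */
	atomic_fetch_or(&ipi_amo, flag << (channel * 8));
}

static void check_for_channel_activity(const char *who)
{
	/* receiver side: grab and clear all pending flags in one step */
	uint64_t flags = atomic_exchange(&ipi_amo, 0);

	if (flags != 0)
		printf("%s: IPI flags 0x%llx -> wake channel manager\n",
		       who, (unsigned long long)flags);
}

int main(void)
{
	send_channel_ipi(3, 0x01);
	check_for_channel_activity("notify IRQ handler");

	/* a flag that arrives after the IRQ was handled... */
	send_channel_ipi(5, 0x10);
	/* ...is recovered by the periodic dropped-IPI check */
	check_for_channel_activity("dropped-IPI timer");
	return 0;
}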
230 278
231/* 279/*
232 * Send an IPI to the remote partition that is associated with the 280 * Send an IPI to the remote partition that is associated with the
@@ -237,13 +285,14 @@ xpc_notify_IRQ_send_sn2(struct xpc_channel *ch, u8 ipi_flag,
237 char *ipi_flag_string, unsigned long *irq_flags) 285 char *ipi_flag_string, unsigned long *irq_flags)
238{ 286{
239 struct xpc_partition *part = &xpc_partitions[ch->partid]; 287 struct xpc_partition *part = &xpc_partitions[ch->partid];
288 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
240 enum xp_retval ret; 289 enum xp_retval ret;
241 290
242 if (likely(part->act_state != XPC_P_DEACTIVATING)) { 291 if (likely(part->act_state != XPC_P_DEACTIVATING)) {
243 ret = xpc_IPI_send_sn2(part->remote_IPI_amo_va, 292 ret = xpc_IPI_send_sn2(part_sn2->remote_IPI_amo_va,
244 (u64)ipi_flag << (ch->number * 8), 293 (u64)ipi_flag << (ch->number * 8),
245 part->remote_IPI_nasid, 294 part_sn2->remote_IPI_nasid,
246 part->remote_IPI_phys_cpuid, 295 part_sn2->remote_IPI_phys_cpuid,
247 SGI_XPC_NOTIFY); 296 SGI_XPC_NOTIFY);
248 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", 297 dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
249 ipi_flag_string, ch->partid, ch->number, ret); 298 ipi_flag_string, ch->partid, ch->number, ret);
@@ -263,7 +312,7 @@ xpc_notify_IRQ_send_sn2(struct xpc_channel *ch, u8 ipi_flag,
263/* 312/*
264 * Make it look like the remote partition, which is associated with the 313 * Make it look like the remote partition, which is associated with the
265 * specified channel, sent us an IPI. This faked IPI will be handled 314 * specified channel, sent us an IPI. This faked IPI will be handled
266 * by xpc_dropped_IPI_check(). 315 * by xpc_dropped_notify_IRQ_check_sn2().
267 */ 316 */
268static void 317static void
269xpc_notify_IRQ_send_local_sn2(struct xpc_channel *ch, u8 ipi_flag, 318xpc_notify_IRQ_send_local_sn2(struct xpc_channel *ch, u8 ipi_flag,
@@ -271,7 +320,7 @@ xpc_notify_IRQ_send_local_sn2(struct xpc_channel *ch, u8 ipi_flag,
271{ 320{
272 struct xpc_partition *part = &xpc_partitions[ch->partid]; 321 struct xpc_partition *part = &xpc_partitions[ch->partid];
273 322
274 FETCHOP_STORE_OP(TO_AMO((u64)&part->local_IPI_amo_va->variable), 323 FETCHOP_STORE_OP(TO_AMO((u64)&part->sn.sn2.local_IPI_amo_va->variable),
275 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8))); 324 FETCHOP_OR, ((u64)ipi_flag << (ch->number * 8)));
276 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n", 325 dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
277 ipi_flag_string, ch->partid, ch->number); 326 ipi_flag_string, ch->partid, ch->number);
@@ -281,7 +330,8 @@ xpc_notify_IRQ_send_local_sn2(struct xpc_channel *ch, u8 ipi_flag,
281 xpc_notify_IRQ_send_local_sn2(_ch, _ipi_f, #_ipi_f) 330 xpc_notify_IRQ_send_local_sn2(_ch, _ipi_f, #_ipi_f)
282 331
283static void 332static void
284xpc_IPI_send_closerequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) 333xpc_send_channel_closerequest_sn2(struct xpc_channel *ch,
334 unsigned long *irq_flags)
285{ 335{
286 struct xpc_openclose_args *args = ch->local_openclose_args; 336 struct xpc_openclose_args *args = ch->local_openclose_args;
287 337
@@ -290,13 +340,15 @@ xpc_IPI_send_closerequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
290} 340}
291 341
292static void 342static void
293xpc_IPI_send_closereply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) 343xpc_send_channel_closereply_sn2(struct xpc_channel *ch,
344 unsigned long *irq_flags)
294{ 345{
295 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREPLY, irq_flags); 346 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_CLOSEREPLY, irq_flags);
296} 347}
297 348
298static void 349static void
299xpc_IPI_send_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) 350xpc_send_channel_openrequest_sn2(struct xpc_channel *ch,
351 unsigned long *irq_flags)
300{ 352{
301 struct xpc_openclose_args *args = ch->local_openclose_args; 353 struct xpc_openclose_args *args = ch->local_openclose_args;
302 354
@@ -306,7 +358,7 @@ xpc_IPI_send_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
306} 358}
307 359
308static void 360static void
309xpc_IPI_send_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) 361xpc_send_channel_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
310{ 362{
311 struct xpc_openclose_args *args = ch->local_openclose_args; 363 struct xpc_openclose_args *args = ch->local_openclose_args;
312 364
@@ -317,13 +369,13 @@ xpc_IPI_send_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
317} 369}
318 370
319static void 371static void
320xpc_IPI_send_msgrequest_sn2(struct xpc_channel *ch) 372xpc_send_channel_msgrequest_sn2(struct xpc_channel *ch)
321{ 373{
322 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_MSGREQUEST, NULL); 374 XPC_NOTIFY_IRQ_SEND_SN2(ch, XPC_IPI_MSGREQUEST, NULL);
323} 375}
324 376
325static void 377static void
326xpc_IPI_send_local_msgrequest_sn2(struct xpc_channel *ch) 378xpc_send_channel_local_msgrequest_sn2(struct xpc_channel *ch)
327{ 379{
328 XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(ch, XPC_IPI_MSGREQUEST); 380 XPC_NOTIFY_IRQ_SEND_LOCAL_SN2(ch, XPC_IPI_MSGREQUEST);
329} 381}
@@ -334,10 +386,10 @@ xpc_IPI_send_local_msgrequest_sn2(struct xpc_channel *ch)
334 */ 386 */
335 387
336static void 388static void
337xpc_mark_partition_engaged_sn2(struct xpc_partition *part) 389xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
338{ 390{
339 unsigned long irq_flags; 391 unsigned long irq_flags;
340 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + 392 AMO_t *amo = (AMO_t *)__va(part->sn.sn2.remote_amos_page_pa +
341 (XPC_ENGAGED_PARTITIONS_AMO * 393 (XPC_ENGAGED_PARTITIONS_AMO *
342 sizeof(AMO_t))); 394 sizeof(AMO_t)));
343 395
@@ -360,10 +412,11 @@ xpc_mark_partition_engaged_sn2(struct xpc_partition *part)
360} 412}
361 413
362static void 414static void
363xpc_mark_partition_disengaged_sn2(struct xpc_partition *part) 415xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
364{ 416{
417 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
365 unsigned long irq_flags; 418 unsigned long irq_flags;
366 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa + 419 AMO_t *amo = (AMO_t *)__va(part_sn2->remote_amos_page_pa +
367 (XPC_ENGAGED_PARTITIONS_AMO * 420 (XPC_ENGAGED_PARTITIONS_AMO *
368 sizeof(AMO_t))); 421 sizeof(AMO_t)));
369 422
@@ -383,96 +436,44 @@ xpc_mark_partition_disengaged_sn2(struct xpc_partition *part)
383 xp_nofault_PIOR_target)); 436 xp_nofault_PIOR_target));
384 437
385 local_irq_restore(irq_flags); 438 local_irq_restore(irq_flags);
386}
387
388static void
389xpc_request_partition_disengage_sn2(struct xpc_partition *part)
390{
391 unsigned long irq_flags;
392 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
393 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
394
395 local_irq_save(irq_flags);
396 439
397 /* set bit corresponding to our partid in remote partition's AMO */
398 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
399 (1UL << sn_partition_id));
400 /* 440 /*
401 * We must always use the nofault function regardless of whether we 441 * Send activate IRQ to get other side to see that we've cleared our
402 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we 442 * bit in their engaged partitions AMO.
403 * didn't, we'd never know that the other partition is down and would
404 * keep sending IPIs and AMOs to it until the heartbeat times out.
405 */ 443 */
406 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo-> 444 xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
407 variable), 445 cnodeid_to_nasid(0),
408 xp_nofault_PIOR_target)); 446 part_sn2->activate_IRQ_nasid,
409 447 part_sn2->activate_IRQ_phys_cpuid);
410 local_irq_restore(irq_flags);
411} 448}
412 449
413static void 450static int
414xpc_cancel_partition_disengage_request_sn2(struct xpc_partition *part) 451xpc_partition_engaged_sn2(short partid)
415{
416 unsigned long irq_flags;
417 AMO_t *amo = (AMO_t *)__va(part->remote_amos_page_pa +
418 (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
419
420 local_irq_save(irq_flags);
421
422 /* clear bit corresponding to our partid in remote partition's AMO */
423 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
424 ~(1UL << sn_partition_id));
425 /*
426 * We must always use the nofault function regardless of whether we
427 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
428 * didn't, we'd never know that the other partition is down and would
429 * keep sending IPIs and AMOs to it until the heartbeat times out.
430 */
431 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
432 variable),
433 xp_nofault_PIOR_target));
434
435 local_irq_restore(irq_flags);
436}
437
438static u64
439xpc_partition_engaged_sn2(u64 partid_mask)
440{ 452{
441 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 453 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
442 454
443 /* return our partition's AMO variable ANDed with partid_mask */ 455 /* our partition's AMO variable ANDed with partid mask */
444 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & 456 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
445 partid_mask); 457 (1UL << partid)) != 0;
446} 458}
447 459
448static u64 460static int
449xpc_partition_disengage_requested_sn2(u64 partid_mask) 461xpc_any_partition_engaged_sn2(void)
450{
451 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
452
453 /* return our partition's AMO variable ANDed with partid_mask */
454 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
455 partid_mask);
456}
457
458static void
459xpc_clear_partition_engaged_sn2(u64 partid_mask)
460{ 462{
461 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; 463 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
462 464
463 /* clear bit(s) based on partid_mask in our partition's AMO */ 465 /* our partition's AMO variable */
464 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, 466 return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
465 ~partid_mask);
466} 467}
467 468
468static void 469static void
469xpc_clear_partition_disengage_request_sn2(u64 partid_mask) 470xpc_assume_partition_disengaged_sn2(short partid)
470{ 471{
471 AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO; 472 AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
472 473
473 /* clear bit(s) based on partid_mask in our partition's AMO */ 474 /* clear bit(s) based on partid mask in our partition's AMO */
474 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, 475 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
475 ~partid_mask); 476 ~(1UL << partid));
476} 477}
477 478
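
Editor's note: the engaged-partitions bookkeeping above is a single word per partition in which each remote partition sets or clears only the bit for its own partition id; "engaged" is a bit test, "any engaged" is a non-zero test, and "assume disengaged" forcibly clears a dead peer's bit. The following self-contained sketch uses C11 atomics in place of the SN2 FETCHOP AMO operations; all names are invented for illustration.

/*
 * Userspace sketch of the engaged-partitions word: the word lives on
 * the partition being engaged, and each remote partition touches only
 * the bit that corresponds to its own partition id.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t engaged_amo;	/* one bit per remote partid */

static void indicate_engaged(short my_partid)
{
	atomic_fetch_or(&engaged_amo, 1ULL << my_partid);
}

static void indicate_disengaged(short my_partid)
{
	atomic_fetch_and(&engaged_amo, ~(1ULL << my_partid));
}

static int partition_engaged(short partid)
{
	return (atomic_load(&engaged_amo) & (1ULL << partid)) != 0;
}

static int any_partition_engaged(void)
{
	return atomic_load(&engaged_amo) != 0;
}

/* local recovery path: forcibly clear a dead partition's bit */
static void assume_partition_disengaged(short partid)
{
	atomic_fetch_and(&engaged_amo, ~(1ULL << partid));
}

int main(void)
{
	indicate_engaged(2);
	printf("engaged(2)=%d any=%d\n", partition_engaged(2),
	       any_partition_engaged());

	indicate_disengaged(2);		/* normal, cooperative teardown */
	indicate_engaged(5);
	assume_partition_disengaged(5);	/* forced, after a timeout */
	printf("engaged(5)=%d any=%d\n", partition_engaged(5),
	       any_partition_engaged());
	return 0;
}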
478/* original protection values for each node */ 479/* original protection values for each node */
@@ -545,7 +546,6 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
545 xpc_vars_part = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + 546 xpc_vars_part = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
546 XPC_RP_VARS_SIZE); 547 XPC_RP_VARS_SIZE);
547 548
548
549 /* 549 /*
550 * Before clearing xpc_vars, see if a page of AMOs had been previously 550 * Before clearing xpc_vars, see if a page of AMOs had been previously
551 * allocated. If not we'll need to allocate one and set permissions 551 * allocated. If not we'll need to allocate one and set permissions
@@ -583,8 +583,8 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
583 memset(xpc_vars, 0, sizeof(struct xpc_vars_sn2)); 583 memset(xpc_vars, 0, sizeof(struct xpc_vars_sn2));
584 584
585 xpc_vars->version = XPC_V_VERSION; 585 xpc_vars->version = XPC_V_VERSION;
586 xpc_vars->act_nasid = cpuid_to_nasid(0); 586 xpc_vars->activate_IRQ_nasid = cpuid_to_nasid(0);
587 xpc_vars->act_phys_cpuid = cpu_physical_id(0); 587 xpc_vars->activate_IRQ_phys_cpuid = cpu_physical_id(0);
588 xpc_vars->vars_part_pa = __pa(xpc_vars_part); 588 xpc_vars->vars_part_pa = __pa(xpc_vars_part);
589 xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page); 589 xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
590 xpc_vars->amos_page = amos_page; /* save for next load of XPC */ 590 xpc_vars->amos_page = amos_page; /* save for next load of XPC */
@@ -599,7 +599,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
599 599
600 /* initialize the engaged remote partitions related AMO variables */ 600 /* initialize the engaged remote partitions related AMO variables */
601 (void)xpc_IPI_init_sn2(XPC_ENGAGED_PARTITIONS_AMO); 601 (void)xpc_IPI_init_sn2(XPC_ENGAGED_PARTITIONS_AMO);
602 (void)xpc_IPI_init_sn2(XPC_DISENGAGE_REQUEST_AMO); 602 (void)xpc_IPI_init_sn2(XPC_DEACTIVATE_REQUEST_AMO);
603 603
604 return xpSuccess; 604 return xpSuccess;
605} 605}
@@ -671,7 +671,7 @@ xpc_check_remote_hb_sn2(void)
671 671
672 /* pull the remote_hb cache line */ 672 /* pull the remote_hb cache line */
673 ret = xp_remote_memcpy(remote_vars, 673 ret = xp_remote_memcpy(remote_vars,
674 (void *)part->remote_vars_pa, 674 (void *)part->sn.sn2.remote_vars_pa,
675 XPC_RP_VARS_SIZE); 675 XPC_RP_VARS_SIZE);
676 if (ret != xpSuccess) { 676 if (ret != xpSuccess) {
677 XPC_DEACTIVATE_PARTITION(part, ret); 677 XPC_DEACTIVATE_PARTITION(part, ret);
@@ -726,10 +726,86 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
726} 726}
727 727
728static void 728static void
729xpc_initiate_partition_activation_sn2(struct xpc_rsvd_page *remote_rp, 729xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
730 u64 remote_rp_pa, int nasid) 730 u64 remote_rp_pa, int nasid)
731{ 731{
732 xpc_IPI_send_local_activate(nasid); 732 xpc_activate_IRQ_send_local_sn2(nasid);
733}
734
735static void
736xpc_request_partition_reactivation_sn2(struct xpc_partition *part)
737{
738 xpc_activate_IRQ_send_local_sn2(part->sn.sn2.activate_IRQ_nasid);
739}
740
741static void
742xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
743{
744 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
745 unsigned long irq_flags;
746 AMO_t *amo = (AMO_t *)__va(part_sn2->remote_amos_page_pa +
747 (XPC_DEACTIVATE_REQUEST_AMO * sizeof(AMO_t)));
748
749 local_irq_save(irq_flags);
750
751 /* set bit corresponding to our partid in remote partition's AMO */
752 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
753 (1UL << sn_partition_id));
754 /*
755 * We must always use the nofault function regardless of whether we
756 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
757 * didn't, we'd never know that the other partition is down and would
758 * keep sending IPIs and AMOs to it until the heartbeat times out.
759 */
760 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
761 variable),
762 xp_nofault_PIOR_target));
763
764 local_irq_restore(irq_flags);
765
766 /*
767 * Send activate IRQ to get other side to see that we've set our
768 * bit in their deactivate request AMO.
769 */
770 xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
771 cnodeid_to_nasid(0),
772 part_sn2->activate_IRQ_nasid,
773 part_sn2->activate_IRQ_phys_cpuid);
774}
775
776static void
777xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
778{
779 unsigned long irq_flags;
780 AMO_t *amo = (AMO_t *)__va(part->sn.sn2.remote_amos_page_pa +
781 (XPC_DEACTIVATE_REQUEST_AMO * sizeof(AMO_t)));
782
783 local_irq_save(irq_flags);
784
785 /* clear bit corresponding to our partid in remote partition's AMO */
786 FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
787 ~(1UL << sn_partition_id));
788 /*
789 * We must always use the nofault function regardless of whether we
790 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
791 * didn't, we'd never know that the other partition is down and would
792 * keep sending IPIs and AMOs to it until the heartbeat times out.
793 */
794 (void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
795 variable),
796 xp_nofault_PIOR_target));
797
798 local_irq_restore(irq_flags);
799}
800
801static int
802xpc_partition_deactivation_requested_sn2(short partid)
803{
804 AMO_t *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO;
805
806 /* our partition's AMO variable ANDed with partid mask */
807 return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
808 (1UL << partid)) != 0;
733} 809}
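
Editor's note: the request/cancel/poll trio above all index a fixed slot in the peer's exported page of AMO words, with the requester's own partition id selecting the bit inside that word. A rough standalone sketch of that addressing follows; the slot constants, `struct amo` and the array standing in for the remote page are invented, and the plain |=/&= operators stand in for the driver's FETCHOP writes and the nofault read-back that follows each of them.

/*
 * Sketch of deactivate-request addressing: the remote partition exports
 * one page of AMO words, a fixed slot selects the word used for
 * deactivate requests, and the bit within that word identifies which
 * partition is asking.
 */
#include <stdint.h>
#include <stdio.h>

#define ENGAGED_PARTITIONS_SLOT   0
#define DEACTIVATE_REQUEST_SLOT   1

struct amo { uint64_t variable; };

/* pretend this is the remote partition's page of AMOs */
static struct amo remote_amos_page[64];

static void request_partition_deactivation(short my_partid)
{
	struct amo *amo = &remote_amos_page[DEACTIVATE_REQUEST_SLOT];

	amo->variable |= 1ULL << my_partid;
	/* driver follows this with a nofault read-back and an activate IRQ */
}

static void cancel_partition_deactivation_request(short my_partid)
{
	struct amo *amo = &remote_amos_page[DEACTIVATE_REQUEST_SLOT];

	amo->variable &= ~(1ULL << my_partid);
}

static int partition_deactivation_requested(short partid)
{
	/* on the receiving side this word is in the *local* AMO page */
	struct amo *amo = &remote_amos_page[DEACTIVATE_REQUEST_SLOT];

	return (amo->variable & (1ULL << partid)) != 0;
}

int main(void)
{
	request_partition_deactivation(3);
	printf("requested by 3? %d\n", partition_deactivation_requested(3));
	cancel_partition_deactivation_request(3);
	printf("requested by 3? %d\n", partition_deactivation_requested(3));
	return 0;
}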
734 810
735/* 811/*
@@ -741,6 +817,8 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
741 u64 remote_vars_pa, 817 u64 remote_vars_pa,
742 struct xpc_vars_sn2 *remote_vars) 818 struct xpc_vars_sn2 *remote_vars)
743{ 819{
820 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
821
744 part->remote_rp_version = remote_rp_version; 822 part->remote_rp_version = remote_rp_version;
745 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n", 823 dev_dbg(xpc_part, " remote_rp_version = 0x%016x\n",
746 part->remote_rp_version); 824 part->remote_rp_version);
@@ -752,33 +830,34 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
752 part->remote_rp_pa = remote_rp_pa; 830 part->remote_rp_pa = remote_rp_pa;
753 dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa); 831 dev_dbg(xpc_part, " remote_rp_pa = 0x%016lx\n", part->remote_rp_pa);
754 832
755 part->remote_vars_pa = remote_vars_pa; 833 part_sn2->remote_vars_pa = remote_vars_pa;
756 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n", 834 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
757 part->remote_vars_pa); 835 part_sn2->remote_vars_pa);
758 836
759 part->last_heartbeat = remote_vars->heartbeat; 837 part->last_heartbeat = remote_vars->heartbeat;
760 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n", 838 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
761 part->last_heartbeat); 839 part->last_heartbeat);
762 840
763 part->remote_vars_part_pa = remote_vars->vars_part_pa; 841 part_sn2->remote_vars_part_pa = remote_vars->vars_part_pa;
764 dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n", 842 dev_dbg(xpc_part, " remote_vars_part_pa = 0x%016lx\n",
765 part->remote_vars_part_pa); 843 part_sn2->remote_vars_part_pa);
766 844
767 part->remote_act_nasid = remote_vars->act_nasid; 845 part_sn2->activate_IRQ_nasid = remote_vars->activate_IRQ_nasid;
768 dev_dbg(xpc_part, " remote_act_nasid = 0x%x\n", 846 dev_dbg(xpc_part, " activate_IRQ_nasid = 0x%x\n",
769 part->remote_act_nasid); 847 part_sn2->activate_IRQ_nasid);
770 848
771 part->remote_act_phys_cpuid = remote_vars->act_phys_cpuid; 849 part_sn2->activate_IRQ_phys_cpuid =
772 dev_dbg(xpc_part, " remote_act_phys_cpuid = 0x%x\n", 850 remote_vars->activate_IRQ_phys_cpuid;
773 part->remote_act_phys_cpuid); 851 dev_dbg(xpc_part, " activate_IRQ_phys_cpuid = 0x%x\n",
852 part_sn2->activate_IRQ_phys_cpuid);
774 853
775 part->remote_amos_page_pa = remote_vars->amos_page_pa; 854 part_sn2->remote_amos_page_pa = remote_vars->amos_page_pa;
776 dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n", 855 dev_dbg(xpc_part, " remote_amos_page_pa = 0x%lx\n",
777 part->remote_amos_page_pa); 856 part_sn2->remote_amos_page_pa);
778 857
779 part->remote_vars_version = remote_vars->version; 858 part_sn2->remote_vars_version = remote_vars->version;
780 dev_dbg(xpc_part, " remote_vars_version = 0x%x\n", 859 dev_dbg(xpc_part, " remote_vars_version = 0x%x\n",
781 part->remote_vars_version); 860 part_sn2->remote_vars_version);
782} 861}
783 862
784/* 863/*
@@ -807,6 +886,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
807 unsigned long remote_rp_stamp = 0; 886 unsigned long remote_rp_stamp = 0;
808 short partid; 887 short partid;
809 struct xpc_partition *part; 888 struct xpc_partition *part;
889 struct xpc_partition_sn2 *part_sn2;
810 enum xp_retval ret; 890 enum xp_retval ret;
811 891
812 /* pull over the reserved page structure */ 892 /* pull over the reserved page structure */
@@ -822,11 +902,11 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
822 902
823 remote_vars_pa = remote_rp->sn.vars_pa; 903 remote_vars_pa = remote_rp->sn.vars_pa;
824 remote_rp_version = remote_rp->version; 904 remote_rp_version = remote_rp->version;
825 if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) 905 remote_rp_stamp = remote_rp->stamp;
826 remote_rp_stamp = remote_rp->stamp;
827 906
828 partid = remote_rp->SAL_partid; 907 partid = remote_rp->SAL_partid;
829 part = &xpc_partitions[partid]; 908 part = &xpc_partitions[partid];
909 part_sn2 = &part->sn.sn2;
830 910
831 /* pull over the cross partition variables */ 911 /* pull over the cross partition variables */
832 912
@@ -834,7 +914,6 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
834 914
835 ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars); 915 ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars);
836 if (ret != xpSuccess) { 916 if (ret != xpSuccess) {
837
838 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " 917 dev_warn(xpc_part, "unable to get XPC variables from nasid %d, "
839 "which sent interrupt, reason=%d\n", nasid, ret); 918 "which sent interrupt, reason=%d\n", nasid, ret);
840 919
@@ -855,18 +934,12 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
855 &remote_rp_stamp, remote_rp_pa, 934 &remote_rp_stamp, remote_rp_pa,
856 remote_vars_pa, remote_vars); 935 remote_vars_pa, remote_vars);
857 936
858 if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) { 937 if (xpc_partition_deactivation_requested_sn2(partid)) {
859 if (xpc_partition_disengage_requested(1UL << partid)) { 938 /*
860 /* 939 * Other side is waiting on us to deactivate even though
861 * Other side is waiting on us to disengage, 940 * we already have.
862 * even though we already have. 941 */
863 */ 942 return;
864 return;
865 }
866
867 } else {
868 /* other side doesn't support disengage requests */
869 xpc_clear_partition_disengage_request(1UL << partid);
870 } 943 }
871 944
872 xpc_activate_partition(part); 945 xpc_activate_partition(part);
@@ -874,93 +947,30 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
874 } 947 }
875 948
876 DBUG_ON(part->remote_rp_version == 0); 949 DBUG_ON(part->remote_rp_version == 0);
877 DBUG_ON(part->remote_vars_version == 0); 950 DBUG_ON(part_sn2->remote_vars_version == 0);
878
879 if (!XPC_SUPPORTS_RP_STAMP(part->remote_rp_version)) {
880 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(part->
881 remote_vars_version));
882
883 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
884 DBUG_ON(XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->
885 version));
886 /* see if the other side rebooted */
887 if (part->remote_amos_page_pa ==
888 remote_vars->amos_page_pa &&
889 xpc_hb_allowed(sn_partition_id,
890 &remote_vars->heartbeating_to_mask)) {
891 /* doesn't look that way, so ignore the IPI */
892 return;
893 }
894 }
895 951
896 /* 952 if (remote_rp_stamp != part->remote_rp_stamp) {
897 * Other side rebooted and previous XPC didn't support the
898 * disengage request, so we don't need to do anything special.
899 */
900 953
901 xpc_update_partition_info_sn2(part, remote_rp_version, 954 /* the other side rebooted */
902 &remote_rp_stamp, remote_rp_pa,
903 remote_vars_pa, remote_vars);
904 part->reactivate_nasid = nasid;
905 XPC_DEACTIVATE_PARTITION(part, xpReactivating);
906 return;
907 }
908 955
909 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)); 956 DBUG_ON(xpc_partition_engaged_sn2(partid));
910 957 DBUG_ON(xpc_partition_deactivation_requested_sn2(partid));
911 if (!XPC_SUPPORTS_RP_STAMP(remote_rp_version)) {
912 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
913
914 /*
915 * Other side rebooted and previous XPC did support the
916 * disengage request, but the new one doesn't.
917 */
918
919 xpc_clear_partition_engaged(1UL << partid);
920 xpc_clear_partition_disengage_request(1UL << partid);
921 958
922 xpc_update_partition_info_sn2(part, remote_rp_version, 959 xpc_update_partition_info_sn2(part, remote_rp_version,
923 &remote_rp_stamp, remote_rp_pa, 960 &remote_rp_stamp, remote_rp_pa,
924 remote_vars_pa, remote_vars); 961 remote_vars_pa, remote_vars);
925 reactivate = 1; 962 reactivate = 1;
926
927 } else {
928 DBUG_ON(!XPC_SUPPORTS_DISENGAGE_REQUEST(remote_vars->version));
929
930 if (remote_rp_stamp != part->remote_rp_stamp) {
931
932 /*
933 * Other side rebooted and the previous XPC did support
934 * the disengage request, as does the new one.
935 */
936
937 DBUG_ON(xpc_partition_engaged(1UL << partid));
938 DBUG_ON(xpc_partition_disengage_requested(1UL <<
939 partid));
940
941 xpc_update_partition_info_sn2(part, remote_rp_version,
942 &remote_rp_stamp,
943 remote_rp_pa,
944 remote_vars_pa,
945 remote_vars);
946 reactivate = 1;
947 }
948 } 963 }
949 964
950 if (part->disengage_request_timeout > 0 && 965 if (part->disengage_timeout > 0 && !xpc_partition_disengaged(part)) {
951 !xpc_partition_disengaged(part)) {
952 /* still waiting on other side to disengage from us */ 966 /* still waiting on other side to disengage from us */
953 return; 967 return;
954 } 968 }
955 969
956 if (reactivate) { 970 if (reactivate)
957 part->reactivate_nasid = nasid;
958 XPC_DEACTIVATE_PARTITION(part, xpReactivating); 971 XPC_DEACTIVATE_PARTITION(part, xpReactivating);
959 972 else if (xpc_partition_deactivation_requested_sn2(partid))
960 } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) &&
961 xpc_partition_disengage_requested(1UL << partid)) {
962 XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown); 973 XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown);
963 }
964} 974}
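
Editor's note: with the version checks gone, the hunk above keys reboot detection entirely off the reserved-page stamp — a stamp that differs from the cached one means the peer booted again and must be torn down and reactivated. The toy model below captures just that comparison; `struct remote_info` and `handle_activate_irq()` are invented names, not driver symbols.

/*
 * Simplified model of reboot detection: the reserved-page stamp is set
 * once per boot of the remote partition, so a stamp that differs from
 * the cached one means the peer rebooted and the partition must be
 * deactivated and then reactivated from scratch.
 */
#include <stdio.h>

struct remote_info {
	unsigned long rp_stamp;		/* cached from last activation */
	int active;
};

static void handle_activate_irq(struct remote_info *ri,
				unsigned long new_stamp)
{
	if (!ri->active) {
		ri->rp_stamp = new_stamp;
		ri->active = 1;
		printf("activating partition (stamp %lu)\n", new_stamp);
		return;
	}

	if (new_stamp != ri->rp_stamp) {
		/* peer rebooted: record the new stamp, then deactivate
		 * so the partition can be reactivated with fresh state */
		ri->rp_stamp = new_stamp;
		printf("peer rebooted, reactivating\n");
	}
}

int main(void)
{
	struct remote_info ri = { 0, 0 };

	handle_activate_irq(&ri, 1111);	/* first contact */
	handle_activate_irq(&ri, 1111);	/* same boot: nothing to do */
	handle_activate_irq(&ri, 2222);	/* new boot: reactivate */
	return 0;
}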
965 975
966/* 976/*
@@ -1038,6 +1048,7 @@ xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
1038static enum xp_retval 1048static enum xp_retval
1039xpc_setup_infrastructure_sn2(struct xpc_partition *part) 1049xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1040{ 1050{
1051 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1041 enum xp_retval retval; 1052 enum xp_retval retval;
1042 int ret; 1053 int ret;
1043 int cpuid; 1054 int cpuid;
@@ -1060,28 +1071,29 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1060 1071
1061 /* allocate all the required GET/PUT values */ 1072 /* allocate all the required GET/PUT values */
1062 1073
1063 part->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, 1074 part_sn2->local_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
1064 GFP_KERNEL, 1075 GFP_KERNEL,
1065 &part->local_GPs_base); 1076 &part_sn2->
1066 if (part->local_GPs == NULL) { 1077 local_GPs_base);
1078 if (part_sn2->local_GPs == NULL) {
1067 dev_err(xpc_chan, "can't get memory for local get/put " 1079 dev_err(xpc_chan, "can't get memory for local get/put "
1068 "values\n"); 1080 "values\n");
1069 retval = xpNoMemory; 1081 retval = xpNoMemory;
1070 goto out_1; 1082 goto out_1;
1071 } 1083 }
1072 1084
1073 part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, 1085 part_sn2->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
1074 GFP_KERNEL, 1086 GFP_KERNEL,
1075 &part-> 1087 &part_sn2->
1076 remote_GPs_base); 1088 remote_GPs_base);
1077 if (part->remote_GPs == NULL) { 1089 if (part_sn2->remote_GPs == NULL) {
1078 dev_err(xpc_chan, "can't get memory for remote get/put " 1090 dev_err(xpc_chan, "can't get memory for remote get/put "
1079 "values\n"); 1091 "values\n");
1080 retval = xpNoMemory; 1092 retval = xpNoMemory;
1081 goto out_2; 1093 goto out_2;
1082 } 1094 }
1083 1095
1084 part->remote_GPs_pa = 0; 1096 part_sn2->remote_GPs_pa = 0;
1085 1097
1086 /* allocate all the required open and close args */ 1098 /* allocate all the required open and close args */
1087 1099
@@ -1103,22 +1115,23 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1103 goto out_4; 1115 goto out_4;
1104 } 1116 }
1105 1117
1106 part->remote_openclose_args_pa = 0; 1118 part_sn2->remote_openclose_args_pa = 0;
1107 1119
1108 part->local_IPI_amo_va = xpc_IPI_init_sn2(partid); 1120 part_sn2->local_IPI_amo_va = xpc_IPI_init_sn2(partid);
1109 part->local_IPI_amo = 0; 1121 part->local_IPI_amo = 0;
1110 spin_lock_init(&part->IPI_lock); 1122 spin_lock_init(&part->IPI_lock);
1111 1123
1112 part->remote_IPI_nasid = 0; 1124 part_sn2->remote_IPI_nasid = 0;
1113 part->remote_IPI_phys_cpuid = 0; 1125 part_sn2->remote_IPI_phys_cpuid = 0;
1114 part->remote_IPI_amo_va = NULL; 1126 part_sn2->remote_IPI_amo_va = NULL;
1115 1127
1116 atomic_set(&part->channel_mgr_requests, 1); 1128 atomic_set(&part->channel_mgr_requests, 1);
1117 init_waitqueue_head(&part->channel_mgr_wq); 1129 init_waitqueue_head(&part->channel_mgr_wq);
1118 1130
1119 sprintf(part->IPI_owner, "xpc%02d", partid); 1131 sprintf(part_sn2->IPI_owner, "xpc%02d", partid);
1120 ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED, 1132 ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2,
1121 part->IPI_owner, (void *)(u64)partid); 1133 IRQF_SHARED, part_sn2->IPI_owner,
1134 (void *)(u64)partid);
1122 if (ret != 0) { 1135 if (ret != 0) {
1123 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " 1136 dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
1124 "errno=%d\n", -ret); 1137 "errno=%d\n", -ret);
@@ -1127,9 +1140,10 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1127 } 1140 }
1128 1141
1129 /* Setup a timer to check for dropped IPIs */ 1142 /* Setup a timer to check for dropped IPIs */
1130 timer = &part->dropped_IPI_timer; 1143 timer = &part_sn2->dropped_notify_IRQ_timer;
1131 init_timer(timer); 1144 init_timer(timer);
1132 timer->function = (void (*)(unsigned long))xpc_dropped_IPI_check; 1145 timer->function =
1146 (void (*)(unsigned long))xpc_dropped_notify_IRQ_check_sn2;
1133 timer->data = (unsigned long)part; 1147 timer->data = (unsigned long)part;
1134 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT_INTERVAL; 1148 timer->expires = jiffies + XPC_P_DROPPED_IPI_WAIT_INTERVAL;
1135 add_timer(timer); 1149 add_timer(timer);
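
Editor's note: the request_irq() call above registers one handler instance per partition on the shared SGI_XPC_NOTIFY vector, passing the partition id itself through the opaque dev_id cookie rather than a pointer. The userspace model below shows only that cookie trick; `fake_request_irq()`, `handler_t` and the return value standing in for IRQ_HANDLED are illustrative inventions, not the kernel API.

/*
 * Userspace model of the dev_id trick: the partition id is smuggled
 * through the opaque cookie, so one registration per partition lets a
 * single handler tell the instances apart when the IRQ is shared.
 */
#include <stdint.h>
#include <stdio.h>

typedef int (*handler_t)(int irq, void *dev_id);

static int handle_notify_irq(int irq, void *dev_id)
{
	short partid = (short)(uintptr_t)dev_id;	/* recover the id */

	printf("irq %d: checking channel activity for partition %d\n",
	       irq, partid);
	return 1;	/* stands in for IRQ_HANDLED */
}

static void fake_request_irq(int irq, handler_t handler, void *dev_id)
{
	/* a real shared IRQ would invoke every registered handler */
	handler(irq, dev_id);
}

int main(void)
{
	short partid;

	for (partid = 0; partid < 3; partid++)
		fake_request_irq(55, handle_notify_irq,
				 (void *)(uintptr_t)partid);
	return 0;
}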
@@ -1146,7 +1160,7 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1146 ch->number = ch_number; 1160 ch->number = ch_number;
1147 ch->flags = XPC_C_DISCONNECTED; 1161 ch->flags = XPC_C_DISCONNECTED;
1148 1162
1149 ch->local_GP = &part->local_GPs[ch_number]; 1163 ch->sn.sn2.local_GP = &part_sn2->local_GPs[ch_number];
1150 ch->local_openclose_args = 1164 ch->local_openclose_args =
1151 &part->local_openclose_args[ch_number]; 1165 &part->local_openclose_args[ch_number];
1152 1166
@@ -1158,7 +1172,7 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1158 atomic_set(&ch->n_to_notify, 0); 1172 atomic_set(&ch->n_to_notify, 0);
1159 1173
1160 spin_lock_init(&ch->lock); 1174 spin_lock_init(&ch->lock);
1161 mutex_init(&ch->msg_to_pull_mutex); 1175 mutex_init(&ch->sn.sn2.msg_to_pull_mutex);
1162 init_completion(&ch->wdisconnect_wait); 1176 init_completion(&ch->wdisconnect_wait);
1163 1177
1164 atomic_set(&ch->n_on_msg_allocate_wq, 0); 1178 atomic_set(&ch->n_on_msg_allocate_wq, 0);
@@ -1179,10 +1193,10 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
1179 * The setting of the magic # indicates that these per partition 1193 * The setting of the magic # indicates that these per partition
1180 * specific variables are ready to be used. 1194 * specific variables are ready to be used.
1181 */ 1195 */
1182 xpc_vars_part[partid].GPs_pa = __pa(part->local_GPs); 1196 xpc_vars_part[partid].GPs_pa = __pa(part_sn2->local_GPs);
1183 xpc_vars_part[partid].openclose_args_pa = 1197 xpc_vars_part[partid].openclose_args_pa =
1184 __pa(part->local_openclose_args); 1198 __pa(part->local_openclose_args);
1185 xpc_vars_part[partid].IPI_amo_pa = __pa(part->local_IPI_amo_va); 1199 xpc_vars_part[partid].IPI_amo_pa = __pa(part_sn2->local_IPI_amo_va);
1186 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ 1200 cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */
1187 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid); 1201 xpc_vars_part[partid].IPI_nasid = cpuid_to_nasid(cpuid);
1188 xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid); 1202 xpc_vars_part[partid].IPI_phys_cpuid = cpu_physical_id(cpuid);
@@ -1199,11 +1213,11 @@ out_4:
1199 kfree(part->local_openclose_args_base); 1213 kfree(part->local_openclose_args_base);
1200 part->local_openclose_args = NULL; 1214 part->local_openclose_args = NULL;
1201out_3: 1215out_3:
1202 kfree(part->remote_GPs_base); 1216 kfree(part_sn2->remote_GPs_base);
1203 part->remote_GPs = NULL; 1217 part_sn2->remote_GPs = NULL;
1204out_2: 1218out_2:
1205 kfree(part->local_GPs_base); 1219 kfree(part_sn2->local_GPs_base);
1206 part->local_GPs = NULL; 1220 part_sn2->local_GPs = NULL;
1207out_1: 1221out_1:
1208 kfree(part->channels); 1222 kfree(part->channels);
1209 part->channels = NULL; 1223 part->channels = NULL;
@@ -1217,6 +1231,7 @@ out_1:
1217static void 1231static void
1218xpc_teardown_infrastructure_sn2(struct xpc_partition *part) 1232xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
1219{ 1233{
1234 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1220 short partid = XPC_PARTID(part); 1235 short partid = XPC_PARTID(part);
1221 1236
1222 /* 1237 /*
@@ -1248,19 +1263,19 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
1248 part->setup_state = XPC_P_TORNDOWN; 1263 part->setup_state = XPC_P_TORNDOWN;
1249 1264
1250 /* in case we've still got outstanding timers registered... */ 1265 /* in case we've still got outstanding timers registered... */
1251 del_timer_sync(&part->dropped_IPI_timer); 1266 del_timer_sync(&part_sn2->dropped_notify_IRQ_timer);
1252 1267
1253 kfree(part->remote_openclose_args_base); 1268 kfree(part->remote_openclose_args_base);
1254 part->remote_openclose_args = NULL; 1269 part->remote_openclose_args = NULL;
1255 kfree(part->local_openclose_args_base); 1270 kfree(part->local_openclose_args_base);
1256 part->local_openclose_args = NULL; 1271 part->local_openclose_args = NULL;
1257 kfree(part->remote_GPs_base); 1272 kfree(part_sn2->remote_GPs_base);
1258 part->remote_GPs = NULL; 1273 part_sn2->remote_GPs = NULL;
1259 kfree(part->local_GPs_base); 1274 kfree(part_sn2->local_GPs_base);
1260 part->local_GPs = NULL; 1275 part_sn2->local_GPs = NULL;
1261 kfree(part->channels); 1276 kfree(part->channels);
1262 part->channels = NULL; 1277 part->channels = NULL;
1263 part->local_IPI_amo_va = NULL; 1278 part_sn2->local_IPI_amo_va = NULL;
1264} 1279}
1265 1280
1266/* 1281/*
@@ -1300,6 +1315,7 @@ xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
1300static enum xp_retval 1315static enum xp_retval
1301xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) 1316xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1302{ 1317{
1318 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1303 u8 buffer[L1_CACHE_BYTES * 2]; 1319 u8 buffer[L1_CACHE_BYTES * 2];
1304 struct xpc_vars_part_sn2 *pulled_entry_cacheline = 1320 struct xpc_vars_part_sn2 *pulled_entry_cacheline =
1305 (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer); 1321 (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
@@ -1310,11 +1326,11 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1310 1326
1311 /* pull the cacheline that contains the variables we're interested in */ 1327 /* pull the cacheline that contains the variables we're interested in */
1312 1328
1313 DBUG_ON(part->remote_vars_part_pa != 1329 DBUG_ON(part_sn2->remote_vars_part_pa !=
1314 L1_CACHE_ALIGN(part->remote_vars_part_pa)); 1330 L1_CACHE_ALIGN(part_sn2->remote_vars_part_pa));
1315 DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2); 1331 DBUG_ON(sizeof(struct xpc_vars_part_sn2) != L1_CACHE_BYTES / 2);
1316 1332
1317 remote_entry_pa = part->remote_vars_part_pa + 1333 remote_entry_pa = part_sn2->remote_vars_part_pa +
1318 sn_partition_id * sizeof(struct xpc_vars_part_sn2); 1334 sn_partition_id * sizeof(struct xpc_vars_part_sn2);
1319 1335
1320 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1)); 1336 remote_entry_cacheline_pa = (remote_entry_pa & ~(L1_CACHE_BYTES - 1));
@@ -1364,13 +1380,13 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1364 1380
1365 /* the variables we imported look to be valid */ 1381 /* the variables we imported look to be valid */
1366 1382
1367 part->remote_GPs_pa = pulled_entry->GPs_pa; 1383 part_sn2->remote_GPs_pa = pulled_entry->GPs_pa;
1368 part->remote_openclose_args_pa = 1384 part_sn2->remote_openclose_args_pa =
1369 pulled_entry->openclose_args_pa; 1385 pulled_entry->openclose_args_pa;
1370 part->remote_IPI_amo_va = 1386 part_sn2->remote_IPI_amo_va =
1371 (AMO_t *)__va(pulled_entry->IPI_amo_pa); 1387 (AMO_t *)__va(pulled_entry->IPI_amo_pa);
1372 part->remote_IPI_nasid = pulled_entry->IPI_nasid; 1388 part_sn2->remote_IPI_nasid = pulled_entry->IPI_nasid;
1373 part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; 1389 part_sn2->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid;
1374 1390
1375 if (part->nchannels > pulled_entry->nchannels) 1391 if (part->nchannels > pulled_entry->nchannels)
1376 part->nchannels = pulled_entry->nchannels; 1392 part->nchannels = pulled_entry->nchannels;
@@ -1394,6 +1410,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
1394static enum xp_retval 1410static enum xp_retval
1395xpc_make_first_contact_sn2(struct xpc_partition *part) 1411xpc_make_first_contact_sn2(struct xpc_partition *part)
1396{ 1412{
1413 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1397 enum xp_retval ret; 1414 enum xp_retval ret;
1398 1415
1399 /* 1416 /*
@@ -1406,7 +1423,7 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
1406 * we should get the same page for remote_amos_page_pa after module 1423 * we should get the same page for remote_amos_page_pa after module
1407 * reloads and system reboots. 1424 * reloads and system reboots.
1408 */ 1425 */
1409 if (sn_register_xp_addr_region(part->remote_amos_page_pa, 1426 if (sn_register_xp_addr_region(part_sn2->remote_amos_page_pa,
1410 PAGE_SIZE, 1) < 0) { 1427 PAGE_SIZE, 1) < 0) {
1411 dev_warn(xpc_part, "xpc_activating(%d) failed to register " 1428 dev_warn(xpc_part, "xpc_activating(%d) failed to register "
1412 "xp_addr region\n", XPC_PARTID(part)); 1429 "xp_addr region\n", XPC_PARTID(part));
@@ -1416,7 +1433,14 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
1416 return ret; 1433 return ret;
1417 } 1434 }
1418 1435
1419 xpc_IPI_send_activated(part); 1436 /*
1437 * Send activate IRQ to get other side to activate if they've not
1438 * already begun to do so.
1439 */
1440 xpc_activate_IRQ_send_sn2(part_sn2->remote_amos_page_pa,
1441 cnodeid_to_nasid(0),
1442 part_sn2->activate_IRQ_nasid,
1443 part_sn2->activate_IRQ_phys_cpuid);
1420 1444
1421 while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) { 1445 while ((ret = xpc_pull_remote_vars_part_sn2(part)) != xpSuccess) {
1422 if (ret != xpRetry) { 1446 if (ret != xpRetry) {
@@ -1443,6 +1467,7 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
1443static u64 1467static u64
1444xpc_get_IPI_flags_sn2(struct xpc_partition *part) 1468xpc_get_IPI_flags_sn2(struct xpc_partition *part)
1445{ 1469{
1470 struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
1446 unsigned long irq_flags; 1471 unsigned long irq_flags;
1447 u64 IPI_amo; 1472 u64 IPI_amo;
1448 enum xp_retval ret; 1473 enum xp_retval ret;
@@ -1459,9 +1484,9 @@ xpc_get_IPI_flags_sn2(struct xpc_partition *part)
1459 spin_unlock_irqrestore(&part->IPI_lock, irq_flags); 1484 spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
1460 1485
1461 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { 1486 if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) {
1462 ret = xpc_pull_remote_cachelines_sn2(part, 1487 ret = xpc_pull_remote_cachelines_sn2(part, part->
1463 part->remote_openclose_args, 1488 remote_openclose_args,
1464 (void *)part-> 1489 (void *)part_sn2->
1465 remote_openclose_args_pa, 1490 remote_openclose_args_pa,
1466 XPC_OPENCLOSE_ARGS_SIZE); 1491 XPC_OPENCLOSE_ARGS_SIZE);
1467 if (ret != xpSuccess) { 1492 if (ret != xpSuccess) {
@@ -1477,8 +1502,8 @@ xpc_get_IPI_flags_sn2(struct xpc_partition *part)
1477 } 1502 }
1478 1503
1479 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) { 1504 if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_amo)) {
1480 ret = xpc_pull_remote_cachelines_sn2(part, part->remote_GPs, 1505 ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
1481 (void *)part->remote_GPs_pa, 1506 (void *)part_sn2->remote_GPs_pa,
1482 XPC_GP_SIZE); 1507 XPC_GP_SIZE);
1483 if (ret != xpSuccess) { 1508 if (ret != xpSuccess) {
1484 XPC_DEACTIVATE_PARTITION(part, ret); 1509 XPC_DEACTIVATE_PARTITION(part, ret);
@@ -1494,28 +1519,220 @@ xpc_get_IPI_flags_sn2(struct xpc_partition *part)
1494 return IPI_amo; 1519 return IPI_amo;
1495} 1520}
1496 1521
1522/*
1523 * Notify those who wanted to be notified upon delivery of their message.
1524 */
1525static void
1526xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put)
1527{
1528 struct xpc_notify *notify;
1529 u8 notify_type;
1530 s64 get = ch->sn.sn2.w_remote_GP.get - 1;
1531
1532 while (++get < put && atomic_read(&ch->n_to_notify) > 0) {
1533
1534 notify = &ch->notify_queue[get % ch->local_nentries];
1535
1536 /*
1537 * See if the notify entry indicates it was associated with
 1538 * a message whose sender wants to be notified. It is possible
1539 * that it is, but someone else is doing or has done the
1540 * notification.
1541 */
1542 notify_type = notify->type;
1543 if (notify_type == 0 ||
1544 cmpxchg(&notify->type, notify_type, 0) != notify_type) {
1545 continue;
1546 }
1547
1548 DBUG_ON(notify_type != XPC_N_CALL);
1549
1550 atomic_dec(&ch->n_to_notify);
1551
1552 if (notify->func != NULL) {
1553 dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, "
1554 "msg_number=%ld, partid=%d, channel=%d\n",
1555 (void *)notify, get, ch->partid, ch->number);
1556
1557 notify->func(reason, ch->partid, ch->number,
1558 notify->key);
1559
1560 dev_dbg(xpc_chan, "notify->func() returned, "
1561 "notify=0x%p, msg_number=%ld, partid=%d, "
1562 "channel=%d\n", (void *)notify, get,
1563 ch->partid, ch->number);
1564 }
1565 }
1566}
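
Editor's note: the loop above uses cmpxchg on notify->type so that exactly one path runs a sender's callback even if the normal delivery path and the disconnect path walk the queue concurrently. A compact sketch of that claim-before-notify step, using C11 atomics in place of the kernel's cmpxchg, follows; `struct notify_entry` and `maybe_notify()` are invented for illustration.

/*
 * Sketch of the claim step: whoever successfully swaps the type field
 * from XPC_N_CALL to 0 owns the callback, so a sender is notified
 * exactly once.
 */
#include <stdatomic.h>
#include <stdio.h>

#define XPC_N_CALL	0x01

struct notify_entry {
	_Atomic unsigned char type;
	void (*func)(int reason, void *key);
	void *key;
};

static void maybe_notify(struct notify_entry *n, int reason)
{
	unsigned char expected = n->type;

	if (expected == 0)
		return;		/* nothing outstanding for this slot */

	/* claim the entry; lose the race and someone else notifies */
	if (!atomic_compare_exchange_strong(&n->type, &expected, 0))
		return;

	if (n->func != NULL)
		n->func(reason, n->key);
}

static void report(int reason, void *key)
{
	printf("delivered, reason=%d key=%s\n", reason, (const char *)key);
}

int main(void)
{
	struct notify_entry n = { .func = report, .key = "msg 7" };

	atomic_store(&n.type, XPC_N_CALL);
	maybe_notify(&n, 0);	/* runs the callback */
	maybe_notify(&n, 0);	/* already claimed: does nothing */
	return 0;
}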
1567
1568static void
1569xpc_notify_senders_of_disconnect_sn2(struct xpc_channel *ch)
1570{
1571 xpc_notify_senders_sn2(ch, ch->reason, ch->sn.sn2.w_local_GP.put);
1572}
1573
1574/*
1575 * Clear some of the msg flags in the local message queue.
1576 */
1577static inline void
1578xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch)
1579{
1580 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1581 struct xpc_msg *msg;
1582 s64 get;
1583
1584 get = ch_sn2->w_remote_GP.get;
1585 do {
1586 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
1587 (get % ch->local_nentries) *
1588 ch->msg_size);
1589 msg->flags = 0;
1590 } while (++get < ch_sn2->remote_GP.get);
1591}
1592
1593/*
1594 * Clear some of the msg flags in the remote message queue.
1595 */
1596static inline void
1597xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch)
1598{
1599 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1600 struct xpc_msg *msg;
1601 s64 put;
1602
1603 put = ch_sn2->w_remote_GP.put;
1604 do {
1605 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
1606 (put % ch->remote_nentries) *
1607 ch->msg_size);
1608 msg->flags = 0;
1609 } while (++put < ch_sn2->remote_GP.put);
1610}
1611
1612static void
1613xpc_process_msg_IPI_sn2(struct xpc_partition *part, int ch_number)
1614{
1615 struct xpc_channel *ch = &part->channels[ch_number];
1616 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1617 int nmsgs_sent;
1618
1619 ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number];
1620
1621 /* See what, if anything, has changed for each connected channel */
1622
1623 xpc_msgqueue_ref(ch);
1624
1625 if (ch_sn2->w_remote_GP.get == ch_sn2->remote_GP.get &&
1626 ch_sn2->w_remote_GP.put == ch_sn2->remote_GP.put) {
1627 /* nothing changed since GPs were last pulled */
1628 xpc_msgqueue_deref(ch);
1629 return;
1630 }
1631
1632 if (!(ch->flags & XPC_C_CONNECTED)) {
1633 xpc_msgqueue_deref(ch);
1634 return;
1635 }
1636
1637 /*
1638 * First check to see if messages recently sent by us have been
1639 * received by the other side. (The remote GET value will have
1640 * changed since we last looked at it.)
1641 */
1642
1643 if (ch_sn2->w_remote_GP.get != ch_sn2->remote_GP.get) {
1644
1645 /*
1646 * We need to notify any senders that want to be notified
1647 * that their sent messages have been received by their
1648 * intended recipients. We need to do this before updating
1649 * w_remote_GP.get so that we don't allocate the same message
1650 * queue entries prematurely (see xpc_allocate_msg()).
1651 */
1652 if (atomic_read(&ch->n_to_notify) > 0) {
1653 /*
1654 * Notify senders that messages sent have been
1655 * received and delivered by the other side.
1656 */
1657 xpc_notify_senders_sn2(ch, xpMsgDelivered,
1658 ch_sn2->remote_GP.get);
1659 }
1660
1661 /*
1662 * Clear msg->flags in previously sent messages, so that
1663 * they're ready for xpc_allocate_msg().
1664 */
1665 xpc_clear_local_msgqueue_flags_sn2(ch);
1666
1667 ch_sn2->w_remote_GP.get = ch_sn2->remote_GP.get;
1668
1669 dev_dbg(xpc_chan, "w_remote_GP.get changed to %ld, partid=%d, "
1670 "channel=%d\n", ch_sn2->w_remote_GP.get, ch->partid,
1671 ch->number);
1672
1673 /*
1674 * If anyone was waiting for message queue entries to become
1675 * available, wake them up.
1676 */
1677 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
1678 wake_up(&ch->msg_allocate_wq);
1679 }
1680
1681 /*
1682 * Now check for newly sent messages by the other side. (The remote
1683 * PUT value will have changed since we last looked at it.)
1684 */
1685
1686 if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) {
1687 /*
1688 * Clear msg->flags in previously received messages, so that
1689 * they're ready for xpc_get_deliverable_msg().
1690 */
1691 xpc_clear_remote_msgqueue_flags_sn2(ch);
1692
1693 ch_sn2->w_remote_GP.put = ch_sn2->remote_GP.put;
1694
1695 dev_dbg(xpc_chan, "w_remote_GP.put changed to %ld, partid=%d, "
1696 "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid,
1697 ch->number);
1698
1699 nmsgs_sent = ch_sn2->w_remote_GP.put - ch_sn2->w_local_GP.get;
1700 if (nmsgs_sent > 0) {
1701 dev_dbg(xpc_chan, "msgs waiting to be copied and "
1702 "delivered=%d, partid=%d, channel=%d\n",
1703 nmsgs_sent, ch->partid, ch->number);
1704
1705 if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)
1706 xpc_activate_kthreads(ch, nmsgs_sent);
1707 }
1708 }
1709
1710 xpc_msgqueue_deref(ch);
1711}
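
Editor's note: the GET/PUT bookkeeping in the function above relies on GP values that only ever grow, with the queue slot derived by taking the value modulo the queue size; an advance of the remote GET means our sent messages were consumed, an advance of the remote PUT means new messages are waiting. The toy single-process model below shows that arithmetic; `struct gp`, `peer_sends()` and `deliver_pending()` are invented names.

/*
 * Toy model of GET/PUT: GP values grow monotonically, the slot index is
 * GP modulo the queue size, and "messages waiting" is simply the
 * remote PUT minus the local GET.
 */
#include <stdint.h>
#include <stdio.h>

#define NENTRIES 8

struct gp { int64_t get, put; };

static const char *msgqueue[NENTRIES];
static struct gp local_gp;	/* our side's view */
static struct gp remote_gp;	/* what the peer advertises */

static void peer_sends(const char *text)
{
	msgqueue[remote_gp.put % NENTRIES] = text;
	remote_gp.put++;	/* peer advances its PUT after writing */
}

static void deliver_pending(void)
{
	int64_t waiting = remote_gp.put - local_gp.get;

	printf("%lld message(s) waiting\n", (long long)waiting);
	while (local_gp.get < remote_gp.put) {
		printf("  delivering: %s\n",
		       msgqueue[local_gp.get % NENTRIES]);
		local_gp.get++;	/* advance GET once consumed */
	}
}

int main(void)
{
	peer_sends("hello");
	peer_sends("world");
	deliver_pending();
	return 0;
}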
1712
1497static struct xpc_msg * 1713static struct xpc_msg *
1498xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) 1714xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
1499{ 1715{
1500 struct xpc_partition *part = &xpc_partitions[ch->partid]; 1716 struct xpc_partition *part = &xpc_partitions[ch->partid];
1717 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1501 struct xpc_msg *remote_msg, *msg; 1718 struct xpc_msg *remote_msg, *msg;
1502 u32 msg_index, nmsgs; 1719 u32 msg_index, nmsgs;
1503 u64 msg_offset; 1720 u64 msg_offset;
1504 enum xp_retval ret; 1721 enum xp_retval ret;
1505 1722
1506 if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { 1723 if (mutex_lock_interruptible(&ch_sn2->msg_to_pull_mutex) != 0) {
1507 /* we were interrupted by a signal */ 1724 /* we were interrupted by a signal */
1508 return NULL; 1725 return NULL;
1509 } 1726 }
1510 1727
1511 while (get >= ch->next_msg_to_pull) { 1728 while (get >= ch_sn2->next_msg_to_pull) {
1512 1729
1513 /* pull as many messages as are ready and able to be pulled */ 1730 /* pull as many messages as are ready and able to be pulled */
1514 1731
1515 msg_index = ch->next_msg_to_pull % ch->remote_nentries; 1732 msg_index = ch_sn2->next_msg_to_pull % ch->remote_nentries;
1516 1733
1517 DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put); 1734 DBUG_ON(ch_sn2->next_msg_to_pull >= ch_sn2->w_remote_GP.put);
1518 nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull; 1735 nmsgs = ch_sn2->w_remote_GP.put - ch_sn2->next_msg_to_pull;
1519 if (msg_index + nmsgs > ch->remote_nentries) { 1736 if (msg_index + nmsgs > ch->remote_nentries) {
1520 /* ignore the ones that wrap the msg queue for now */ 1737 /* ignore the ones that wrap the msg queue for now */
1521 nmsgs = ch->remote_nentries - msg_index; 1738 nmsgs = ch->remote_nentries - msg_index;
@@ -1532,19 +1749,19 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
1532 1749
1533 dev_dbg(xpc_chan, "failed to pull %d msgs starting with" 1750 dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
1534 " msg %ld from partition %d, channel=%d, " 1751 " msg %ld from partition %d, channel=%d, "
1535 "ret=%d\n", nmsgs, ch->next_msg_to_pull, 1752 "ret=%d\n", nmsgs, ch_sn2->next_msg_to_pull,
1536 ch->partid, ch->number, ret); 1753 ch->partid, ch->number, ret);
1537 1754
1538 XPC_DEACTIVATE_PARTITION(part, ret); 1755 XPC_DEACTIVATE_PARTITION(part, ret);
1539 1756
1540 mutex_unlock(&ch->msg_to_pull_mutex); 1757 mutex_unlock(&ch_sn2->msg_to_pull_mutex);
1541 return NULL; 1758 return NULL;
1542 } 1759 }
1543 1760
1544 ch->next_msg_to_pull += nmsgs; 1761 ch_sn2->next_msg_to_pull += nmsgs;
1545 } 1762 }
1546 1763
1547 mutex_unlock(&ch->msg_to_pull_mutex); 1764 mutex_unlock(&ch_sn2->msg_to_pull_mutex);
1548 1765
1549 /* return the message we were looking for */ 1766 /* return the message we were looking for */
1550 msg_offset = (get % ch->remote_nentries) * ch->msg_size; 1767 msg_offset = (get % ch->remote_nentries) * ch->msg_size;
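
Editor's note: the pull loop above copies remote messages in batches and deliberately cuts a batch short at the end of the circular queue ("ignore the ones that wrap the msg queue for now"), so each remote copy covers one contiguous region. The standalone sketch below shows only that clamping arithmetic; `plan_pull()` and its parameters are invented for illustration.

/*
 * Sketch of batch clamping: a batch starts at next_msg_to_pull and is
 * cut short at the wrap point, so messages past the wrap are fetched
 * on a later pass.
 */
#include <stdint.h>
#include <stdio.h>

static void plan_pull(int64_t next_to_pull, int64_t remote_put,
		      uint32_t nentries)
{
	uint32_t msg_index = next_to_pull % nentries;
	uint32_t nmsgs = remote_put - next_to_pull;

	if (msg_index + nmsgs > nentries)
		nmsgs = nentries - msg_index;	/* stop at the wrap point */

	printf("pull %u msg(s) starting at slot %u\n", nmsgs, msg_index);
}

int main(void)
{
	/* 6 messages outstanding, but only 3 slots left before the wrap */
	plan_pull(13, 19, 16);
	return 0;
}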
@@ -1553,12 +1770,19 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
1553 return msg; 1770 return msg;
1554} 1771}
1555 1772
1773static int
1774xpc_n_of_deliverable_msgs_sn2(struct xpc_channel *ch)
1775{
1776 return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get;
1777}
1778
1556/* 1779/*
1557 * Get a message to be delivered. 1780 * Get a message to be delivered.
1558 */ 1781 */
1559static struct xpc_msg * 1782static struct xpc_msg *
1560xpc_get_deliverable_msg_sn2(struct xpc_channel *ch) 1783xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
1561{ 1784{
1785 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1562 struct xpc_msg *msg = NULL; 1786 struct xpc_msg *msg = NULL;
1563 s64 get; 1787 s64 get;
1564 1788
@@ -1566,9 +1790,9 @@ xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
1566 if (ch->flags & XPC_C_DISCONNECTING) 1790 if (ch->flags & XPC_C_DISCONNECTING)
1567 break; 1791 break;
1568 1792
1569 get = ch->w_local_GP.get; 1793 get = ch_sn2->w_local_GP.get;
1570 rmb(); /* guarantee that .get loads before .put */ 1794 rmb(); /* guarantee that .get loads before .put */
1571 if (get == ch->w_remote_GP.put) 1795 if (get == ch_sn2->w_remote_GP.put)
1572 break; 1796 break;
1573 1797
1574 /* There are messages waiting to be pulled and delivered. 1798 /* There are messages waiting to be pulled and delivered.
@@ -1578,7 +1802,7 @@ xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
1578 * to try again for the next one. 1802 * to try again for the next one.
1579 */ 1803 */
1580 1804
1581 if (cmpxchg(&ch->w_local_GP.get, get, get + 1) == get) { 1805 if (cmpxchg(&ch_sn2->w_local_GP.get, get, get + 1) == get) {
1582 /* we got the entry referenced by get */ 1806 /* we got the entry referenced by get */
1583 1807
1584 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, " 1808 dev_dbg(xpc_chan, "w_local_GP.get changed to %ld, "
@@ -1609,6 +1833,7 @@ xpc_get_deliverable_msg_sn2(struct xpc_channel *ch)
1609static void 1833static void
1610xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) 1834xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1611{ 1835{
1836 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1612 struct xpc_msg *msg; 1837 struct xpc_msg *msg;
1613 s64 put = initial_put + 1; 1838 s64 put = initial_put + 1;
1614 int send_IPI = 0; 1839 int send_IPI = 0;
@@ -1616,7 +1841,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1616 while (1) { 1841 while (1) {
1617 1842
1618 while (1) { 1843 while (1) {
1619 if (put == ch->w_local_GP.put) 1844 if (put == ch_sn2->w_local_GP.put)
1620 break; 1845 break;
1621 1846
1622 msg = (struct xpc_msg *)((u64)ch->local_msgqueue + 1847 msg = (struct xpc_msg *)((u64)ch->local_msgqueue +
@@ -1634,10 +1859,10 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1634 break; 1859 break;
1635 } 1860 }
1636 1861
1637 if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != 1862 if (cmpxchg_rel(&ch_sn2->local_GP->put, initial_put, put) !=
1638 initial_put) { 1863 initial_put) {
1639 /* someone else beat us to it */ 1864 /* someone else beat us to it */
1640 DBUG_ON(ch->local_GP->put < initial_put); 1865 DBUG_ON(ch_sn2->local_GP->put < initial_put);
1641 break; 1866 break;
1642 } 1867 }
1643 1868
@@ -1657,7 +1882,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put)
1657 } 1882 }
1658 1883
1659 if (send_IPI) 1884 if (send_IPI)
1660 xpc_IPI_send_msgrequest_sn2(ch); 1885 xpc_send_channel_msgrequest_sn2(ch);
1661} 1886}
1662 1887
1663/* 1888/*
@@ -1668,6 +1893,7 @@ static enum xp_retval
1668xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, 1893xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
1669 struct xpc_msg **address_of_msg) 1894 struct xpc_msg **address_of_msg)
1670{ 1895{
1896 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1671 struct xpc_msg *msg; 1897 struct xpc_msg *msg;
1672 enum xp_retval ret; 1898 enum xp_retval ret;
1673 s64 put; 1899 s64 put;
@@ -1681,9 +1907,9 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
1681 1907
1682 while (1) { 1908 while (1) {
1683 1909
1684 put = ch->w_local_GP.put; 1910 put = ch_sn2->w_local_GP.put;
1685 rmb(); /* guarantee that .put loads before .get */ 1911 rmb(); /* guarantee that .put loads before .get */
1686 if (put - ch->w_remote_GP.get < ch->local_nentries) { 1912 if (put - ch_sn2->w_remote_GP.get < ch->local_nentries) {
1687 1913
1688 /* There are available message entries. We need to try 1914 /* There are available message entries. We need to try
1689 * to secure one for ourselves. We'll do this by trying 1915 * to secure one for ourselves. We'll do this by trying
@@ -1691,7 +1917,8 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
1691 * doesn't beat us to it. If they do, we'll have to 1917 * doesn't beat us to it. If they do, we'll have to
1692 * try again. 1918 * try again.
1693 */ 1919 */
1694 if (cmpxchg(&ch->w_local_GP.put, put, put + 1) == put) { 1920 if (cmpxchg(&ch_sn2->w_local_GP.put, put, put + 1) ==
1921 put) {
1695 /* we got the entry referenced by put */ 1922 /* we got the entry referenced by put */
1696 break; 1923 break;
1697 } 1924 }
@@ -1708,7 +1935,7 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags,
1708 * GP values as if an IPI was sent by the other side. 1935 * GP values as if an IPI was sent by the other side.
1709 */ 1936 */
1710 if (ret == xpTimeout) 1937 if (ret == xpTimeout)
1711 xpc_IPI_send_local_msgrequest_sn2(ch); 1938 xpc_send_channel_local_msgrequest_sn2(ch);
1712 1939
1713 if (flags & XPC_NOWAIT) 1940 if (flags & XPC_NOWAIT)
1714 return xpNoWait; 1941 return xpNoWait;
@@ -1810,13 +2037,13 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload,
1810 2037
1811 /* 2038 /*
1812 * The preceding store of msg->flags must occur before the following 2039 * The preceding store of msg->flags must occur before the following
1813 * load of ch->local_GP->put. 2040 * load of local_GP->put.
1814 */ 2041 */
1815 mb(); 2042 mb();
1816 2043
1817 /* see if the message is next in line to be sent, if so send it */ 2044 /* see if the message is next in line to be sent, if so send it */
1818 2045
1819 put = ch->local_GP->put; 2046 put = ch->sn.sn2.local_GP->put;
1820 if (put == msg_number) 2047 if (put == msg_number)
1821 xpc_send_msgs_sn2(ch, put); 2048 xpc_send_msgs_sn2(ch, put);
1822 2049
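
The mb() in the hunk above enforces exactly the ordering the comment describes: the sender's store of msg->flags (marking the message ready) must complete before its load of local_GP->put, which decides whether this message is next in line to be sent. A minimal sketch of that publish-then-check pattern follows, with C11 atomic_thread_fence() standing in for the kernel's mb(); it is not part of the patch and all names are hypothetical.

/*
 * Illustrative sketch (not part of this patch): make the store of
 * msg->flags globally visible before loading the queue's put counter,
 * mirroring the mb() above.  Hypothetical names throughout.
 */
#include <stdatomic.h>
#include <stdint.h>

struct demo_msg { _Atomic uint8_t flags; };
#define DEMO_MSG_READY 0x01

static void demo_publish_then_check(struct demo_msg *msg,
				    _Atomic int64_t *queue_put,
				    int64_t my_msg_number,
				    void (*advance_queue)(int64_t))
{
	/* mark the message as ready to be sent */
	atomic_store_explicit(&msg->flags, DEMO_MSG_READY,
			      memory_order_relaxed);

	/* the store above must not be reordered after the load below */
	atomic_thread_fence(memory_order_seq_cst);

	/* if our message is next in line, advance the queue ourselves */
	if (atomic_load_explicit(queue_put, memory_order_relaxed) ==
	    my_msg_number)
		advance_queue(my_msg_number);
}
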
@@ -1833,6 +2060,7 @@ out_1:
1833static void 2060static void
1834xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) 2061xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
1835{ 2062{
2063 struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
1836 struct xpc_msg *msg; 2064 struct xpc_msg *msg;
1837 s64 get = initial_get + 1; 2065 s64 get = initial_get + 1;
1838 int send_IPI = 0; 2066 int send_IPI = 0;
@@ -1840,7 +2068,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
1840 while (1) { 2068 while (1) {
1841 2069
1842 while (1) { 2070 while (1) {
1843 if (get == ch->w_local_GP.get) 2071 if (get == ch_sn2->w_local_GP.get)
1844 break; 2072 break;
1845 2073
1846 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + 2074 msg = (struct xpc_msg *)((u64)ch->remote_msgqueue +
@@ -1859,10 +2087,10 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
1859 break; 2087 break;
1860 } 2088 }
1861 2089
1862 if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != 2090 if (cmpxchg_rel(&ch_sn2->local_GP->get, initial_get, get) !=
1863 initial_get) { 2091 initial_get) {
1864 /* someone else beat us to it */ 2092 /* someone else beat us to it */
1865 DBUG_ON(ch->local_GP->get <= initial_get); 2093 DBUG_ON(ch_sn2->local_GP->get <= initial_get);
1866 break; 2094 break;
1867 } 2095 }
1868 2096
@@ -1882,7 +2110,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
1882 } 2110 }
1883 2111
1884 if (send_IPI) 2112 if (send_IPI)
1885 xpc_IPI_send_msgrequest_sn2(ch); 2113 xpc_send_channel_msgrequest_sn2(ch);
1886} 2114}
1887 2115
1888static void 2116static void
@@ -1902,7 +2130,7 @@ xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg)
1902 2130
1903 /* 2131 /*
1904 * The preceding store of msg->flags must occur before the following 2132 * The preceding store of msg->flags must occur before the following
1905 * load of ch->local_GP->get. 2133 * load of local_GP->get.
1906 */ 2134 */
1907 mb(); 2135 mb();
1908 2136
@@ -1910,7 +2138,7 @@ xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg)
1910 * See if this message is next in line to be acknowledged as having 2138 * See if this message is next in line to be acknowledged as having
1911 * been delivered. 2139 * been delivered.
1912 */ 2140 */
1913 get = ch->local_GP->get; 2141 get = ch->sn.sn2.local_GP->get;
1914 if (get == msg_number) 2142 if (get == msg_number)
1915 xpc_acknowledge_msgs_sn2(ch, get, msg->flags); 2143 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
1916} 2144}
@@ -1928,36 +2156,35 @@ xpc_init_sn2(void)
1928 xpc_heartbeat_exit = xpc_heartbeat_exit_sn2; 2156 xpc_heartbeat_exit = xpc_heartbeat_exit_sn2;
1929 xpc_check_remote_hb = xpc_check_remote_hb_sn2; 2157 xpc_check_remote_hb = xpc_check_remote_hb_sn2;
1930 2158
1931 xpc_initiate_partition_activation = 2159 xpc_request_partition_activation = xpc_request_partition_activation_sn2;
1932 xpc_initiate_partition_activation_sn2; 2160 xpc_request_partition_reactivation =
2161 xpc_request_partition_reactivation_sn2;
2162 xpc_request_partition_deactivation =
2163 xpc_request_partition_deactivation_sn2;
2164 xpc_cancel_partition_deactivation_request =
2165 xpc_cancel_partition_deactivation_request_sn2;
2166
1933 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2; 2167 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
1934 xpc_setup_infrastructure = xpc_setup_infrastructure_sn2; 2168 xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
1935 xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2; 2169 xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
1936 xpc_make_first_contact = xpc_make_first_contact_sn2; 2170 xpc_make_first_contact = xpc_make_first_contact_sn2;
1937 xpc_get_IPI_flags = xpc_get_IPI_flags_sn2; 2171 xpc_get_IPI_flags = xpc_get_IPI_flags_sn2;
2172 xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2;
2173 xpc_process_msg_IPI = xpc_process_msg_IPI_sn2;
2174 xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2;
1938 xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2; 2175 xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2;
1939 2176
1940 xpc_mark_partition_engaged = xpc_mark_partition_engaged_sn2; 2177 xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2;
1941 xpc_mark_partition_disengaged = xpc_mark_partition_disengaged_sn2;
1942 xpc_request_partition_disengage = xpc_request_partition_disengage_sn2;
1943 xpc_cancel_partition_disengage_request =
1944 xpc_cancel_partition_disengage_request_sn2;
1945 xpc_partition_engaged = xpc_partition_engaged_sn2; 2178 xpc_partition_engaged = xpc_partition_engaged_sn2;
1946 xpc_partition_disengage_requested = 2179 xpc_any_partition_engaged = xpc_any_partition_engaged_sn2;
1947 xpc_partition_disengage_requested_sn2; 2180 xpc_indicate_partition_disengaged =
1948 xpc_clear_partition_engaged = xpc_clear_partition_engaged_sn2; 2181 xpc_indicate_partition_disengaged_sn2;
1949 xpc_clear_partition_disengage_request = 2182 xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2;
1950 xpc_clear_partition_disengage_request_sn2; 2183
1951 2184 xpc_send_channel_closerequest = xpc_send_channel_closerequest_sn2;
1952 xpc_IPI_send_local_activate = xpc_IPI_send_local_activate_sn2; 2185 xpc_send_channel_closereply = xpc_send_channel_closereply_sn2;
1953 xpc_IPI_send_activated = xpc_IPI_send_activated_sn2; 2186 xpc_send_channel_openrequest = xpc_send_channel_openrequest_sn2;
1954 xpc_IPI_send_local_reactivate = xpc_IPI_send_local_reactivate_sn2; 2187 xpc_send_channel_openreply = xpc_send_channel_openreply_sn2;
1955 xpc_IPI_send_disengage = xpc_IPI_send_disengage_sn2;
1956
1957 xpc_IPI_send_closerequest = xpc_IPI_send_closerequest_sn2;
1958 xpc_IPI_send_closereply = xpc_IPI_send_closereply_sn2;
1959 xpc_IPI_send_openrequest = xpc_IPI_send_openrequest_sn2;
1960 xpc_IPI_send_openreply = xpc_IPI_send_openreply_sn2;
1961 2188
1962 xpc_send_msg = xpc_send_msg_sn2; 2189 xpc_send_msg = xpc_send_msg_sn2;
1963 xpc_received_msg = xpc_received_msg_sn2; 2190 xpc_received_msg = xpc_received_msg_sn2;
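
The xpc_init_sn2() hunk above swaps the old xpc_IPI_send_* and xpc_mark_partition_* hooks for the renamed xpc_send_channel_* and xpc_indicate_partition_* entry points, but the mechanism is unchanged: hardware-independent XPC code calls through global function pointers that the active back end (sn2 or uv) installs once at init time. A minimal sketch of that dispatch pattern follows; it is not part of the patch and all names are hypothetical.

/*
 * Illustrative sketch (not part of this patch): back-end selection via
 * global function pointers filled in by an init routine, in the spirit of
 * xpc_init_sn2()/xpc_init_uv() above.  Hypothetical names throughout.
 */
#include <stdio.h>

static void (*demo_request_activation)(int partid);

static void demo_request_activation_sn2(int partid)
{
	printf("sn2: send activate IRQ to partition %d\n", partid);
}

static void demo_request_activation_uv(int partid)
{
	printf("uv: queue activate message for partition %d\n", partid);
}

static void demo_init(int is_uv)
{
	/* pick the back end once; callers never test the platform again */
	demo_request_activation = is_uv ? demo_request_activation_uv
					: demo_request_activation_sn2;
}

int main(void)
{
	demo_init(0);
	demo_request_activation(3);	/* dispatches to the sn2 variant */
	return 0;
}
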
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 32c577b8d0df..c53b229cb04e 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -63,8 +63,8 @@ xpc_heartbeat_exit_uv(void)
63} 63}
64 64
65static void 65static void
66xpc_initiate_partition_activation_uv(struct xpc_rsvd_page *remote_rp, 66xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
67 u64 remote_rp_pa, int nasid) 67 u64 remote_rp_pa, int nasid)
68{ 68{
69 short partid = remote_rp->SAL_partid; 69 short partid = remote_rp->SAL_partid;
70 struct xpc_partition *part = &xpc_partitions[partid]; 70 struct xpc_partition *part = &xpc_partitions[partid];
@@ -78,6 +78,12 @@ xpc_initiate_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
78 xpc_IPI_send_local_activate_uv(part); 78 xpc_IPI_send_local_activate_uv(part);
79} 79}
80 80
81static void
82xpc_request_partition_reactivation_uv(struct xpc_partition *part)
83{
84 xpc_IPI_send_local_activate_uv(part);
85}
86
81/* 87/*
82 * Setup the infrastructure necessary to support XPartition Communication 88 * Setup the infrastructure necessary to support XPartition Communication
83 * between the specified remote partition and the local one. 89 * between the specified remote partition and the local one.
@@ -128,8 +134,9 @@ xpc_init_uv(void)
128 xpc_increment_heartbeat = xpc_increment_heartbeat_uv; 134 xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
129 xpc_heartbeat_init = xpc_heartbeat_init_uv; 135 xpc_heartbeat_init = xpc_heartbeat_init_uv;
130 xpc_heartbeat_exit = xpc_heartbeat_exit_uv; 136 xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
131 xpc_initiate_partition_activation = 137 xpc_request_partition_activation = xpc_request_partition_activation_uv;
132 xpc_initiate_partition_activation_uv; 138 xpc_request_partition_reactivation =
139 xpc_request_partition_reactivation_uv;
133 xpc_setup_infrastructure = xpc_setup_infrastructure_uv; 140 xpc_setup_infrastructure = xpc_setup_infrastructure_uv;
134 xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv; 141 xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv;
135 xpc_make_first_contact = xpc_make_first_contact_uv; 142 xpc_make_first_contact = xpc_make_first_contact_uv;