Diffstat (limited to 'arch/ia64/sn/kernel/xpc.h')
-rw-r--r--  arch/ia64/sn/kernel/xpc.h  366
1 files changed, 324 insertions, 42 deletions
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index e5f5a4e51f70..fbcedc7c27fa 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -57,7 +57,7 @@
 #define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
 
 #define XPC_HB_DEFAULT_INTERVAL		5	/* incr HB every x secs */
-#define XPC_HB_CHECK_DEFAULT_TIMEOUT	20	/* check HB every x secs */
+#define XPC_HB_CHECK_DEFAULT_INTERVAL	20	/* check HB every x secs */
 
 /* define the process name of HB checker and the CPU it is pinned to */
 #define XPC_HB_CHECK_THREAD_NAME	"xpc_hb"
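
The XPC_NASID_FROM_W_B() macro kept as context above maps a (mask word, bit) position back to a nasid: only even-numbered nasids are tracked, 64 of them per mask word. The sketch below is not part of the patch; it is a standalone userspace illustration of that round trip, with the inverse word/bit mappings re-derived here for illustration (the header's own XPC_NASID_W_INDEX()/XPC_NASID_B_INDEX() macros are defined outside this diff).

/* Illustrative userspace sketch (not part of the patch): round-trip the
 * nasid <-> (mask word, bit) mapping implied by XPC_NASID_FROM_W_B, where
 * only even nasids are tracked and each mask word holds 64 bits. */
#include <assert.h>
#include <stdio.h>

#define NASID_FROM_W_B(w, b)	(((w) * 64 + (b)) * 2)	/* as in the header */
#define NASID_W_INDEX(n)	(((n) / 2) / 64)	/* re-derived inverse */
#define NASID_B_INDEX(n)	(((n) / 2) % 64)	/* re-derived inverse */

int main(void)
{
	int nasid;

	for (nasid = 0; nasid <= 510; nasid += 2) {
		int w = NASID_W_INDEX(nasid);
		int b = NASID_B_INDEX(nasid);

		assert(NASID_FROM_W_B(w, b) == nasid);
	}
	printf("nasid 6 -> word %d, bit %d\n",
	       NASID_W_INDEX(6), NASID_B_INDEX(6));	/* word 0, bit 3 */
	return 0;
}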
@@ -67,34 +67,82 @@
 #define XPC_DISCOVERY_THREAD_NAME	"xpc_discovery"
 
 
-#define XPC_HB_ALLOWED(_p, _v)	((_v)->heartbeating_to_mask & (1UL << (_p)))
-#define XPC_ALLOW_HB(_p, _v)	(_v)->heartbeating_to_mask |= (1UL << (_p))
-#define XPC_DISALLOW_HB(_p, _v)	(_v)->heartbeating_to_mask &= (~(1UL << (_p)))
-
-
 /*
- * Reserved Page provided by SAL.
+ * the reserved page
+ *
+ * SAL reserves one page of memory per partition for XPC. Though a full page
+ * in length (16384 bytes), its starting address is not page aligned, but it
+ * is cacheline aligned. The reserved page consists of the following:
+ *
+ *   reserved page header
+ *
+ *     The first cacheline of the reserved page contains the header
+ *     (struct xpc_rsvd_page). Before SAL initialization has completed,
+ *     SAL has set up the following fields of the reserved page header:
+ *     SAL_signature, SAL_version, partid, and nasids_size. The other
+ *     fields are set up by XPC. (xpc_rsvd_page points to the local
+ *     partition's reserved page.)
  *
- * SAL provides one page per partition of reserved memory.  When SAL
- * initialization is complete, SAL_signature, SAL_version, partid,
- * part_nasids, and mach_nasids are set.
+ *   part_nasids mask
+ *   mach_nasids mask
+ *
+ *     SAL also sets up two bitmaps (or masks), one that reflects the actual
+ *     nasids in this partition (part_nasids), and the other that reflects
+ *     the actual nasids in the entire machine (mach_nasids). We're only
+ *     interested in the even numbered nasids (which contain the processors
+ *     and/or memory), so we only need half as many bits to represent the
+ *     nasids. The part_nasids mask is located starting at the first cacheline
+ *     following the reserved page header. The mach_nasids mask follows right
+ *     after the part_nasids mask. The size in bytes of each mask is reflected
+ *     by the reserved page header field 'nasids_size'. (Local partition's
+ *     mask pointers are xpc_part_nasids and xpc_mach_nasids.)
+ *
+ *   vars
+ *   vars part
+ *
+ *     Immediately following the mach_nasids mask are the XPC variables
+ *     required by other partitions. First are those that are generic to all
+ *     partitions (vars), followed on the next available cacheline by those
+ *     which are partition specific (vars part). These are setup by XPC.
+ *     (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
  *
 * Note: Until vars_pa is set, the partition XPC code has not been initialized.
  */
 struct xpc_rsvd_page {
-	u64 SAL_signature;	/* SAL unique signature */
-	u64 SAL_version;	/* SAL specified version */
-	u8 partid;		/* partition ID from SAL */
+	u64 SAL_signature;	/* SAL: unique signature */
+	u64 SAL_version;	/* SAL: version */
+	u8 partid;		/* SAL: partition ID */
 	u8 version;
-	u8 pad[6];		/* pad to u64 align */
+	u8 pad1[6];		/* align to next u64 in cacheline */
 	volatile u64 vars_pa;
-	u64 part_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
-	u64 mach_nasids[XP_NASID_MASK_WORDS] ____cacheline_aligned;
+	struct timespec stamp;	/* time when reserved page was setup by XPC */
+	u64 pad2[9];		/* align to last u64 in cacheline */
+	u64 nasids_size;	/* SAL: size of each nasid mask in bytes */
 };
-#define XPC_RP_VERSION _XPC_VERSION(1,0) /* version 1.0 of the reserved page */
 
-#define XPC_RSVD_PAGE_ALIGNED_SIZE \
-			(L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)))
+#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+
+#define XPC_SUPPORTS_RP_STAMP(_version) \
+			(_version >= _XPC_VERSION(1,1))
+
+/*
+ * compare stamps - the return value is:
+ *
+ *	< 0,	if stamp1 < stamp2
+ *	= 0,	if stamp1 == stamp2
+ *	> 0,	if stamp1 > stamp2
+ */
+static inline int
+xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
+{
+	int ret;
+
+
+	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+		ret = stamp1->tv_nsec - stamp2->tv_nsec;
+	}
+	return ret;
+}
 
 
 /*
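
The new stamp field and xpc_compare_stamps() give XPC a way to notice that a remote partition has rewritten (restamped) its reserved page, for example across a reboot, by comparing a previously cached stamp against the one just pulled. The following userspace sketch is not part of the patch; it only repeats the second/nanosecond comparison above on a pair of hypothetical stamps.

/* Userspace sketch (not part of the patch): the same second/nanosecond
 * comparison as xpc_compare_stamps(), applied to two hypothetical stamps. */
#include <stdio.h>
#include <time.h>

static int
compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
{
	int ret;

	if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0)
		ret = stamp1->tv_nsec - stamp2->tv_nsec;
	return ret;
}

int main(void)
{
	struct timespec cached = { .tv_sec = 1000, .tv_nsec = 500 };
	struct timespec remote = { .tv_sec = 1000, .tv_nsec = 900 };

	if (compare_stamps(&remote, &cached) > 0)
		printf("remote reserved page was restamped (newer)\n");
	return 0;
}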
@@ -121,11 +169,58 @@ struct xpc_vars {
 	u64 vars_part_pa;
 	u64 amos_page_pa;	/* paddr of page of AMOs from MSPEC driver */
 	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
-	AMO_t *act_amos;	/* pointer to the first activation AMO */
 };
-#define XPC_V_VERSION _XPC_VERSION(3,0) /* version 3.0 of the cross vars */
 
-#define XPC_VARS_ALIGNED_SIZE (L1_CACHE_ALIGN(sizeof(struct xpc_vars)))
+#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+
+#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
+			(_version >= _XPC_VERSION(3,1))
+
+
+static inline int
+xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
+{
+	return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
+}
+
+static inline void
+xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
+{
+	u64 old_mask, new_mask;
+
+	do {
+		old_mask = vars->heartbeating_to_mask;
+		new_mask = (old_mask | (1UL << partid));
+	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
+							old_mask);
+}
+
+static inline void
+xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
+{
+	u64 old_mask, new_mask;
+
+	do {
+		old_mask = vars->heartbeating_to_mask;
+		new_mask = (old_mask & ~(1UL << partid));
+	} while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
+							old_mask);
+}
+
+
+/*
+ * The AMOs page consists of a number of AMO variables which are divided into
+ * four groups, The first two groups are used to identify an IRQ's sender.
+ * These two groups consist of 64 and 128 AMO variables respectively. The last
+ * two groups, consisting of just one AMO variable each, are used to identify
+ * the remote partitions that are currently engaged (from the viewpoint of
+ * the XPC running on the remote partition).
+ */
+#define XPC_NOTIFY_IRQ_AMOS		0
+#define XPC_ACTIVATE_IRQ_AMOS		(XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
+#define XPC_ENGAGED_PARTITIONS_AMO	(XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
+#define XPC_DISENGAGE_REQUEST_AMO	(XPC_ENGAGED_PARTITIONS_AMO + 1)
+
 
 /*
  * The following structure describes the per partition specific variables.
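
With the sizes the comment in this hunk cites (64 notify AMOs from XP_MAX_PARTITIONS and 128 activate AMOs from XP_NASID_MASK_WORDS), the four groups land at AMO indices 0-63, 64-191, 192, and 193. The sketch below is not part of the patch; it just evaluates the same index arithmetic in userspace under those assumed values.

/* Userspace sketch (not part of the patch): where each AMO group lands in
 * the AMOs page, assuming XP_MAX_PARTITIONS = 64 and
 * XP_NASID_MASK_WORDS = 128 as described in the comment above. */
#include <stdio.h>

#define XP_MAX_PARTITIONS	64	/* assumed value */
#define XP_NASID_MASK_WORDS	128	/* assumed value */

#define NOTIFY_IRQ_AMOS		0
#define ACTIVATE_IRQ_AMOS	(NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
#define ENGAGED_PARTITIONS_AMO	(ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
#define DISENGAGE_REQUEST_AMO	(ENGAGED_PARTITIONS_AMO + 1)

int main(void)
{
	printf("notify IRQ AMOs:        %d..%d\n", NOTIFY_IRQ_AMOS,
	       ACTIVATE_IRQ_AMOS - 1);			/* 0..63 */
	printf("activate IRQ AMOs:      %d..%d\n", ACTIVATE_IRQ_AMOS,
	       ENGAGED_PARTITIONS_AMO - 1);		/* 64..191 */
	printf("engaged-partitions AMO: %d\n", ENGAGED_PARTITIONS_AMO);
	printf("disengage-request AMO:  %d\n", DISENGAGE_REQUEST_AMO);
	return 0;
}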
@@ -165,6 +260,16 @@ struct xpc_vars_part {
 #define XPC_VP_MAGIC2	0x0073726176435058L  /* 'XPCvars\0'L (little endian) */
 
 
+/* the reserved page sizes and offsets */
+
+#define XPC_RP_HEADER_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
+#define XPC_RP_VARS_SIZE	L1_CACHE_ALIGN(sizeof(struct xpc_vars))
+
+#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
+#define XPC_RP_VARS(_rp)	((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
+#define XPC_RP_VARS_PART(_rp)	(struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
+
 
 /*
 * Functions registered by add_timer() or called by kernel_thread() only
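
The XPC_RP_* macros locate each region of the reserved page in the order the layout comment earlier in this patch describes: header, then the two nasid masks (xp_nasid_mask_words u64s each), then the cacheline-aligned vars, then vars part. The userspace sketch below is not part of the patch and follows that layout description directly; the cacheline size and the two struct sizes are assumed placeholders, not the real SN2 numbers.

/* Userspace sketch (not part of the patch): the reserved page walk described
 * by the layout comment, with assumed sizes (cacheline = 128 bytes,
 * xp_nasid_mask_words = 128; the struct sizes below are placeholders). */
#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES		128	/* assumed */
#define L1_CACHE_ALIGN(x)	(((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
	unsigned long rp = 0x10000;			/* pretend reserved page vaddr */
	unsigned long rp_header_size = L1_CACHE_ALIGN(64);  /* placeholder header size */
	unsigned long vars_size = L1_CACHE_ALIGN(80);	/* placeholder vars size */
	int xp_nasid_mask_words = 128;			/* assumed */

	unsigned long part_nasids = rp + rp_header_size;
	unsigned long mach_nasids = part_nasids + xp_nasid_mask_words * sizeof(uint64_t);
	unsigned long vars = mach_nasids + xp_nasid_mask_words * sizeof(uint64_t);
	unsigned long vars_part = vars + vars_size;

	printf("part_nasids @ +0x%lx\n", part_nasids - rp);
	printf("mach_nasids @ +0x%lx\n", mach_nasids - rp);
	printf("vars        @ +0x%lx\n", vars - rp);
	printf("vars part   @ +0x%lx\n", vars_part - rp);
	return 0;
}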
@@ -349,6 +454,9 @@ struct xpc_channel {
 	atomic_t n_on_msg_allocate_wq;	/* #on msg allocation wait queue */
 	wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
 
+	u8 delayed_IPI_flags;		/* IPI flags received, but delayed */
+					/* action until channel disconnected */
+
 	/* queue of msg senders who want to be notified when msg received */
 
 	atomic_t n_to_notify;		/* #of msg senders to notify */
@@ -358,7 +466,7 @@ struct xpc_channel {
 	void *key;			/* pointer to user's key */
 
 	struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
-	struct semaphore teardown_sema;    /* wait for teardown completion */
+	struct semaphore wdisconnect_sema; /* wait for channel disconnect */
 
 	struct xpc_openclose_args *local_openclose_args; /* args passed on */
 					/* opening or closing of channel */
@@ -410,6 +518,8 @@ struct xpc_channel {
 
 #define	XPC_C_DISCONNECTED	0x00002000 /* channel is disconnected */
 #define	XPC_C_DISCONNECTING	0x00004000 /* channel is being disconnected */
+#define	XPC_C_DISCONNECTCALLOUT	0x00008000 /* chan disconnected callout made */
+#define	XPC_C_WDISCONNECT	0x00010000 /* waiting for channel disconnect */
 
 
 
@@ -422,6 +532,8 @@ struct xpc_partition {
 
 	/* XPC HB infrastructure */
 
+	u8 remote_rp_version;		/* version# of partition's rsvd pg */
+	struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
 	u64 remote_rp_pa;		/* phys addr of partition's rsvd pg */
 	u64 remote_vars_pa;		/* phys addr of partition's vars */
 	u64 remote_vars_part_pa;	/* phys addr of partition's vars part */
@@ -432,14 +544,18 @@ struct xpc_partition {
 	u32 act_IRQ_rcvd;		/* IRQs since activation */
 	spinlock_t act_lock;		/* protect updating of act_state */
 	u8 act_state;			/* from XPC HB viewpoint */
+	u8 remote_vars_version;		/* version# of partition's vars */
 	enum xpc_retval reason;		/* reason partition is deactivating */
 	int reason_line;		/* line# deactivation initiated from */
 	int reactivate_nasid;		/* nasid in partition to reactivate */
 
+	unsigned long disengage_request_timeout; /* timeout in jiffies */
+	struct timer_list disengage_request_timer;
+
 
 	/* XPC infrastructure referencing and teardown control */
 
 	volatile u8 setup_state;	/* infrastructure setup state */
 	wait_queue_head_t teardown_wq;	/* kthread waiting to teardown infra */
 	atomic_t references;		/* #of references to infrastructure */
 
@@ -454,6 +570,7 @@ struct xpc_partition {
 
 	u8 nchannels;		   /* #of defined channels supported */
 	atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+	atomic_t nchannels_engaged;/* #of channels engaged with remote part */
 	struct xpc_channel *channels;/* array of channel structures */
 
 	void *local_GPs_base;	  /* base address of kmalloc'd space */
@@ -518,6 +635,7 @@ struct xpc_partition {
 #define XPC_P_TORNDOWN		0x03	/* infrastructure is torndown */
 
 
+
 /*
  * struct xpc_partition IPI_timer #of seconds to wait before checking for
  * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
@@ -526,6 +644,13 @@ struct xpc_partition {
 #define XPC_P_DROPPED_IPI_WAIT	(0.25 * HZ)
 
 
+/* number of seconds to wait for other partitions to disengage */
+#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT	90
+
+/* interval in seconds to print 'waiting disengagement' messages */
+#define XPC_DISENGAGE_PRINTMSG_INTERVAL		10
+
+
 #define XPC_PARTID(_p)	((partid_t) ((_p) - &xpc_partitions[0]))
 
 
@@ -534,24 +659,20 @@ struct xpc_partition {
 extern struct xpc_registration xpc_registrations[];
 
 
-/* >>> found in xpc_main.c only */
+/* found in xpc_main.c */
 extern struct device *xpc_part;
 extern struct device *xpc_chan;
+extern int xpc_disengage_request_timelimit;
 extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
 extern void xpc_dropped_IPI_check(struct xpc_partition *);
+extern void xpc_activate_partition(struct xpc_partition *);
 extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int);
 extern void xpc_disconnect_wait(int);
 
 
-/* found in xpc_main.c and efi-xpc.c */
-extern void xpc_activate_partition(struct xpc_partition *);
-
-
 /* found in xpc_partition.c */
 extern int xpc_exiting;
-extern int xpc_hb_interval;
-extern int xpc_hb_check_interval;
 extern struct xpc_vars *xpc_vars;
 extern struct xpc_rsvd_page *xpc_rsvd_page;
 extern struct xpc_vars_part *xpc_vars_part;
@@ -561,6 +682,7 @@ extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
 extern void xpc_allow_IPI_ops(void);
 extern void xpc_restrict_IPI_ops(void);
 extern int xpc_identify_act_IRQ_sender(void);
+extern int xpc_partition_disengaged(struct xpc_partition *);
 extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
 extern void xpc_mark_partition_inactive(struct xpc_partition *);
 extern void xpc_discovery(void);
@@ -585,8 +707,8 @@ extern void xpc_connected_callout(struct xpc_channel *);
 extern void xpc_deliver_msg(struct xpc_channel *);
 extern void xpc_disconnect_channel(const int, struct xpc_channel *,
 					enum xpc_retval, unsigned long *);
-extern void xpc_disconnected_callout(struct xpc_channel *);
-extern void xpc_partition_down(struct xpc_partition *, enum xpc_retval);
+extern void xpc_disconnecting_callout(struct xpc_channel *);
+extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
 extern void xpc_teardown_infrastructure(struct xpc_partition *);
 
 
@@ -674,6 +796,157 @@ xpc_part_ref(struct xpc_partition *part)
 
 
 /*
+ * This next set of inlines are used to keep track of when a partition is
+ * potentially engaged in accessing memory belonging to another partition.
+ */
+
+static inline void
+xpc_mark_partition_engaged(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+				(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* set bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
+						(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_mark_partition_disengaged(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+				(XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* clear bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+						~(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_request_partition_disengage(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+				(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* set bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
+						(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_cancel_partition_disengage_request(struct xpc_partition *part)
+{
+	unsigned long irq_flags;
+	AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+				(XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
+
+
+	local_irq_save(irq_flags);
+
+	/* clear bit corresponding to our partid in remote partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+						~(1UL << sn_partition_id));
+	/*
+	 * We must always use the nofault function regardless of whether we
+	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+	 * didn't, we'd never know that the other partition is down and would
+	 * keep sending IPIs and AMOs to it until the heartbeat times out.
+	 */
+	(void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+				variable), xp_nofault_PIOR_target));
+
+	local_irq_restore(irq_flags);
+}
+
+static inline u64
+xpc_partition_engaged(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+
+
+	/* return our partition's AMO variable ANDed with partid_mask */
+	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
+								partid_mask);
+}
+
+static inline u64
+xpc_partition_disengage_requested(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
+
+
+	/* return our partition's AMO variable ANDed with partid_mask */
+	return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
+								partid_mask);
+}
+
+static inline void
+xpc_clear_partition_engaged(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+
+
+	/* clear bit(s) based on partid_mask in our partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+								~partid_mask);
+}
+
+static inline void
+xpc_clear_partition_disengage_request(u64 partid_mask)
+{
+	AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
+
+
+	/* clear bit(s) based on partid_mask in our partition's AMO */
+	FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+								~partid_mask);
+}
+
+
+
+/*
  * The following set of macros and inlines are used for the sending and
  * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
  * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
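
Each of the inlines above manipulates one bit per partition: a partition announces "I am engaged with you" by setting the bit for its own partid in the other side's XPC_ENGAGED_PARTITIONS_AMO variable, asks the other side to let go via the XPC_DISENGAGE_REQUEST_AMO variable, and polls its own AMOs with a partid mask to see what remote partitions have set. The sketch below is not part of the patch; it models one partition's local pair of AMO variables as plain u64 words (no FETCHOP stores, no nofault PIO reads) with a hypothetical remote partid.

/* Userspace sketch (not part of the patch): the engaged / disengage-request
 * bookkeeping, modelled with plain u64 words in place of the hardware AMO
 * variables.  The remote partition id is hypothetical. */
#include <stdint.h>
#include <stdio.h>

static uint64_t engaged_amo;		/* stands in for XPC_ENGAGED_PARTITIONS_AMO */
static uint64_t disengage_request_amo;	/* stands in for XPC_DISENGAGE_REQUEST_AMO */

int main(void)
{
	int remote_partid = 3;				/* hypothetical */
	uint64_t partid_mask = 1UL << remote_partid;

	/* remote partition 3 marks itself engaged with us */
	engaged_amo |= partid_mask;

	/* remote partition 3 asks us to disengage from it */
	disengage_request_amo |= partid_mask;

	/* what our side observes when it polls its own AMOs */
	if (engaged_amo & partid_mask)
		printf("partition %d is engaged with us\n", remote_partid);
	if (disengage_request_amo & partid_mask)
		printf("partition %d wants us to disengage\n", remote_partid);

	/* once the teardown completes, the bits are cleared again */
	engaged_amo &= ~partid_mask;
	disengage_request_amo &= ~partid_mask;

	printf("engaged mask now 0x%llx\n", (unsigned long long) engaged_amo);
	return 0;
}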
@@ -722,13 +995,13 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
  * Flag the appropriate AMO variable and send an IPI to the specified node.
  */
 static inline void
-xpc_activate_IRQ_send(u64 amos_page, int from_nasid, int to_nasid,
+xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
 			int to_phys_cpuid)
 {
 	int w_index = XPC_NASID_W_INDEX(from_nasid);
 	int b_index = XPC_NASID_B_INDEX(from_nasid);
-	AMO_t *amos = (AMO_t *) __va(amos_page +
-				(XP_MAX_PARTITIONS * sizeof(AMO_t)));
+	AMO_t *amos = (AMO_t *) __va(amos_page_pa +
+				(XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
 
 
 	(void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
@@ -756,6 +1029,13 @@ xpc_IPI_send_reactivate(struct xpc_partition *part)
 				xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
 }
 
+static inline void
+xpc_IPI_send_disengage(struct xpc_partition *part)
+{
+	xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
+			part->remote_act_nasid, part->remote_act_phys_cpuid);
+}
+
 
 /*
  * IPIs associated with SGI_XPC_NOTIFY IRQ.
@@ -836,6 +1116,7 @@ xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
 
 /* given an AMO variable and a channel#, get its associated IPI flags */
 #define XPC_GET_IPI_FLAGS(_amo, _c)	((u8) (((_amo) >> ((_c) * 8)) & 0xff))
+#define XPC_SET_IPI_FLAGS(_amo, _c, _f)	(_amo) |= ((u64) (_f) << ((_c) * 8))
 
 #define	XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
 #define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010)
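
The notify-IRQ AMO packs one byte of IPI flags per channel into a single u64, so XPC_GET_IPI_FLAGS() extracts a channel's byte and the new XPC_SET_IPI_FLAGS() ORs one back in (used when replaying the delayed_IPI_flags added to struct xpc_channel earlier in this patch). The sketch below is not part of the patch; it exercises the same shift/mask arithmetic in userspace with a hypothetical channel number and flag value.

/* Userspace sketch (not part of the patch): per-channel IPI flag bytes
 * packed into one u64, mirroring XPC_GET_IPI_FLAGS/XPC_SET_IPI_FLAGS. */
#include <stdint.h>
#include <stdio.h>

#define GET_IPI_FLAGS(amo, c)	 ((uint8_t) (((amo) >> ((c) * 8)) & 0xff))
#define SET_IPI_FLAGS(amo, c, f) ((amo) |= ((uint64_t) (f) << ((c) * 8)))

int main(void)
{
	uint64_t amo = 0;
	int ch = 5;			/* hypothetical channel number */
	uint8_t openclose = 0x01;	/* hypothetical open/close flag bit */

	SET_IPI_FLAGS(amo, ch, openclose);
	printf("amo = 0x%016llx, channel %d flags = 0x%02x\n",
	       (unsigned long long) amo, ch, GET_IPI_FLAGS(amo, ch));

	/* the 0x0f0f... / 0x1010... masks test open/close vs msgrequest bits */
	printf("any openclose set? %s\n",
	       (amo & 0x0f0f0f0f0f0f0f0fULL) ? "yes" : "no");
	return 0;
}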
@@ -903,17 +1184,18 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
  * cacheable mapping for the entire region. This will prevent speculative
  * reading of cached copies of our lines from being issued which will cause
  * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * (XP_MAX_PARTITIONS) AMO variables for message notification (xpc_main.c)
- * and an additional 16 AMO variables for partition activation (xpc_hb.c).
+ * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
+ * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
+ * activation and 2 AMO variables for partition deactivation.
 */
 static inline AMO_t *
-xpc_IPI_init(partid_t partid)
+xpc_IPI_init(int index)
 {
-	AMO_t *part_amo = xpc_vars->amos_page + partid;
+	AMO_t *amo = xpc_vars->amos_page + index;
 
 
-	xpc_IPI_receive(part_amo);
-	return part_amo;
+	(void) xpc_IPI_receive(amo);	/* clear AMO variable */
+	return amo;
 }
 
 