Diffstat (limited to 'drivers/misc/sgi-xp')
-rw-r--r--  drivers/misc/sgi-xp/xp_sn2.c   |   2
-rw-r--r--  drivers/misc/sgi-xp/xpc.h      |  24
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c |  16
-rw-r--r--  drivers/misc/sgi-xp/xpc_sn2.c  | 139
4 files changed, 93 insertions, 88 deletions
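The change is a pure rename: the uppercase AMO_t typedef becomes a plain struct amo, and "AMO" is lowercased to "amo" in comments, identifiers, and messages throughout. As a rough sketch of the before/after shape (the field layout here is an assumption for illustration; the real definition lives in the sn2 headers, outside this diff):

    /* before: a typedef hiding the struct (layout assumed) */
    typedef struct {
            u64 variable;       /* the fetchop-addressable word */
            u64 unused[7];      /* assumed padding */
    } AMO_t;

    /* after: a conventional lowercase struct tag, used as "struct amo" */
    struct amo {
            u64 variable;
            u64 unused[7];
    };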
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index 3d553fa73f4d..1fcfdebca2c5 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -32,7 +32,7 @@ EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
  * If the PIO read times out, the MCA handler will consume the error and
  * return to a kernel-provided instruction to indicate an error. This PIO read
  * exists because it is guaranteed to timeout if the destination is down
- * (AMO operations do not timeout on at least some CPUs on Shubs <= v1.2,
+ * (amo operations do not timeout on at least some CPUs on Shubs <= v1.2,
  * which unfortunately we have to work around).
  */
 static enum xp_retval
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 26a1725f68ad..da2680892dfa 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -38,8 +38,8 @@
 /*
  * The next macros define word or bit representations for given
  * C-brick nasid in either the SAL provided bit array representing
- * nasids in the partition/machine or the AMO_t array used for
- * inter-partition initiation communications.
+ * nasids in the partition/machine or the array of amo structures used
+ * for inter-partition initiation communications.
  *
  * For SN2 machines, C-Bricks are always even numbered NASIDs.  As
  * such, some space will be saved by insisting that nasid information
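For orientation, the nasid packing this comment describes could be expressed as the pair of macros sketched below. The macro bodies are an assumption for illustration; the real XPC_NASID_W_INDEX()/XPC_NASID_B_INDEX() definitions live in xpc.h but fall outside this hunk.

    /* Hypothetical illustration: an even C-brick nasid is halved, then
     * split into a u64-word index and a bit index within that word. */
    #define EXAMPLE_NASID_W_INDEX(_n)   (((_n) / 2) / 64)
    #define EXAMPLE_NASID_B_INDEX(_n)   (((_n) / 2) & (64 - 1))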
@@ -144,8 +144,8 @@ struct xpc_vars_sn2 {
 	int activate_IRQ_nasid;
 	int activate_IRQ_phys_cpuid;
 	u64 vars_part_pa;
-	u64 amos_page_pa;	/* paddr of page of AMOs from MSPEC driver */
-	AMO_t *amos_page;	/* vaddr of page of AMOs from MSPEC driver */
+	u64 amos_page_pa;	/* paddr of page of amos from MSPEC driver */
+	struct amo *amos_page;	/* vaddr of page of amos from MSPEC driver */
 };
 
 #define XPC_V_VERSION _XPC_VERSION(3, 1)    /* version 3.1 of the cross vars */
@@ -153,17 +153,17 @@ struct xpc_vars_sn2 {
 /*
  * The following pertains to ia64-sn2 only.
  *
- * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
+ * Memory for XPC's amo variables is allocated by the MSPEC driver. These
  * pages are located in the lowest granule. The lowest granule uses 4k pages
  * for cached references and an alternate TLB handler to never provide a
  * cacheable mapping for the entire region. This will prevent speculative
  * reading of cached copies of our lines from being issued which will cause
  * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * AMO variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
- * NOTIFY IRQs, 128 AMO variables (based on XP_NASID_MASK_WORDS) to identify
- * the senders of ACTIVATE IRQs, 1 AMO variable to identify which remote
+ * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of
+ * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS) to identify
+ * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote
  * partitions (i.e., XPCs) consider themselves currently engaged with the
- * local XPC and 1 AMO variable to request partition deactivation.
+ * local XPC and 1 amo variable to request partition deactivation.
  */
 #define XPC_NOTIFY_IRQ_AMOS	0
 #define XPC_ACTIVATE_IRQ_AMOS	(XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2)
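The comment above accounts for 64 + 128 + 1 + 1 amo variables in this page. The first two index defines appear in the hunk; the remaining two, whose names are used throughout xpc_sn2.c below, presumably complete the layout as sketched here (their bodies are inferred from the comment's counts, not shown in this diff):

    /* Inferred layout of the amo page; the last two bodies are an
     * assumption based on the counts in the comment above. */
    #define XPC_NOTIFY_IRQ_AMOS        0
    #define XPC_ACTIVATE_IRQ_AMOS      (XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2)
    #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
    #define XPC_DEACTIVATE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1)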
@@ -186,7 +186,7 @@ struct xpc_vars_part_sn2 {
 	u64 openclose_args_pa;	/* physical address of open and close args */
 	u64 GPs_pa;		/* physical address of Get/Put values */
 
-	u64 chctl_amo_pa;	/* physical address of chctl flags' AMO_t */
+	u64 chctl_amo_pa;	/* physical address of chctl flags' amo */
 
 	int notify_IRQ_nasid;	/* nasid of where to send notify IRQs */
 	int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
@@ -547,8 +547,8 @@ struct xpc_partition_sn2 {
 	int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
 	char notify_IRQ_owner[8];  /* notify IRQ's owner's name */
 
-	AMO_t *remote_chctl_amo_va;   /* address of remote chctl flags' AMO_t */
-	AMO_t *local_chctl_amo_va;	/* address of chctl flags' AMO_t */
+	struct amo *remote_chctl_amo_va; /* addr of remote chctl flags' amo */
+	struct amo *local_chctl_amo_va;	/* address of chctl flags' amo */
 
 	struct timer_list dropped_notify_IRQ_timer;	/* dropped IRQ timer */
 };
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 43f5b686ecf3..2934b4473001 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -26,16 +26,16 @@
  *	Caveats:
  *
  *	  . Currently on sn2, we have no way to determine which nasid an IRQ
- *	    came from. Thus, xpc_send_IRQ_sn2() does a remote AMO write
- *	    followed by an IPI. The AMO indicates where data is to be pulled
- *	    from, so after the IPI arrives, the remote partition checks the AMO
- *	    word. The IPI can actually arrive before the AMO however, so other
- *	    code must periodically check for this case. Also, remote AMO
+ *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
+ *	    followed by an IPI. The amo indicates where data is to be pulled
+ *	    from, so after the IPI arrives, the remote partition checks the amo
+ *	    word. The IPI can actually arrive before the amo however, so other
+ *	    code must periodically check for this case. Also, remote amo
  *	    operations do not reliably time out. Thus we do a remote PIO read
  *	    solely to know whether the remote partition is down and whether we
  *	    should stop sending IPIs to it. This remote PIO read operation is
  *	    set up in a special nofault region so SAL knows to ignore (and
- *	    cleanup) any errors due to the remote AMO write, PIO read, and/or
+ *	    cleanup) any errors due to the remote amo write, PIO read, and/or
  *	    PIO write operations.
  *
  *	    If/when new hardware solves this IPI problem, we should abandon
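Distilled from the fetchop calls visible in xpc_sn2.c below, the IRQ/amo handshake this caveat describes amounts to the following sketch (not a verbatim excerpt; the IPI send, error handling, and the nofault PIO read are omitted):

    /* sender: atomically OR a flag bit into the receiver's amo word,
     * then send an IPI to the remote partition */
    static void example_send(struct amo *amo, u64 flag)
    {
            FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, flag);
            /* ...the IPI to the remote partition follows in the real code... */
    }

    /* receiver: atomically fetch-and-clear the word; the returned bits
     * identify the senders, and a periodic recheck covers the case where
     * the IPI arrives before the amo write is visible */
    static u64 example_receive(struct amo *amo)
    {
            return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
    }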
@@ -302,7 +302,7 @@ xpc_hb_checker(void *ignore)
 
 		/*
 		 * We need to periodically recheck to ensure no
-		 * IRQ/AMO pairs have been missed. That check
+		 * IRQ/amo pairs have been missed. That check
 		 * must always reset xpc_hb_check_timeout.
 		 */
 		force_IRQ = 1;
@@ -1034,7 +1034,7 @@ xpc_init(void)
 	if (is_shub()) {
 		/*
 		 * The ia64-sn2 architecture supports at most 64 partitions.
-		 * And the inability to unregister remote AMOs restricts us
+		 * And the inability to unregister remote amos restricts us
 		 * further to only support exactly 64 partitions on this
 		 * architecture, no less.
 		 */
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 0fef7d86a5a2..01dd40ec2a89 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -111,13 +111,14 @@ xpc_disallow_IPI_ops_sn2(void)
  */
 
 static u64
-xpc_receive_IRQ_amo_sn2(AMO_t *amo)
+xpc_receive_IRQ_amo_sn2(struct amo *amo)
 {
 	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR);
 }
 
 static enum xp_retval
-xpc_send_IRQ_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
+xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
+		 int vector)
 {
 	int ret = 0;
 	unsigned long irq_flags;
@@ -131,7 +132,7 @@ xpc_send_IRQ_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IRQs and AMOs to it until the heartbeat times out.
+	 * keep sending IRQs and amos to it until the heartbeat times out.
 	 */
 	ret = xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
 						     xp_nofault_PIOR_target));
@@ -141,12 +142,12 @@ xpc_send_IRQ_sn2(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
 	return ((ret == 0) ? xpSuccess : xpPioReadError);
 }
 
-static AMO_t *
+static struct amo *
 xpc_init_IRQ_amo_sn2(int index)
 {
-	AMO_t *amo = xpc_vars->amos_page + index;
+	struct amo *amo = xpc_vars->amos_page + index;
 
-	(void)xpc_receive_IRQ_amo_sn2(amo);	/* clear AMO variable */
+	(void)xpc_receive_IRQ_amo_sn2(amo);	/* clear amo variable */
 	return amo;
 }
 
@@ -166,7 +167,7 @@ xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
 }
 
 /*
- * Flag the appropriate AMO variable and send an IRQ to the specified node.
+ * Flag the appropriate amo variable and send an IRQ to the specified node.
  */
 static void
 xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
@@ -174,8 +175,9 @@ xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
 {
 	int w_index = XPC_NASID_W_INDEX(from_nasid);
 	int b_index = XPC_NASID_B_INDEX(from_nasid);
-	AMO_t *amos = (AMO_t *)__va(amos_page_pa +
-				    (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
+	struct amo *amos = (struct amo *)__va(amos_page_pa +
+					      (XPC_ACTIVATE_IRQ_AMOS *
+					      sizeof(struct amo)));
 
 	(void)xpc_send_IRQ_sn2(&amos[w_index], (1UL << b_index), to_nasid,
 			       to_phys_cpuid, SGI_XPC_ACTIVATE);
@@ -186,8 +188,9 @@ xpc_send_local_activate_IRQ_sn2(int from_nasid)
 {
 	int w_index = XPC_NASID_W_INDEX(from_nasid);
 	int b_index = XPC_NASID_B_INDEX(from_nasid);
-	AMO_t *amos = (AMO_t *)__va(xpc_vars->amos_page_pa +
-				    (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
+	struct amo *amos = (struct amo *)__va(xpc_vars->amos_page_pa +
+					      (XPC_ACTIVATE_IRQ_AMOS *
+					      sizeof(struct amo)));
 
 	/* fake the sending and receipt of an activate IRQ from remote nasid */
 	FETCHOP_STORE_OP(TO_AMO((u64)&amos[w_index].variable), FETCHOP_OR,
@@ -227,7 +230,7 @@ xpc_check_for_sent_chctl_flags_sn2(struct xpc_partition *part)
 /*
  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
  * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
- * than one partition, we use an AMO_t structure per partition to indicate
+ * than one partition, we use an amo structure per partition to indicate
  * whether a partition has sent an IRQ or not. If it has, then wake up the
  * associated kthread to handle it.
  *
@@ -391,20 +394,20 @@ static void
 xpc_indicate_partition_engaged_sn2(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part->sn.sn2.remote_amos_page_pa +
-				   (XPC_ENGAGED_PARTITIONS_AMO *
-				    sizeof(AMO_t)));
+	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
+					     (XPC_ENGAGED_PARTITIONS_AMO *
+					     sizeof(struct amo)));
 
 	local_irq_save(irq_flags);
 
-	/* set bit corresponding to our partid in remote partition's AMO */
+	/* set bit corresponding to our partid in remote partition's amo */
 	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
 			 (1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IRQs and AMOs to it until the heartbeat times out.
+	 * keep sending IRQs and amos to it until the heartbeat times out.
 	 */
 	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
 							       variable),
@@ -418,20 +421,20 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
 {
 	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part_sn2->remote_amos_page_pa +
-				   (XPC_ENGAGED_PARTITIONS_AMO *
-				    sizeof(AMO_t)));
+	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
+					     (XPC_ENGAGED_PARTITIONS_AMO *
+					     sizeof(struct amo)));
 
 	local_irq_save(irq_flags);
 
-	/* clear bit corresponding to our partid in remote partition's AMO */
+	/* clear bit corresponding to our partid in remote partition's amo */
 	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
 			 ~(1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IRQs and AMOs to it until the heartbeat times out.
+	 * keep sending IRQs and amos to it until the heartbeat times out.
 	 */
 	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
 							       variable),
@@ -441,7 +444,7 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
 
 	/*
 	 * Send activate IRQ to get other side to see that we've cleared our
-	 * bit in their engaged partitions AMO.
+	 * bit in their engaged partitions amo.
 	 */
 	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
 				  cnodeid_to_nasid(0),
@@ -452,9 +455,9 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
 static int
 xpc_partition_engaged_sn2(short partid)
 {
-	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+	struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
 
-	/* our partition's AMO variable ANDed with partid mask */
+	/* our partition's amo variable ANDed with partid mask */
 	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
 		(1UL << partid)) != 0;
 }
@@ -462,18 +465,18 @@ xpc_partition_engaged_sn2(short partid)
 static int
 xpc_any_partition_engaged_sn2(void)
 {
-	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+	struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
 
-	/* our partition's AMO variable */
+	/* our partition's amo variable */
 	return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
 }
 
 static void
 xpc_assume_partition_disengaged_sn2(short partid)
 {
-	AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+	struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
 
-	/* clear bit(s) based on partid mask in our partition's AMO */
+	/* clear bit(s) based on partid mask in our partition's amo */
 	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
 			 ~(1UL << partid));
 }
@@ -482,10 +485,10 @@ xpc_assume_partition_disengaged_sn2(short partid)
 static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
 
 /*
- * Change protections to allow AMO operations on non-Shub 1.1 systems.
+ * Change protections to allow amo operations on non-Shub 1.1 systems.
  */
 static enum xp_retval
-xpc_allow_AMO_ops_sn2(AMO_t *amos_page)
+xpc_allow_amo_ops_sn2(struct amo *amos_page)
 {
 	u64 nasid_array = 0;
 	int ret;
@@ -493,7 +496,7 @@ xpc_allow_AMO_ops_sn2(AMO_t *amos_page)
 	/*
 	 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
 	 * collides with memory operations. On those systems we call
-	 * xpc_allow_AMO_ops_shub_wars_1_1_sn2() instead.
+	 * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
 	 */
 	if (!enable_shub_wars_1_1()) {
 		ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
@@ -506,10 +509,10 @@ xpc_allow_AMO_ops_sn2(AMO_t *amos_page)
 }
 
 /*
- * Change protections to allow AMO operations on Shub 1.1 systems.
+ * Change protections to allow amo operations on Shub 1.1 systems.
  */
 static void
-xpc_allow_AMO_ops_shub_wars_1_1_sn2(void)
+xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
 {
 	int node;
 	int nasid;
@@ -536,7 +539,7 @@ xpc_allow_AMO_ops_shub_wars_1_1_sn2(void)
 static enum xp_retval
 xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 {
-	AMO_t *amos_page;
+	struct amo *amos_page;
 	int i;
 	int ret;
 
@@ -549,32 +552,32 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 			    XPC_RP_VARS_SIZE);
 
 	/*
-	 * Before clearing xpc_vars, see if a page of AMOs had been previously
+	 * Before clearing xpc_vars, see if a page of amos had been previously
 	 * allocated. If not we'll need to allocate one and set permissions
-	 * so that cross-partition AMOs are allowed.
+	 * so that cross-partition amos are allowed.
 	 *
-	 * The allocated AMO page needs MCA reporting to remain disabled after
+	 * The allocated amo page needs MCA reporting to remain disabled after
 	 * XPC has unloaded. To make this work, we keep a copy of the pointer
 	 * to this page (i.e., amos_page) in the struct xpc_vars structure,
 	 * which is pointed to by the reserved page, and re-use that saved copy
-	 * on subsequent loads of XPC. This AMO page is never freed, and its
+	 * on subsequent loads of XPC. This amo page is never freed, and its
 	 * memory protections are never restricted.
 	 */
 	amos_page = xpc_vars->amos_page;
 	if (amos_page == NULL) {
-		amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1));
+		amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
 		if (amos_page == NULL) {
-			dev_err(xpc_part, "can't allocate page of AMOs\n");
+			dev_err(xpc_part, "can't allocate page of amos\n");
 			return xpNoMemory;
 		}
 
 		/*
-		 * Open up AMO-R/W to cpu. This is done on Shub 1.1 systems
-		 * when xpc_allow_AMO_ops_shub_wars_1_1_sn2() is called.
+		 * Open up amo-R/W to cpu. This is done on Shub 1.1 systems
+		 * when xpc_allow_amo_ops_shub_wars_1_1_sn2() is called.
 		 */
-		ret = xpc_allow_AMO_ops_sn2(amos_page);
+		ret = xpc_allow_amo_ops_sn2(amos_page);
 		if (ret != xpSuccess) {
-			dev_err(xpc_part, "can't allow AMO operations\n");
+			dev_err(xpc_part, "can't allow amo operations\n");
 			uncached_free_page(__IA64_UNCACHED_OFFSET |
 					   TO_PHYS((u64)amos_page), 1);
 			return ret;
@@ -595,11 +598,11 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 	memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part_sn2) *
 	       xp_max_npartitions);
 
-	/* initialize the activate IRQ related AMO variables */
+	/* initialize the activate IRQ related amo variables */
 	for (i = 0; i < xp_nasid_mask_words; i++)
 		(void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS + i);
 
-	/* initialize the engaged remote partitions related AMO variables */
+	/* initialize the engaged remote partitions related amo variables */
 	(void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO);
 	(void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO);
 
@@ -745,19 +748,20 @@ xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
 {
 	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part_sn2->remote_amos_page_pa +
-		       (XPC_DEACTIVATE_REQUEST_AMO * sizeof(AMO_t)));
+	struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa +
+					     (XPC_DEACTIVATE_REQUEST_AMO *
+					     sizeof(struct amo)));
 
 	local_irq_save(irq_flags);
 
-	/* set bit corresponding to our partid in remote partition's AMO */
+	/* set bit corresponding to our partid in remote partition's amo */
 	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR,
 			 (1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IRQs and AMOs to it until the heartbeat times out.
+	 * keep sending IRQs and amos to it until the heartbeat times out.
 	 */
 	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
 							       variable),
@@ -767,7 +771,7 @@ xpc_request_partition_deactivation_sn2(struct xpc_partition *part)
 
 	/*
 	 * Send activate IRQ to get other side to see that we've set our
-	 * bit in their deactivate request AMO.
+	 * bit in their deactivate request amo.
 	 */
 	xpc_send_activate_IRQ_sn2(part_sn2->remote_amos_page_pa,
 				  cnodeid_to_nasid(0),
@@ -779,19 +783,20 @@ static void
 xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
-	AMO_t *amo = (AMO_t *)__va(part->sn.sn2.remote_amos_page_pa +
-		       (XPC_DEACTIVATE_REQUEST_AMO * sizeof(AMO_t)));
+	struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa +
+					     (XPC_DEACTIVATE_REQUEST_AMO *
+					     sizeof(struct amo)));
 
 	local_irq_save(irq_flags);
 
-	/* clear bit corresponding to our partid in remote partition's AMO */
+	/* clear bit corresponding to our partid in remote partition's amo */
 	FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
 			 ~(1UL << sn_partition_id));
 	/*
 	 * We must always use the nofault function regardless of whether we
 	 * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
 	 * didn't, we'd never know that the other partition is down and would
-	 * keep sending IRQs and AMOs to it until the heartbeat times out.
+	 * keep sending IRQs and amos to it until the heartbeat times out.
 	 */
 	(void)xp_nofault_PIOR((u64 *)GLOBAL_MMR_ADDR(NASID_GET(&amo->
 							       variable),
@@ -803,9 +808,9 @@ xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
 static int
 xpc_partition_deactivation_requested_sn2(short partid)
 {
-	AMO_t *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO;
+	struct amo *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO;
 
-	/* our partition's AMO variable ANDed with partid mask */
+	/* our partition's amo variable ANDed with partid mask */
 	return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
 		(1UL << partid)) != 0;
 }
@@ -976,7 +981,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
 }
 
 /*
- * Loop through the activation AMO variables and process any bits
+ * Loop through the activation amo variables and process any bits
  * which are set. Each bit indicates a nasid sending a partition
  * activation or deactivation request.
  *
@@ -989,11 +994,11 @@ xpc_identify_activate_IRQ_sender_sn2(void)
 	u64 nasid_mask;
 	u64 nasid;		/* remote nasid */
 	int n_IRQs_detected = 0;
-	AMO_t *act_amos;
+	struct amo *act_amos;
 
 	act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS;
 
-	/* scan through act AMO variable looking for non-zero entries */
+	/* scan through act amo variable looking for non-zero entries */
 	for (word = 0; word < xp_nasid_mask_words; word++) {
 
 		if (xpc_exiting)
@@ -1005,7 +1010,7 @@ xpc_identify_activate_IRQ_sender_sn2(void)
 			continue;
 		}
 
-		dev_dbg(xpc_part, "AMO[%d] gave back 0x%lx\n", word,
+		dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", word,
 			nasid_mask);
 
 		/*
@@ -1038,7 +1043,7 @@ xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
 
 	n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
 	if (n_IRQs_detected < n_IRQs_expected) {
-		/* retry once to help avoid missing AMO */
+		/* retry once to help avoid missing amo */
 		(void)xpc_identify_activate_IRQ_sender_sn2();
 	}
 }
@@ -1386,7 +1391,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 	part_sn2->remote_openclose_args_pa =
 	    pulled_entry->openclose_args_pa;
 	part_sn2->remote_chctl_amo_va =
-	    (AMO_t *)__va(pulled_entry->chctl_amo_pa);
+	    (struct amo *)__va(pulled_entry->chctl_amo_pa);
 	part_sn2->notify_IRQ_nasid = pulled_entry->notify_IRQ_nasid;
 	part_sn2->notify_IRQ_phys_cpuid =
 	    pulled_entry->notify_IRQ_phys_cpuid;
@@ -1417,7 +1422,7 @@ xpc_make_first_contact_sn2(struct xpc_partition *part)
 	enum xp_retval ret;
 
 	/*
-	 * Register the remote partition's AMOs with SAL so it can handle
+	 * Register the remote partition's amos with SAL so it can handle
 	 * and cleanup errors within that address range should the remote
 	 * partition go down. We don't unregister this range because it is
 	 * difficult to tell when outstanding writes to the remote partition
@@ -2192,9 +2197,9 @@ xpc_init_sn2(void)
 	xpc_send_msg = xpc_send_msg_sn2;
 	xpc_received_msg = xpc_received_msg_sn2;
 
-	/* open up protections for IPI and [potentially] AMO operations */
+	/* open up protections for IPI and [potentially] amo operations */
 	xpc_allow_IPI_ops_sn2();
-	xpc_allow_AMO_ops_shub_wars_1_1_sn2();
+	xpc_allow_amo_ops_shub_wars_1_1_sn2();
 
 	/*
 	 * This is safe to do before the xpc_hb_checker thread has started