author     Dean Nelson <dcn@sgi.com>                          2008-07-30 01:34:13 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-07-30 12:41:50 -0400
commit     8e85c23ef04fe0d8414e0b1dc04543095282a27a
tree       da805cb04c535eb15b493a9055bbb05866c03329
parent     ee6665e3b6e1283c30ae240732af1345bc02154e
sgi-xp: add _sn2 suffix to a few variables
Add an '_sn2' suffix to some variables found in xpc_sn2.c.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 drivers/misc/sgi-xp/xpc_sn2.c | 124
 1 file changed, 65 insertions(+), 59 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 9c0c29a2ac86..63fe59a5bfac 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -56,15 +56,15 @@
 static char *xpc_remote_copy_buffer_sn2;
 static void *xpc_remote_copy_buffer_base_sn2;
 
-static struct xpc_vars_sn2 *xpc_vars;	/* >>> Add _sn2 suffix? */
-static struct xpc_vars_part_sn2 *xpc_vars_part;	/* >>> Add _sn2 suffix? */
+static struct xpc_vars_sn2 *xpc_vars_sn2;
+static struct xpc_vars_part_sn2 *xpc_vars_part_sn2;
 
 /* SH_IPI_ACCESS shub register value on startup */
-static u64 xpc_sh1_IPI_access;
-static u64 xpc_sh2_IPI_access0;
-static u64 xpc_sh2_IPI_access1;
-static u64 xpc_sh2_IPI_access2;
-static u64 xpc_sh2_IPI_access3;
+static u64 xpc_sh1_IPI_access_sn2;
+static u64 xpc_sh2_IPI_access0_sn2;
+static u64 xpc_sh2_IPI_access1_sn2;
+static u64 xpc_sh2_IPI_access2_sn2;
+static u64 xpc_sh2_IPI_access3_sn2;
 
 /*
  * Change protections to allow IPI operations.
@@ -77,13 +77,13 @@ xpc_allow_IPI_ops_sn2(void)
 
         /* >>> The following should get moved into SAL. */
         if (is_shub2()) {
-                xpc_sh2_IPI_access0 =
+                xpc_sh2_IPI_access0_sn2 =
                     (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
-                xpc_sh2_IPI_access1 =
+                xpc_sh2_IPI_access1_sn2 =
                     (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
-                xpc_sh2_IPI_access2 =
+                xpc_sh2_IPI_access2_sn2 =
                     (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
-                xpc_sh2_IPI_access3 =
+                xpc_sh2_IPI_access3_sn2 =
                     (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
 
                 for_each_online_node(node) {
@@ -98,7 +98,7 @@ xpc_allow_IPI_ops_sn2(void)
                               -1UL);
                 }
         } else {
-                xpc_sh1_IPI_access =
+                xpc_sh1_IPI_access_sn2 =
                     (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
 
                 for_each_online_node(node) {
@@ -123,19 +123,19 @@ xpc_disallow_IPI_ops_sn2(void)
                 for_each_online_node(node) {
                         nasid = cnodeid_to_nasid(node);
                         HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
-                              xpc_sh2_IPI_access0);
+                              xpc_sh2_IPI_access0_sn2);
                         HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
-                              xpc_sh2_IPI_access1);
+                              xpc_sh2_IPI_access1_sn2);
                         HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
-                              xpc_sh2_IPI_access2);
+                              xpc_sh2_IPI_access2_sn2);
                         HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
-                              xpc_sh2_IPI_access3);
+                              xpc_sh2_IPI_access3_sn2);
                 }
         } else {
                 for_each_online_node(node) {
                         nasid = cnodeid_to_nasid(node);
                         HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
-                              xpc_sh1_IPI_access);
+                              xpc_sh1_IPI_access_sn2);
                 }
         }
 }
@@ -182,7 +182,7 @@ xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid,
 static struct amo *
 xpc_init_IRQ_amo_sn2(int index)
 {
-        struct amo *amo = xpc_vars->amos_page + index;
+        struct amo *amo = xpc_vars_sn2->amos_page + index;
 
         (void)xpc_receive_IRQ_amo_sn2(amo);	/* clear amo variable */
         return amo;
@@ -225,7 +225,7 @@ xpc_send_local_activate_IRQ_sn2(int from_nasid)
 {
         int w_index = XPC_NASID_W_INDEX(from_nasid);
         int b_index = XPC_NASID_B_INDEX(from_nasid);
-        struct amo *amos = (struct amo *)__va(xpc_vars->amos_page_pa +
+        struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa +
                                               (XPC_ACTIVATE_IRQ_AMOS_SN2 *
                                               sizeof(struct amo)));
 
@@ -492,7 +492,8 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part)
 static int
 xpc_partition_engaged_sn2(short partid)
 {
-        struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO_SN2;
+        struct amo *amo = xpc_vars_sn2->amos_page +
+            XPC_ENGAGED_PARTITIONS_AMO_SN2;
 
         /* our partition's amo variable ANDed with partid mask */
         return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
@@ -502,7 +503,8 @@ xpc_partition_engaged_sn2(short partid)
 static int
 xpc_any_partition_engaged_sn2(void)
 {
-        struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO_SN2;
+        struct amo *amo = xpc_vars_sn2->amos_page +
+            XPC_ENGAGED_PARTITIONS_AMO_SN2;
 
         /* our partition's amo variable */
         return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0;
@@ -511,7 +513,8 @@ xpc_any_partition_engaged_sn2(void)
 static void
 xpc_assume_partition_disengaged_sn2(short partid)
 {
-        struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO_SN2;
+        struct amo *amo = xpc_vars_sn2->amos_page +
+            XPC_ENGAGED_PARTITIONS_AMO_SN2;
 
         /* clear bit(s) based on partid mask in our partition's amo */
         FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND,
@@ -580,27 +583,27 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
         int i;
         int ret;
 
-        xpc_vars = XPC_RP_VARS(rp);
+        xpc_vars_sn2 = XPC_RP_VARS(rp);
 
-        rp->sn.vars_pa = __pa(xpc_vars);
+        rp->sn.vars_pa = __pa(xpc_vars_sn2);
 
         /* vars_part array follows immediately after vars */
-        xpc_vars_part = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
+        xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
                                                      XPC_RP_VARS_SIZE);
 
         /*
-         * Before clearing xpc_vars, see if a page of amos had been previously
-         * allocated. If not we'll need to allocate one and set permissions
-         * so that cross-partition amos are allowed.
+         * Before clearing xpc_vars_sn2, see if a page of amos had been
+         * previously allocated. If not we'll need to allocate one and set
+         * permissions so that cross-partition amos are allowed.
          *
          * The allocated amo page needs MCA reporting to remain disabled after
          * XPC has unloaded. To make this work, we keep a copy of the pointer
-         * to this page (i.e., amos_page) in the struct xpc_vars structure,
+         * to this page (i.e., amos_page) in the struct xpc_vars_sn2 structure,
          * which is pointed to by the reserved page, and re-use that saved copy
          * on subsequent loads of XPC. This amo page is never freed, and its
          * memory protections are never restricted.
          */
-        amos_page = xpc_vars->amos_page;
+        amos_page = xpc_vars_sn2->amos_page;
         if (amos_page == NULL) {
                 amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1));
                 if (amos_page == NULL) {
@@ -621,18 +624,18 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
                 }
         }
 
-        /* clear xpc_vars */
-        memset(xpc_vars, 0, sizeof(struct xpc_vars_sn2));
+        /* clear xpc_vars_sn2 */
+        memset(xpc_vars_sn2, 0, sizeof(struct xpc_vars_sn2));
 
-        xpc_vars->version = XPC_V_VERSION;
-        xpc_vars->activate_IRQ_nasid = cpuid_to_nasid(0);
-        xpc_vars->activate_IRQ_phys_cpuid = cpu_physical_id(0);
-        xpc_vars->vars_part_pa = __pa(xpc_vars_part);
-        xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page);
-        xpc_vars->amos_page = amos_page;	/* save for next load of XPC */
+        xpc_vars_sn2->version = XPC_V_VERSION;
+        xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
+        xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
+        xpc_vars_sn2->vars_part_pa = __pa(xpc_vars_part_sn2);
+        xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
+        xpc_vars_sn2->amos_page = amos_page;	/* save for next load of XPC */
 
-        /* clear xpc_vars_part */
-        memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part_sn2) *
+        /* clear xpc_vars_part_sn2 */
+        memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) *
                xp_max_npartitions);
 
         /* initialize the activate IRQ related amo variables */
@@ -649,30 +652,30 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 static void
 xpc_increment_heartbeat_sn2(void)
 {
-        xpc_vars->heartbeat++;
+        xpc_vars_sn2->heartbeat++;
 }
 
 static void
 xpc_offline_heartbeat_sn2(void)
 {
         xpc_increment_heartbeat_sn2();
-        xpc_vars->heartbeat_offline = 1;
+        xpc_vars_sn2->heartbeat_offline = 1;
 }
 
 static void
 xpc_online_heartbeat_sn2(void)
 {
         xpc_increment_heartbeat_sn2();
-        xpc_vars->heartbeat_offline = 0;
+        xpc_vars_sn2->heartbeat_offline = 0;
 }
 
 static void
 xpc_heartbeat_init_sn2(void)
 {
-        DBUG_ON(xpc_vars == NULL);
+        DBUG_ON(xpc_vars_sn2 == NULL);
 
-        bitmap_zero(xpc_vars->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
-        xpc_heartbeating_to_mask = &xpc_vars->heartbeating_to_mask[0];
+        bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2);
+        xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0];
         xpc_online_heartbeat_sn2();
 }
 
@@ -845,7 +848,8 @@ xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part)
 static int
 xpc_partition_deactivation_requested_sn2(short partid)
 {
-        struct amo *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO_SN2;
+        struct amo *amo = xpc_vars_sn2->amos_page +
+            XPC_DEACTIVATE_REQUEST_AMO_SN2;
 
         /* our partition's amo variable ANDed with partid mask */
         return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) &
@@ -1033,7 +1037,7 @@ xpc_identify_activate_IRQ_sender_sn2(void)
         int n_IRQs_detected = 0;
         struct amo *act_amos;
 
-        act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;
+        act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2;
 
         /* scan through act amo variable looking for non-zero entries */
         for (word = 0; word < xpc_nasid_mask_words; word++) {
@@ -1261,15 +1265,17 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
          * The setting of the magic # indicates that these per partition
          * specific variables are ready to be used.
          */
-        xpc_vars_part[partid].GPs_pa = __pa(part_sn2->local_GPs);
-        xpc_vars_part[partid].openclose_args_pa =
+        xpc_vars_part_sn2[partid].GPs_pa = __pa(part_sn2->local_GPs);
+        xpc_vars_part_sn2[partid].openclose_args_pa =
             __pa(part->local_openclose_args);
-        xpc_vars_part[partid].chctl_amo_pa = __pa(part_sn2->local_chctl_amo_va);
+        xpc_vars_part_sn2[partid].chctl_amo_pa =
+            __pa(part_sn2->local_chctl_amo_va);
         cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
-        xpc_vars_part[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
-        xpc_vars_part[partid].notify_IRQ_phys_cpuid = cpu_physical_id(cpuid);
-        xpc_vars_part[partid].nchannels = part->nchannels;
-        xpc_vars_part[partid].magic = XPC_VP_MAGIC1;
+        xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
+        xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
+            cpu_physical_id(cpuid);
+        xpc_vars_part_sn2[partid].nchannels = part->nchannels;
+        xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1;
 
         return xpSuccess;
 
@@ -1316,7 +1322,7 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
         DBUG_ON(part->setup_state != XPC_P_SETUP);
         part->setup_state = XPC_P_WTEARDOWN;
 
-        xpc_vars_part[partid].magic = 0;
+        xpc_vars_part_sn2[partid].magic = 0;
 
         free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid);
 
@@ -1432,7 +1438,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
                 return xpRetry;
         }
 
-        if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
+        if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1) {
 
                 /* validate the variables */
 
@@ -1462,7 +1468,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 
                 /* let the other side know that we've pulled their variables */
 
-                xpc_vars_part[partid].magic = XPC_VP_MAGIC2;
+                xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2;
         }
 
         if (pulled_entry->magic == XPC_VP_MAGIC1)