about summary refs log tree commit diff stats
path: root/drivers/misc/sgi-xp/xpc_sn2.c
diff options
context:
space:
mode:
authorDean Nelson <dcn@sgi.com>2008-07-30 01:34:09 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-07-30 12:41:49 -0400
commit6e41017aad9ed175ca51e4828eabc8c5cf5910be (patch)
tree388f0bd12f15d8c3d4a45d53ce99c24c33454e3a /drivers/misc/sgi-xp/xpc_sn2.c
parent97bf1aa1e1bb18de9bb1987c6eb9ad751bf08aab (diff)
sgi-xp: isolate activate IRQ's hardware specific components
Isolate architecture-specific code related to XPC's activate IRQ.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_sn2.c')
-rw-r--r-- drivers/misc/sgi-xp/xpc_sn2.c | 217
1 file changed, 190 insertions, 27 deletions
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index db67d348b35c..4659f6cb885e 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -22,6 +22,87 @@
22static struct xpc_vars_sn2 *xpc_vars; /* >>> Add _sn2 suffix? */ 22static struct xpc_vars_sn2 *xpc_vars; /* >>> Add _sn2 suffix? */
23static struct xpc_vars_part_sn2 *xpc_vars_part; /* >>> Add _sn2 suffix? */ 23static struct xpc_vars_part_sn2 *xpc_vars_part; /* >>> Add _sn2 suffix? */
24 24
25/* SH_IPI_ACCESS shub register value on startup */
26static u64 xpc_sh1_IPI_access;
27static u64 xpc_sh2_IPI_access0;
28static u64 xpc_sh2_IPI_access1;
29static u64 xpc_sh2_IPI_access2;
30static u64 xpc_sh2_IPI_access3;
31
32/*
33 * Change protections to allow IPI operations.
34 */
35static void
36xpc_allow_IPI_ops_sn2(void)
37{
38 int node;
39 int nasid;
40
41 /* >>> The following should get moved into SAL. */
42 if (is_shub2()) {
43 xpc_sh2_IPI_access0 =
44 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0));
45 xpc_sh2_IPI_access1 =
46 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS1));
47 xpc_sh2_IPI_access2 =
48 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS2));
49 xpc_sh2_IPI_access3 =
50 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS3));
51
52 for_each_online_node(node) {
53 nasid = cnodeid_to_nasid(node);
54 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
55 -1UL);
56 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
57 -1UL);
58 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
59 -1UL);
60 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
61 -1UL);
62 }
63 } else {
64 xpc_sh1_IPI_access =
65 (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH1_IPI_ACCESS));
66
67 for_each_online_node(node) {
68 nasid = cnodeid_to_nasid(node);
69 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
70 -1UL);
71 }
72 }
73}
74
75/*
76 * Restrict protections to disallow IPI operations.
77 */
78static void
79xpc_disallow_IPI_ops_sn2(void)
80{
81 int node;
82 int nasid;
83
84 /* >>> The following should get moved into SAL. */
85 if (is_shub2()) {
86 for_each_online_node(node) {
87 nasid = cnodeid_to_nasid(node);
88 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS0),
89 xpc_sh2_IPI_access0);
90 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS1),
91 xpc_sh2_IPI_access1);
92 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS2),
93 xpc_sh2_IPI_access2);
94 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH2_IPI_ACCESS3),
95 xpc_sh2_IPI_access3);
96 }
97 } else {
98 for_each_online_node(node) {
99 nasid = cnodeid_to_nasid(node);
100 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid, SH1_IPI_ACCESS),
101 xpc_sh1_IPI_access);
102 }
103 }
104}
105
25/* 106/*
26 * The following set of macros and functions are used for the sending and 107 * The following set of macros and functions are used for the sending and
27 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs, 108 * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
@@ -74,6 +155,17 @@ xpc_IPI_init_sn2(int index)
74 */ 155 */
75 156
76/* 157/*
158 * Notify the heartbeat check thread that an activate IRQ has been received.
159 */
160static irqreturn_t
161xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
162{
163 atomic_inc(&xpc_activate_IRQ_rcvd);
164 wake_up_interruptible(&xpc_activate_IRQ_wq);
165 return IRQ_HANDLED;
166}
167
168/*
77 * Flag the appropriate AMO variable and send an IPI to the specified node. 169 * Flag the appropriate AMO variable and send an IPI to the specified node.
78 */ 170 */
79static void 171static void
@@ -100,8 +192,8 @@ xpc_activate_IRQ_send_local_sn2(int from_nasid)
100 /* fake the sending and receipt of an activate IRQ from remote nasid */ 192 /* fake the sending and receipt of an activate IRQ from remote nasid */
101 FETCHOP_STORE_OP(TO_AMO((u64)&amos[w_index].variable), FETCHOP_OR, 193 FETCHOP_STORE_OP(TO_AMO((u64)&amos[w_index].variable), FETCHOP_OR,
102 (1UL << b_index)); 194 (1UL << b_index));
103 atomic_inc(&xpc_act_IRQ_rcvd); 195 atomic_inc(&xpc_activate_IRQ_rcvd);
104 wake_up_interruptible(&xpc_act_IRQ_wq); 196 wake_up_interruptible(&xpc_activate_IRQ_wq);
105} 197}
106 198
107static void 199static void
@@ -383,11 +475,65 @@ xpc_clear_partition_disengage_request_sn2(u64 partid_mask)
383 ~partid_mask); 475 ~partid_mask);
384} 476}
385 477
478/* original protection values for each node */
479static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
480
481/*
482 * Change protections to allow AMO operations on non-Shub 1.1 systems.
483 */
484static enum xp_retval
485xpc_allow_AMO_ops_sn2(AMO_t *amos_page)
486{
487 u64 nasid_array = 0;
488 int ret;
489
490 /*
491 * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
492 * collides with memory operations. On those systems we call
493 * xpc_allow_AMO_ops_shub_wars_1_1_sn2() instead.
494 */
495 if (!enable_shub_wars_1_1()) {
496 ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
497 SN_MEMPROT_ACCESS_CLASS_1,
498 &nasid_array);
499 if (ret != 0)
500 return xpSalError;
501 }
502 return xpSuccess;
503}
504
505/*
506 * Change protections to allow AMO operations on Shub 1.1 systems.
507 */
508static void
509xpc_allow_AMO_ops_shub_wars_1_1_sn2(void)
510{
511 int node;
512 int nasid;
513
514 if (!enable_shub_wars_1_1())
515 return;
516
517 for_each_online_node(node) {
518 nasid = cnodeid_to_nasid(node);
519 /* save current protection values */
520 xpc_prot_vec_sn2[node] =
521 (u64)HUB_L((u64 *)GLOBAL_MMR_ADDR(nasid,
522 SH1_MD_DQLP_MMR_DIR_PRIVEC0));
523 /* open up everything */
524 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
525 SH1_MD_DQLP_MMR_DIR_PRIVEC0),
526 -1UL);
527 HUB_S((u64 *)GLOBAL_MMR_ADDR(nasid,
528 SH1_MD_DQRP_MMR_DIR_PRIVEC0),
529 -1UL);
530 }
531}
532
386static enum xp_retval 533static enum xp_retval
387xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) 534xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
388{ 535{
389 AMO_t *amos_page; 536 AMO_t *amos_page;
390 u64 nasid_array = 0;
391 int i; 537 int i;
392 int ret; 538 int ret;
393 539
@@ -421,21 +567,15 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
421 } 567 }
422 568
423 /* 569 /*
424 * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems 570 * Open up AMO-R/W to cpu. This is done on Shub 1.1 systems
425 * when xpc_allow_IPI_ops() is called via xpc_hb_init(). 571 * when xpc_allow_AMO_ops_shub_wars_1_1_sn2() is called.
426 */ 572 */
427 if (!enable_shub_wars_1_1()) { 573 ret = xpc_allow_AMO_ops_sn2(amos_page);
428 ret = sn_change_memprotect(ia64_tpa((u64)amos_page), 574 if (ret != xpSuccess) {
429 PAGE_SIZE, 575 dev_err(xpc_part, "can't allow AMO operations\n");
430 SN_MEMPROT_ACCESS_CLASS_1, 576 uncached_free_page(__IA64_UNCACHED_OFFSET |
431 &nasid_array); 577 TO_PHYS((u64)amos_page), 1);
432 if (ret != 0) { 578 return ret;
433 dev_err(xpc_part, "can't change memory "
434 "protections\n");
435 uncached_free_page(__IA64_UNCACHED_OFFSET |
436 TO_PHYS((u64)amos_page), 1);
437 return xpSalError;
438 }
439 } 579 }
440 } 580 }
441 581
@@ -656,7 +796,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
656 * initialized reserved page. 796 * initialized reserved page.
657 */ 797 */
658static void 798static void
659xpc_identify_act_IRQ_req_sn2(int nasid) 799xpc_identify_activate_IRQ_req_sn2(int nasid)
660{ 800{
661 struct xpc_rsvd_page *remote_rp; 801 struct xpc_rsvd_page *remote_rp;
662 struct xpc_vars_sn2 *remote_vars; 802 struct xpc_vars_sn2 *remote_vars;
@@ -702,10 +842,10 @@ xpc_identify_act_IRQ_req_sn2(int nasid)
702 return; 842 return;
703 } 843 }
704 844
705 part->act_IRQ_rcvd++; 845 part->activate_IRQ_rcvd++;
706 846
707 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = " 847 dev_dbg(xpc_part, "partid for nasid %d is %d; IRQs = %d; HB = "
708 "%ld:0x%lx\n", (int)nasid, (int)partid, part->act_IRQ_rcvd, 848 "%ld:0x%lx\n", (int)nasid, (int)partid, part->activate_IRQ_rcvd,
709 remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]); 849 remote_vars->heartbeat, remote_vars->heartbeating_to_mask[0]);
710 850
711 if (xpc_partition_disengaged(part) && 851 if (xpc_partition_disengaged(part) &&
@@ -831,7 +971,7 @@ xpc_identify_act_IRQ_req_sn2(int nasid)
831 * Return #of IRQs detected. 971 * Return #of IRQs detected.
832 */ 972 */
833int 973int
834xpc_identify_act_IRQ_sender_sn2(void) 974xpc_identify_activate_IRQ_sender_sn2(void)
835{ 975{
836 int word, bit; 976 int word, bit;
837 u64 nasid_mask; 977 u64 nasid_mask;
@@ -872,7 +1012,7 @@ xpc_identify_act_IRQ_sender_sn2(void)
872 nasid = XPC_NASID_FROM_W_B(word, bit); 1012 nasid = XPC_NASID_FROM_W_B(word, bit);
873 dev_dbg(xpc_part, "interrupt from nasid %ld\n", 1013 dev_dbg(xpc_part, "interrupt from nasid %ld\n",
874 nasid); 1014 nasid);
875 xpc_identify_act_IRQ_req_sn2(nasid); 1015 xpc_identify_activate_IRQ_req_sn2(nasid);
876 } 1016 }
877 } 1017 }
878 } 1018 }
@@ -880,14 +1020,14 @@ xpc_identify_act_IRQ_sender_sn2(void)
880} 1020}
881 1021
882static void 1022static void
883xpc_process_act_IRQ_rcvd_sn2(int n_IRQs_expected) 1023xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected)
884{ 1024{
885 int n_IRQs_detected; 1025 int n_IRQs_detected;
886 1026
887 n_IRQs_detected = xpc_identify_act_IRQ_sender_sn2(); 1027 n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2();
888 if (n_IRQs_detected < n_IRQs_expected) { 1028 if (n_IRQs_detected < n_IRQs_expected) {
889 /* retry once to help avoid missing AMO */ 1029 /* retry once to help avoid missing AMO */
890 (void)xpc_identify_act_IRQ_sender_sn2(); 1030 (void)xpc_identify_activate_IRQ_sender_sn2();
891 } 1031 }
892} 1032}
893 1033
@@ -1775,9 +1915,11 @@ xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg)
1775 xpc_acknowledge_msgs_sn2(ch, get, msg->flags); 1915 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
1776} 1916}
1777 1917
1778void 1918int
1779xpc_init_sn2(void) 1919xpc_init_sn2(void)
1780{ 1920{
1921 int ret;
1922
1781 xpc_rsvd_page_init = xpc_rsvd_page_init_sn2; 1923 xpc_rsvd_page_init = xpc_rsvd_page_init_sn2;
1782 xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; 1924 xpc_increment_heartbeat = xpc_increment_heartbeat_sn2;
1783 xpc_offline_heartbeat = xpc_offline_heartbeat_sn2; 1925 xpc_offline_heartbeat = xpc_offline_heartbeat_sn2;
@@ -1788,7 +1930,7 @@ xpc_init_sn2(void)
1788 1930
1789 xpc_initiate_partition_activation = 1931 xpc_initiate_partition_activation =
1790 xpc_initiate_partition_activation_sn2; 1932 xpc_initiate_partition_activation_sn2;
1791 xpc_process_act_IRQ_rcvd = xpc_process_act_IRQ_rcvd_sn2; 1933 xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2;
1792 xpc_setup_infrastructure = xpc_setup_infrastructure_sn2; 1934 xpc_setup_infrastructure = xpc_setup_infrastructure_sn2;
1793 xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2; 1935 xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2;
1794 xpc_make_first_contact = xpc_make_first_contact_sn2; 1936 xpc_make_first_contact = xpc_make_first_contact_sn2;
@@ -1819,9 +1961,30 @@ xpc_init_sn2(void)
1819 1961
1820 xpc_send_msg = xpc_send_msg_sn2; 1962 xpc_send_msg = xpc_send_msg_sn2;
1821 xpc_received_msg = xpc_received_msg_sn2; 1963 xpc_received_msg = xpc_received_msg_sn2;
1964
1965 /* open up protections for IPI and [potentially] AMO operations */
1966 xpc_allow_IPI_ops_sn2();
1967 xpc_allow_AMO_ops_shub_wars_1_1_sn2();
1968
1969 /*
1970 * This is safe to do before the xpc_hb_checker thread has started
1971 * because the handler releases a wait queue. If an interrupt is
1972 * received before the thread is waiting, it will not go to sleep,
1973 * but rather immediately process the interrupt.
1974 */
1975 ret = request_irq(SGI_XPC_ACTIVATE, xpc_handle_activate_IRQ_sn2, 0,
1976 "xpc hb", NULL);
1977 if (ret != 0) {
1978 dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
1979 "errno=%d\n", -ret);
1980 xpc_disallow_IPI_ops_sn2();
1981 }
1982 return ret;
1822} 1983}
1823 1984
1824void 1985void
1825xpc_exit_sn2(void) 1986xpc_exit_sn2(void)
1826{ 1987{
1988 free_irq(SGI_XPC_ACTIVATE, NULL);
1989 xpc_disallow_IPI_ops_sn2();
1827} 1990}