Diffstat (limited to 'drivers/misc')
-rw-r--r-- | drivers/misc/sgi-gru/gruprocfs.c |   1
-rw-r--r-- | drivers/misc/sgi-xp/xp.h         |   7
-rw-r--r-- | drivers/misc/sgi-xp/xp_main.c    |   7
-rw-r--r-- | drivers/misc/sgi-xp/xp_sn2.c     |  34
-rw-r--r-- | drivers/misc/sgi-xp/xp_uv.c      |  70
-rw-r--r-- | drivers/misc/sgi-xp/xpc.h        |  12
-rw-r--r-- | drivers/misc/sgi-xp/xpc_sn2.c    |  15
-rw-r--r-- | drivers/misc/sgi-xp/xpc_uv.c     | 290
8 files changed, 365 insertions(+), 71 deletions(-)
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 533923f83f1a..73b0ca061bb5 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -317,7 +317,6 @@ int gru_proc_init(void)
 {
         struct proc_entry *p;
 
-        proc_mkdir("sgi_uv", NULL);
         proc_gru = proc_mkdir("sgi_uv/gru", NULL);
 
         for (p = proc_files; p->name; p++)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index ed1722e50049..7b4cbd5e03e9 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -194,9 +194,10 @@ enum xp_retval {
         xpGruSendMqError,       /* 59: gru send message queue related error */
 
         xpBadChannelNumber,     /* 60: invalid channel number */
-        xpBadMsgType,           /* 60: invalid message type */
+        xpBadMsgType,           /* 61: invalid message type */
+        xpBiosError,            /* 62: BIOS error */
 
-        xpUnknownReason         /* 61: unknown reason - must be last in enum */
+        xpUnknownReason         /* 63: unknown reason - must be last in enum */
 };
 
 /*
@@ -345,6 +346,8 @@ extern unsigned long (*xp_pa) (void *);
 extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
                                            size_t);
 extern int (*xp_cpu_to_nasid) (int);
+extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
+extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
 
 extern u64 xp_nofault_PIOR_target;
 extern int xp_nofault_PIOR(void *);
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 66a1d19e08ad..9a2e77172d94 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy);
 int (*xp_cpu_to_nasid) (int cpuid);
 EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
 
+enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
+                                        unsigned long size);
+EXPORT_SYMBOL_GPL(xp_expand_memprotect);
+enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
+                                          unsigned long size);
+EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
+
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index 1440134caf31..fb3ec9d735a9 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid)
         return cpuid_to_nasid(cpuid);
 }
 
+static enum xp_retval
+xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+        u64 nasid_array = 0;
+        int ret;
+
+        ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+                                   &nasid_array);
+        if (ret != 0) {
+                dev_err(xp, "sn_change_memprotect(,, "
+                        "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+                return xpSalError;
+        }
+        return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
+{
+        u64 nasid_array = 0;
+        int ret;
+
+        ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+                                   &nasid_array);
+        if (ret != 0) {
+                dev_err(xp, "sn_change_memprotect(,, "
+                        "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+                return xpSalError;
+        }
+        return xpSuccess;
+}
+
 enum xp_retval
 xp_init_sn2(void)
 {
@@ -132,6 +164,8 @@ xp_init_sn2(void)
         xp_pa = xp_pa_sn2;
         xp_remote_memcpy = xp_remote_memcpy_sn2;
         xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
+        xp_expand_memprotect = xp_expand_memprotect_sn2;
+        xp_restrict_memprotect = xp_restrict_memprotect_sn2;
 
         return xp_register_nofault_code_sn2();
 }
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index d9f7ce2510bc..d238576b26fa 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -15,6 +15,11 @@
 
 #include <linux/device.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/grukservices.h"
 #include "xp.h"
 
@@ -49,18 +54,79 @@ xp_cpu_to_nasid_uv(int cpuid)
         return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
 }
 
+static enum xp_retval
+xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+        int ret;
+
+#if defined CONFIG_X86_64
+        ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
+        if (ret != BIOS_STATUS_SUCCESS) {
+                dev_err(xp, "uv_bios_change_memprotect(,, "
+                        "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
+                return xpBiosError;
+        }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        u64 nasid_array;
+
+        ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
+                                   &nasid_array);
+        if (ret != 0) {
+                dev_err(xp, "sn_change_memprotect(,, "
+                        "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
+                return xpSalError;
+        }
+#else
+        #error not a supported configuration
+#endif
+        return xpSuccess;
+}
+
+static enum xp_retval
+xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
+{
+        int ret;
+
+#if defined CONFIG_X86_64
+        ret = uv_bios_change_memprotect(phys_addr, size,
+                                        UV_MEMPROT_RESTRICT_ACCESS);
+        if (ret != BIOS_STATUS_SUCCESS) {
+                dev_err(xp, "uv_bios_change_memprotect(,, "
+                        "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
+                return xpBiosError;
+        }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        u64 nasid_array;
+
+        ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
+                                   &nasid_array);
+        if (ret != 0) {
+                dev_err(xp, "sn_change_memprotect(,, "
+                        "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
+                return xpSalError;
+        }
+#else
+        #error not a supported configuration
+#endif
+        return xpSuccess;
+}
+
 enum xp_retval
 xp_init_uv(void)
 {
         BUG_ON(!is_uv());
 
         xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
-        xp_partition_id = 0;    /* !!! not correct value */
-        xp_region_size = 0;     /* !!! not correct value */
+        xp_partition_id = sn_partition_id;
+        xp_region_size = sn_region_size;
 
         xp_pa = xp_pa_uv;
         xp_remote_memcpy = xp_remote_memcpy_uv;
         xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
+        xp_expand_memprotect = xp_expand_memprotect_uv;
+        xp_restrict_memprotect = xp_restrict_memprotect_uv;
 
         return xpSuccess;
 }
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 619208d61862..a5bd658c2e83 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 {
                                      xpc_nasid_mask_nlongs))
 
 /*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+        void *address;          /* address of GRU message queue */
+        unsigned int order;     /* size of GRU message queue as a power of 2 */
+        int irq;                /* irq raised when message is received in mq */
+        int mmr_blade;          /* blade where watchlist was allocated from */
+        unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+        int watchlist_num;      /* number of watchlist allocatd by BIOS */
+};
+
+/*
  * The activate_mq is used to send/receive GRU messages that affect XPC's
  * heartbeat, partition active state, and channel state. This is UV only.
  */
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index b4882ccf6344..73b7fb8de47a 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES];
 static enum xp_retval
 xpc_allow_amo_ops_sn2(struct amo *amos_page)
 {
-        u64 nasid_array = 0;
-        int ret;
+        enum xp_retval ret = xpSuccess;
 
         /*
          * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
          * collides with memory operations. On those systems we call
          * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
          */
-        if (!enable_shub_wars_1_1()) {
-                ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
-                                           SN_MEMPROT_ACCESS_CLASS_1,
-                                           &nasid_array);
-                if (ret != 0)
-                        return xpSalError;
-        }
-        return xpSuccess;
+        if (!enable_shub_wars_1_1())
+                ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
+
+        return ret;
 }
 
 /*
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 1ac694c01623..684b2dd17583 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,15 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_irq.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/gru.h"
 #include "../sgi-gru/grukservices.h"
 #include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
 static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
 
 #define XPC_ACTIVATE_MSG_SIZE_UV        (1 * GRU_CACHE_LINE_BYTES)
-#define XPC_NOTIFY_MSG_SIZE_UV          (2 * GRU_CACHE_LINE_BYTES)
+#define XPC_ACTIVATE_MQ_SIZE_UV         (4 * XP_MAX_NPARTITIONS_UV * \
+                                         XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_ACTIVATE_IRQ_NAME           "xpc_activate"
 
-#define XPC_ACTIVATE_MQ_SIZE_UV         (4 * XP_MAX_NPARTITIONS_UV * \
-                                         XPC_ACTIVATE_MSG_SIZE_UV)
-#define XPC_NOTIFY_MQ_SIZE_UV           (4 * XP_MAX_NPARTITIONS_UV * \
-                                         XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_MSG_SIZE_UV          (2 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MQ_SIZE_UV           (4 * XP_MAX_NPARTITIONS_UV * \
+                                         XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_IRQ_NAME             "xpc_notify"
 
-static void *xpc_activate_mq_uv;
-static void *xpc_notify_mq_uv;
+static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
 static int
 xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
         return 0;
 }
 
-static void *
-xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
+static int
+xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+{
+#if defined CONFIG_X86_64
+        mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
+        if (mq->irq < 0) {
+                dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+                        mq->irq);
+        }
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        int mmr_pnode;
+        unsigned long mmr_value;
+
+        if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
+                mq->irq = SGI_XPC_ACTIVATE;
+        else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
+                mq->irq = SGI_XPC_NOTIFY;
+        else
+                return -EINVAL;
+
+        mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+        mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
+
+        uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+        #error not a supported configuration
+#endif
+
+        return 0;
+}
+
+static void
+xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
+{
+#if defined CONFIG_X86_64
+        uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        int mmr_pnode;
+        unsigned long mmr_value;
+
+        mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+        mmr_value = 1UL << 16;
+
+        uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+        #error not a supported configuration
+#endif
+}
+
+static int
+xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
+{
+        int ret;
+
+#if defined CONFIG_X86_64
+        ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+                                         &mq->mmr_offset);
+        if (ret < 0) {
+                dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+                        "ret=%d\n", ret);
+                return ret;
+        }
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        ret = sn_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+                                    &mq->mmr_offset);
+        if (ret < 0) {
+                dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
+                        ret);
+                return -EBUSY;
+        }
+#else
+        #error not a supported configuration
+#endif
+
+        mq->watchlist_num = ret;
+        return 0;
+}
+
+static void
+xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
+{
+        int ret;
+
+#if defined CONFIG_X86_64
+        ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+        BUG_ON(ret != BIOS_STATUS_SUCCESS);
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+        BUG_ON(ret != SALRET_OK);
+#else
+        #error not a supported configuration
+#endif
+}
+
+static struct xpc_gru_mq_uv *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
                      irq_handler_t irq_handler)
 {
+        enum xp_retval xp_ret;
         int ret;
         int nid;
-        int mq_order;
+        int pg_order;
         struct page *page;
-        void *mq;
+        struct xpc_gru_mq_uv *mq;
+
+        mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
+        if (mq == NULL) {
+                dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+                        "a xpc_gru_mq_uv structure\n");
+                ret = -ENOMEM;
+                goto out_1;
+        }
+
+        pg_order = get_order(mq_size);
+        mq->order = pg_order + PAGE_SHIFT;
+        mq_size = 1UL << mq->order;
+
+        mq->mmr_blade = uv_cpu_to_blade_id(cpu);
 
-        nid = cpu_to_node(cpuid);
-        mq_order = get_order(mq_size);
+        nid = cpu_to_node(cpu);
         page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-                                mq_order);
+                                pg_order);
         if (page == NULL) {
                 dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
                         "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
-                return NULL;
+                ret = -ENOMEM;
+                goto out_2;
         }
+        mq->address = page_address(page);
 
-        mq = page_address(page);
-        ret = gru_create_message_queue(mq, mq_size);
+        ret = gru_create_message_queue(mq->address, mq_size);
         if (ret != 0) {
                 dev_err(xpc_part, "gru_create_message_queue() returned "
                         "error=%d\n", ret);
-                free_pages((unsigned long)mq, mq_order);
-                return NULL;
+                ret = -EINVAL;
+                goto out_3;
         }
 
-        /* !!! Need to do some other things to set up IRQ */
+        /* enable generation of irq when GRU mq operation occurs to this mq */
+        ret = xpc_gru_mq_watchlist_alloc_uv(mq);
+        if (ret != 0)
+                goto out_3;
 
-        ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
+        ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+        if (ret != 0)
+                goto out_4;
+
+        ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
         if (ret != 0) {
                 dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
-                        irq, ret);
-                free_pages((unsigned long)mq, mq_order);
-                return NULL;
+                        mq->irq, ret);
+                goto out_5;
         }
 
-        /* !!! enable generation of irq when GRU mq op occurs to this mq */
-
-        /* ??? allow other partitions to access GRU mq? */
+        /* allow other partitions to access this GRU mq */
+        xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
+        if (xp_ret != xpSuccess) {
+                ret = -EACCES;
+                goto out_6;
+        }
 
         return mq;
+
+        /* something went wrong */
+out_6:
+        free_irq(mq->irq, NULL);
+out_5:
+        xpc_release_gru_mq_irq_uv(mq);
+out_4:
+        xpc_gru_mq_watchlist_free_uv(mq);
+out_3:
+        free_pages((unsigned long)mq->address, pg_order);
+out_2:
+        kfree(mq);
+out_1:
+        return ERR_PTR(ret);
 }
 
 static void
-xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
+xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
 {
-        /* ??? disallow other partitions to access GRU mq? */
+        unsigned int mq_size;
+        int pg_order;
+        int ret;
+
+        /* disallow other partitions to access GRU mq */
+        mq_size = 1UL << mq->order;
+        ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
+        BUG_ON(ret != xpSuccess);
 
-        /* !!! disable generation of irq when GRU mq op occurs to this mq */
+        /* unregister irq handler and release mq irq/vector mapping */
+        free_irq(mq->irq, NULL);
+        xpc_release_gru_mq_irq_uv(mq);
 
-        free_irq(irq, NULL);
+        /* disable generation of irq when GRU mq op occurs to this mq */
+        xpc_gru_mq_watchlist_free_uv(mq);
 
-        free_pages((unsigned long)mq, get_order(mq_size));
+        pg_order = mq->order - PAGE_SHIFT;
+        free_pages((unsigned long)mq->address, pg_order);
+
+        kfree(mq);
 }
 
 static enum xp_retval
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
         struct xpc_partition *part;
         int wakeup_hb_checker = 0;
 
-        while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+        while (1) {
+                msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
+                if (msg_hdr == NULL)
+                        break;
 
                 partid = msg_hdr->partid;
                 if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
                         }
                 }
 
-                gru_free_message(xpc_activate_mq_uv, msg_hdr);
+                gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
         }
 
         if (wakeup_hb_checker)
@@ -482,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
         struct xpc_partition_uv *part_uv = &part->sn.uv;
 
         /*
-         * !!! Make our side think that the remote parition sent an activate
+         * !!! Make our side think that the remote partition sent an activate
          * !!! message our way by doing what the activate IRQ handler would
          * !!! do had one really been sent.
          */
@@ -500,14 +660,39 @@ static enum xp_retval
 xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
                                   size_t *len)
 {
-        /* !!! call the UV version of sn_partition_reserved_page_pa() */
-        return xpUnsupported;
+        s64 status;
+        enum xp_retval ret;
+
+#if defined CONFIG_X86_64
+        status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
+                                          (u64 *)len);
+        if (status == BIOS_STATUS_SUCCESS)
+                ret = xpSuccess;
+        else if (status == BIOS_STATUS_MORE_PASSES)
+                ret = xpNeedMoreInfo;
+        else
+                ret = xpBiosError;
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+        status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
+        if (status == SALRET_OK)
+                ret = xpSuccess;
+        else if (status == SALRET_MORE_PASSES)
+                ret = xpNeedMoreInfo;
+        else
+                ret = xpSalError;
+
+#else
+        #error not a supported configuration
+#endif
+
+        return ret;
 }
 
 static int
 xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
 {
-        rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+        rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
         return 0;
 }
 
@@ -1411,22 +1596,18 @@ xpc_init_uv(void)
                 return -E2BIG;
         }
 
-        /* ??? The cpuid argument's value is 0, is that what we want? */
-        /* !!! The irq argument's value isn't correct. */
-        xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
+        xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+                                                  XPC_ACTIVATE_IRQ_NAME,
                                                   xpc_handle_activate_IRQ_uv);
-        if (xpc_activate_mq_uv == NULL)
-                return -ENOMEM;
+        if (IS_ERR(xpc_activate_mq_uv))
+                return PTR_ERR(xpc_activate_mq_uv);
 
-        /* ??? The cpuid argument's value is 0, is that what we want? */
-        /* !!! The irq argument's value isn't correct. */
-        xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
+        xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+                                                XPC_NOTIFY_IRQ_NAME,
                                                 xpc_handle_notify_IRQ_uv);
-        if (xpc_notify_mq_uv == NULL) {
-                /* !!! The irq argument's value isn't correct. */
-                xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
-                                      XPC_ACTIVATE_MQ_SIZE_UV, 0);
-                return -ENOMEM;
+        if (IS_ERR(xpc_notify_mq_uv)) {
+                xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+                return PTR_ERR(xpc_notify_mq_uv);
         }
 
         return 0;
@@ -1435,9 +1616,6 @@ xpc_init_uv(void)
 void
 xpc_exit_uv(void)
 {
-        /* !!! The irq argument's value isn't correct. */
-        xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
-
-        /* !!! The irq argument's value isn't correct. */
-        xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
+        xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+        xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }
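
For illustration only, and not part of the commit above: a minimal sketch of the usage pattern behind the new xp_expand_memprotect()/xp_restrict_memprotect() hooks declared in xp.h. The function example_share_region() below is hypothetical; only the xp_pa(), xp_expand_memprotect(), and xp_restrict_memprotect() calls come from the patch, mirroring how xpc_create_gru_mq_uv() and xpc_destroy_gru_mq_uv() use them.

/*
 * Hypothetical caller (illustration only): widen protections before
 * exporting a region to other partitions, restore them on teardown.
 */
static enum xp_retval
example_share_region(void *addr, size_t size)
{
        enum xp_retval ret;

        /* allow other partitions to access this region */
        ret = xp_expand_memprotect(xp_pa(addr), size);
        if (ret != xpSuccess)
                return ret;     /* xpBiosError on UV, xpSalError on sn2 */

        /* ... cross-partition traffic uses the region here ... */

        /* disallow remote access again when the region is torn down */
        return xp_restrict_memprotect(xp_pa(addr), size);
}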