Diffstat (limited to 'drivers/misc/sgi-xp/xp_main.c')
-rw-r--r--   drivers/misc/sgi-xp/xp_main.c   131
1 file changed, 64 insertions(+), 67 deletions(-)
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 196480b691a..66a1d19e08a 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -14,29 +14,48 @@
  *
  */
 
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
-#include <asm/sn/intr.h>
-#include <asm/sn/sn_sal.h>
+#include <linux/device.h>
 #include "xp.h"
 
-/*
- * The export of xp_nofault_PIOR needs to happen here since it is defined
- * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
- * defined here.
- */
-EXPORT_SYMBOL_GPL(xp_nofault_PIOR);
+/* define the XP debug device structures to be used with dev_dbg() et al */
+
+struct device_driver xp_dbg_name = {
+        .name = "xp"
+};
+
+struct device xp_dbg_subname = {
+        .bus_id = {0},          /* set to "" */
+        .driver = &xp_dbg_name
+};
+
+struct device *xp = &xp_dbg_subname;
+
+/* max #of partitions possible */
+short xp_max_npartitions;
+EXPORT_SYMBOL_GPL(xp_max_npartitions);
+
+short xp_partition_id;
+EXPORT_SYMBOL_GPL(xp_partition_id);
+
+u8 xp_region_size;
+EXPORT_SYMBOL_GPL(xp_region_size);
+
+unsigned long (*xp_pa) (void *addr);
+EXPORT_SYMBOL_GPL(xp_pa);
+
+enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
+                                    const unsigned long src_gpa, size_t len);
+EXPORT_SYMBOL_GPL(xp_remote_memcpy);
 
-u64 xp_nofault_PIOR_target;
-EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);
+int (*xp_cpu_to_nasid) (int cpuid);
+EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
 
 /*
  * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
  * users of XPC.
  */
-struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS];
 EXPORT_SYMBOL_GPL(xpc_registrations);
 
 /*
@@ -51,10 +70,9 @@ xpc_notloaded(void)
 struct xpc_interface xpc_interface = {
         (void (*)(int))xpc_notloaded,
         (void (*)(int))xpc_notloaded,
-        (enum xp_retval(*)(short, int, u32, void **))xpc_notloaded,
-        (enum xp_retval(*)(short, int, void *))xpc_notloaded,
-        (enum xp_retval(*)(short, int, void *, xpc_notify_func, void *))
-            xpc_notloaded,
+        (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded,
+        (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func,
+                           void *))xpc_notloaded,
         (void (*)(short, int, void *))xpc_notloaded,
         (enum xp_retval(*)(short, void *))xpc_notloaded
 };
@@ -66,16 +84,14 @@ EXPORT_SYMBOL_GPL(xpc_interface);
 void
 xpc_set_interface(void (*connect) (int),
                   void (*disconnect) (int),
-                  enum xp_retval (*allocate) (short, int, u32, void **),
-                  enum xp_retval (*send) (short, int, void *),
-                  enum xp_retval (*send_notify) (short, int, void *,
+                  enum xp_retval (*send) (short, int, u32, void *, u16),
+                  enum xp_retval (*send_notify) (short, int, u32, void *, u16,
                                                  xpc_notify_func, void *),
                   void (*received) (short, int, void *),
                   enum xp_retval (*partid_to_nasids) (short, void *))
 {
         xpc_interface.connect = connect;
         xpc_interface.disconnect = disconnect;
-        xpc_interface.allocate = allocate;
         xpc_interface.send = send;
         xpc_interface.send_notify = send_notify;
         xpc_interface.received = received;
@@ -91,13 +107,11 @@ xpc_clear_interface(void)
 {
         xpc_interface.connect = (void (*)(int))xpc_notloaded;
         xpc_interface.disconnect = (void (*)(int))xpc_notloaded;
-        xpc_interface.allocate = (enum xp_retval(*)(short, int, u32,
-                                                    void **))xpc_notloaded;
-        xpc_interface.send = (enum xp_retval(*)(short, int, void *))
+        xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16))
             xpc_notloaded;
-        xpc_interface.send_notify = (enum xp_retval(*)(short, int, void *,
-                                                       xpc_notify_func,
+        xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *,
+                                                       u16, xpc_notify_func,
                                                        void *))xpc_notloaded;
         xpc_interface.received = (void (*)(short, int, void *))
             xpc_notloaded;
         xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *))
@@ -135,11 +149,14 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
 {
         struct xpc_registration *registration;
 
-        DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+        DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
         DBUG_ON(payload_size == 0 || nentries == 0);
         DBUG_ON(func == NULL);
         DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
 
+        if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
+                return xpPayloadTooBig;
+
         registration = &xpc_registrations[ch_number];
 
         if (mutex_lock_interruptible(&registration->mutex) != 0)
@@ -152,7 +169,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
         }
 
         /* register the channel for connection */
-        registration->msg_size = XPC_MSG_SIZE(payload_size);
+        registration->entry_size = XPC_MSG_SIZE(payload_size);
         registration->nentries = nentries;
         registration->assigned_limit = assigned_limit;
         registration->idle_limit = idle_limit;
@@ -185,7 +202,7 @@ xpc_disconnect(int ch_number)
 {
         struct xpc_registration *registration;
 
-        DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+        DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
 
         registration = &xpc_registrations[ch_number];
 
@@ -206,7 +223,7 @@ xpc_disconnect(int ch_number)
         registration->func = NULL;
         registration->key = NULL;
         registration->nentries = 0;
-        registration->msg_size = 0;
+        registration->entry_size = 0;
         registration->assigned_limit = 0;
         registration->idle_limit = 0;
 
@@ -221,39 +238,21 @@ EXPORT_SYMBOL_GPL(xpc_disconnect);
 int __init
 xp_init(void)
 {
-        int ret, ch_number;
-        u64 func_addr = *(u64 *)xp_nofault_PIOR;
-        u64 err_func_addr = *(u64 *)xp_error_PIOR;
-
-        if (!ia64_platform_is("sn2"))
-                return -ENODEV;
+        enum xp_retval ret;
+        int ch_number;
 
-        /*
-         * Register a nofault code region which performs a cross-partition
-         * PIO read. If the PIO read times out, the MCA handler will consume
-         * the error and return to a kernel-provided instruction to indicate
-         * an error. This PIO read exists because it is guaranteed to timeout
-         * if the destination is down (AMO operations do not timeout on at
-         * least some CPUs on Shubs <= v1.2, which unfortunately we have to
-         * work around).
-         */
-        ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
-                                       1, 1);
-        if (ret != 0) {
-                printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
-                       ret);
-        }
-        /*
-         * Setup the nofault PIO read target. (There is no special reason why
-         * SH_IPI_ACCESS was selected.)
-         */
-        if (is_shub2())
-                xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
+        if (is_shub())
+                ret = xp_init_sn2();
+        else if (is_uv())
+                ret = xp_init_uv();
         else
-                xp_nofault_PIOR_target = SH1_IPI_ACCESS;
+                ret = xpUnsupported;
+
+        if (ret != xpSuccess)
+                return -ENODEV;
 
         /* initialize the connection registration mutex */
-        for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++)
+        for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++)
                 mutex_init(&xpc_registrations[ch_number].mutex);
 
         return 0;
@@ -264,12 +263,10 @@ module_init(xp_init);
 void __exit
 xp_exit(void)
 {
-        u64 func_addr = *(u64 *)xp_nofault_PIOR;
-        u64 err_func_addr = *(u64 *)xp_error_PIOR;
-
-        /* unregister the PIO read nofault code region */
-        (void)sn_register_nofault_code(func_addr, err_func_addr,
-                                       err_func_addr, 1, 0);
+        if (is_shub())
+                xp_exit_sn2();
+        else if (is_uv())
+                xp_exit_uv();
 }
 
 module_exit(xp_exit);
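
For reference, a minimal sketch of how a kernel-level XPC user might register a channel against the reworked registration path in this diff. The channel number, payload struct, callback, and limits below are hypothetical, invented for illustration; only xpc_connect()/xpc_disconnect(), the xpc_channel_func callback type from xp.h, and the new xpPayloadTooBig / XPC_MSG_SIZE() check correspond to code touched here.

/* Hypothetical XPC consumer sketch -- channel number, payload layout and
 * limits are made up; do not treat this as an in-tree user of the API.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include "xp.h"

#define MY_XP_CHANNEL   0       /* hypothetical; must be < XPC_MAX_NCHANNELS */
#define MY_NENTRIES     32      /* #of entries in the local message queue */

struct my_payload {
        u64 opcode;
        u64 data;
};

/* called by XPC on channel events and on message delivery */
static void
my_channel_func(enum xp_retval reason, short partid, int ch_number,
                void *data, void *key)
{
        /* handle xpMsgReceived, connection up/down, etc. */
}

static int
my_register(void)
{
        enum xp_retval ret;

        /* xpc_connect() now rejects oversized payloads with xpPayloadTooBig
         * when XPC_MSG_SIZE(payload_size) exceeds XPC_MSG_MAX_SIZE */
        ret = xpc_connect(MY_XP_CHANNEL, my_channel_func, NULL,
                          sizeof(struct my_payload), MY_NENTRIES,
                          1 /* assigned_limit */, 1 /* idle_limit */);
        return (ret == xpSuccess) ? 0 : -EBUSY;
}

static void
my_unregister(void)
{
        xpc_disconnect(MY_XP_CHANNEL);
}

The 1/1 limits merely satisfy the DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit) check in xpc_connect(); a real consumer would size the kthread limits and nentries for its workload.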