Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/Kconfig                 |  10
-rw-r--r--   arch/ia64/sn/kernel/xp_main.c     | 289
-rw-r--r--   arch/ia64/sn/kernel/xp_nofault.S  |  31
3 files changed, 330 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cad8346def1d..ce13ad689d19 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -217,6 +217,16 @@ config IA64_SGI_SN_SIM
 	  If you are compiling a kernel that will run under SGI's IA-64
 	  simulator (Medusa) then say Y, otherwise say N.
 
+config IA64_SGI_SN_XP
+	tristate "Support communication between SGI SSIs"
+	depends on MSPEC
+	help
+	  An SGI machine can be divided into multiple Single System
+	  Images which act independently of each other and have
+	  hardware based memory protection from the others. Enabling
+	  this feature will allow for direct communication between SSIs
+	  based on a network adapter and DMA messaging.
+
 config FORCE_MAX_ZONEORDER
 	int
 	default "18"
diff --git a/arch/ia64/sn/kernel/xp_main.c b/arch/ia64/sn/kernel/xp_main.c
new file mode 100644
index 000000000000..3be52a34c80f
--- /dev/null
+++ b/arch/ia64/sn/kernel/xp_main.c
@@ -0,0 +1,289 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+
+/*
+ * Cross Partition (XP) base.
+ *
+ * XP provides a base from which its users can interact
+ * with XPC, yet not be dependent on XPC.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/xp.h>
+
+
+/*
+ * Target of nofault PIO read.
+ */
+u64 xp_nofault_PIOR_target;
+
+
+/*
+ * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
+ * users of XPC.
+ */
+struct xpc_registration xpc_registrations[XPC_NCHANNELS];
+
+
+/*
+ * Initialize the XPC interface to indicate that XPC isn't loaded.
+ */
+static enum xpc_retval xpc_notloaded(void) { return xpcNotLoaded; }
+
+struct xpc_interface xpc_interface = {
+	(void (*)(int)) xpc_notloaded,
+	(void (*)(int)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, int, u32, void **)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, int, void *)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, int, void *, xpc_notify_func, void *))
+			xpc_notloaded,
+	(void (*)(partid_t, int, void *)) xpc_notloaded,
+	(enum xpc_retval (*)(partid_t, void *)) xpc_notloaded
+};
+
+
+/*
+ * XPC calls this when it (the XPC module) has been loaded.
+ */
+void
+xpc_set_interface(void (*connect)(int),
+		void (*disconnect)(int),
+		enum xpc_retval (*allocate)(partid_t, int, u32, void **),
+		enum xpc_retval (*send)(partid_t, int, void *),
+		enum xpc_retval (*send_notify)(partid_t, int, void *,
+				xpc_notify_func, void *),
+		void (*received)(partid_t, int, void *),
+		enum xpc_retval (*partid_to_nasids)(partid_t, void *))
+{
+	xpc_interface.connect = connect;
+	xpc_interface.disconnect = disconnect;
+	xpc_interface.allocate = allocate;
+	xpc_interface.send = send;
+	xpc_interface.send_notify = send_notify;
+	xpc_interface.received = received;
+	xpc_interface.partid_to_nasids = partid_to_nasids;
+}
+
+
+/*
+ * XPC calls this when it (the XPC module) is being unloaded.
+ */
+void
+xpc_clear_interface(void)
+{
+	xpc_interface.connect = (void (*)(int)) xpc_notloaded;
+	xpc_interface.disconnect = (void (*)(int)) xpc_notloaded;
+	xpc_interface.allocate = (enum xpc_retval (*)(partid_t, int, u32,
+					void **)) xpc_notloaded;
+	xpc_interface.send = (enum xpc_retval (*)(partid_t, int, void *))
+					xpc_notloaded;
+	xpc_interface.send_notify = (enum xpc_retval (*)(partid_t, int, void *,
+					xpc_notify_func, void *)) xpc_notloaded;
+	xpc_interface.received = (void (*)(partid_t, int, void *))
+					xpc_notloaded;
+	xpc_interface.partid_to_nasids = (enum xpc_retval (*)(partid_t, void *))
+					xpc_notloaded;
+}
+
+
+/*
+ * Register for automatic establishment of a channel connection whenever
+ * a partition comes up.
+ *
+ * Arguments:
+ *
+ *	ch_number - channel # to register for connection.
+ *	func - function to call for asynchronous notification of channel
+ *	       state changes (i.e., connection, disconnection, error) and
+ *	       the arrival of incoming messages.
+ *	key - pointer to optional user-defined value that gets passed back
+ *	      to the user on any callouts made to func.
+ *	payload_size - size in bytes of the XPC message's payload area which
+ *		       contains a user-defined message. The user should make
+ *		       this large enough to hold their largest message.
+ *	nentries - max #of XPC message entries a message queue can contain.
+ *		   The actual number, which is determined when a connection
+ *		   is established and may be less than requested, will be
+ *		   passed to the user via the xpcConnected callout.
+ *	assigned_limit - max number of kthreads allowed to be processing
+ *			 messages (per connection) at any given instant.
+ *	idle_limit - max number of kthreads allowed to be idle at any given
+ *		     instant.
+ */
+enum xpc_retval
+xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
+		u16 nentries, u32 assigned_limit, u32 idle_limit)
+{
+	struct xpc_registration *registration;
+
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+	DBUG_ON(payload_size == 0 || nentries == 0);
+	DBUG_ON(func == NULL);
+	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);
+
+	registration = &xpc_registrations[ch_number];
+
+	if (down_interruptible(&registration->sema) != 0) {
+		return xpcInterrupted;
+	}
+
+	/* if XPC_CHANNEL_REGISTERED(ch_number) */
+	if (registration->func != NULL) {
+		up(&registration->sema);
+		return xpcAlreadyRegistered;
+	}
+
+	/* register the channel for connection */
+	registration->msg_size = XPC_MSG_SIZE(payload_size);
+	registration->nentries = nentries;
+	registration->assigned_limit = assigned_limit;
+	registration->idle_limit = idle_limit;
+	registration->key = key;
+	registration->func = func;
+
+	up(&registration->sema);
+
+	xpc_interface.connect(ch_number);
+
+	return xpcSuccess;
+}
+
+
+/*
+ * Remove the registration for automatic connection of the specified channel
+ * when a partition comes up.
+ *
+ * Before returning, xpc_disconnect() will wait until all connections on the
+ * specified channel have been closed/torn down. So the caller can be assured
+ * that they will not be receiving any more callouts from XPC to their
+ * function registered via xpc_connect().
+ *
+ * Arguments:
+ *
+ *	ch_number - channel # to unregister.
+ */
+void
+xpc_disconnect(int ch_number)
+{
+	struct xpc_registration *registration;
+
+
+	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
+
+	registration = &xpc_registrations[ch_number];
+
+	/*
+	 * We've decided not to make this a down_interruptible(), since we
+	 * figured XPC's users will just turn around and call xpc_disconnect()
+	 * again anyways, so we might as well wait, if need be.
+	 */
+	down(&registration->sema);
+
+	/* if !XPC_CHANNEL_REGISTERED(ch_number) */
+	if (registration->func == NULL) {
+		up(&registration->sema);
+		return;
+	}
+
+	/* remove the connection registration for the specified channel */
+	registration->func = NULL;
+	registration->key = NULL;
+	registration->nentries = 0;
+	registration->msg_size = 0;
+	registration->assigned_limit = 0;
+	registration->idle_limit = 0;
+
+	xpc_interface.disconnect(ch_number);
+
+	up(&registration->sema);
+
+	return;
+}
+
+
+int __init
+xp_init(void)
+{
+	int ret, ch_number;
+	u64 func_addr = *(u64 *) xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *) xp_error_PIOR;
+
+
+	if (!ia64_platform_is("sn2")) {
+		return -ENODEV;
+	}
+
+	/*
+	 * Register a nofault code region which performs a cross-partition
+	 * PIO read. If the PIO read times out, the MCA handler will consume
+	 * the error and return to a kernel-provided instruction to indicate
+	 * an error. This PIO read exists because it is guaranteed to time out
+	 * if the destination is down (AMO operations do not time out on at
+	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
+	 * work around).
+	 */
+	if ((ret = sn_register_nofault_code(func_addr, err_func_addr,
+					err_func_addr, 1, 1)) != 0) {
+		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
+			ret);
+	}
+	/*
+	 * Set up the nofault PIO read target. (There is no special reason why
+	 * SH_IPI_ACCESS was selected.)
+	 */
+	if (is_shub2()) {
+		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;
+	} else {
+		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
+	}
+
+	/* initialize the connection registration semaphores */
+	for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
+		sema_init(&xpc_registrations[ch_number].sema, 1);  /* mutex */
+	}
+
+	return 0;
+}
+module_init(xp_init);
+
+
+void __exit
+xp_exit(void)
+{
+	u64 func_addr = *(u64 *) xp_nofault_PIOR;
+	u64 err_func_addr = *(u64 *) xp_error_PIOR;
+
+
+	/* unregister the PIO read nofault code region */
+	(void) sn_register_nofault_code(func_addr, err_func_addr,
+					err_func_addr, 1, 0);
+}
+module_exit(xp_exit);
+
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION("Cross Partition (XP) base");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL(xp_nofault_PIOR);
+EXPORT_SYMBOL(xp_nofault_PIOR_target);
+EXPORT_SYMBOL(xpc_registrations);
+EXPORT_SYMBOL(xpc_interface);
+EXPORT_SYMBOL(xpc_clear_interface);
+EXPORT_SYMBOL(xpc_set_interface);
+EXPORT_SYMBOL(xpc_connect);
+EXPORT_SYMBOL(xpc_disconnect);
+
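
[Editor's note] The comment block above xpc_connect() documents the registration API, but no caller appears in this patch. Below is a minimal sketch of how a kernel-level XP user might register and later unregister a channel. It is illustrative only: the channel number, the message layout, and the shape of the xpc_channel_func callback are assumptions, since the real typedef and reason codes live in asm/sn/xp.h, which is not part of this diff.

    #include <linux/module.h>
    #include <asm/sn/xp.h>

    /* illustrative payload; payload_size below is sizeof(struct my_msg) */
    struct my_msg {
        u64 opcode;
        u64 data[4];
    };

    /* assumed callback shape: reason code, partition, channel, data, key */
    static void
    my_channel_func(enum xpc_retval reason, partid_t partid, int ch_number,
                    void *data, void *key)
    {
        if (reason == xpcConnected) {
            /* channel to partition 'partid' is now established */
            return;
        }
        /* other reasons: incoming message, disconnect, or error callout */
    }

    static int __init my_init(void)
    {
        enum xpc_retval ret;

        /* channel 3 is arbitrary; 128 entries, 4 assigned / 2 idle kthreads */
        ret = xpc_connect(3, my_channel_func, NULL, sizeof(struct my_msg),
                          128, 4, 2);
        return (ret == xpcSuccess) ? 0 : -EBUSY;
    }

    static void __exit my_exit(void)
    {
        /* blocks until all connections on channel 3 have been torn down */
        xpc_disconnect(3);
    }

    module_init(my_init);
    module_exit(my_exit);

Per the doc comment, the number of message entries actually granted may be smaller than the 128 requested and is reported via the xpcConnected callout.
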
diff --git a/arch/ia64/sn/kernel/xp_nofault.S b/arch/ia64/sn/kernel/xp_nofault.S
new file mode 100644
index 000000000000..b772543053c9
--- /dev/null
+++ b/arch/ia64/sn/kernel/xp_nofault.S
@@ -0,0 +1,31 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2005 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+
+/*
+ * The xp_nofault_PIOR function takes a pointer to a remote PIO register
+ * and attempts to load and consume a value from it. This function
+ * will be registered as a nofault code block. In the event that the
+ * PIO read fails, the MCA handler will force the error to look
+ * corrected and vector to xp_error_PIOR, which will return an error.
+ *
+ *	extern int xp_nofault_PIOR(void *remote_register);
+ */
+
+	.global xp_nofault_PIOR
+xp_nofault_PIOR:
+	mov	r8=r0			// Stage a success return value
+	ld8.acq	r9=[r32];;		// PIO Read the specified register
+	adds	r9=1,r9			// Add to force a consume
+	br.ret.sptk.many b0;;		// Return success
+
+	.global xp_error_PIOR
+xp_error_PIOR:
+	mov	r8=1			// Return value of 1
+	br.ret.sptk.many b0;;		// Return failure
+
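
[Editor's note] Tying the two new files together: xp_init() registers the xp_nofault_PIOR/xp_error_PIOR pair with SAL so that a timed-out cross-partition PIO read is converted into an error return rather than a machine check. The sketch below shows how a caller layered on XP might use the exported pair; it assumes the caller has already translated xp_nofault_PIOR_target into a pointer in the remote partition's register space, a step that is not shown in this diff, and the helper name is made up for illustration.

    #include <asm/sn/xp.h>

    extern int xp_nofault_PIOR(void *remote_register);

    /*
     * Probe whether a remote partition is still reachable. 'remote_reg'
     * must already point at a PIO register in the remote partition's
     * address space (e.g. the SH_IPI_ACCESS register chosen as
     * xp_nofault_PIOR_target in xp_init()); deriving that address is
     * outside this sketch.
     */
    static int remote_partition_reachable(void *remote_reg)
    {
        /*
         * xp_nofault_PIOR returns 0 (r8 = r0) when the read completes;
         * if the read times out, the MCA handler vectors to
         * xp_error_PIOR, which returns 1 instead.
         */
        return xp_nofault_PIOR(remote_reg) == 0;
    }
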