author     Dean Nelson <dcn@sgi.com>                          2008-07-30 01:34:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-07-30 12:41:50 -0400
commit     5b8669dfd110a62a74eea525a009342f73987ea0 (patch)
tree       04572d8508f450131298b6ec072e97aa9fdba539 /drivers/misc/sgi-xp/xpc_uv.c
parent     83469b5525b4a35be40b17cb41d64118d84d9f80 (diff)
sgi-xp: setup the activate GRU message queue
Setup the activate GRU message queue that is used for partition activation
and channel connection on UV systems.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
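
The mechanism underlying the patch: each partition allocates a memory-backed GRU message queue, publishes the queue's global physical address (gpa) through the reserved page, and remote partitions deposit fixed-size messages into it directly. A minimal sketch of that round trip, assuming only the GRU kservices calls that appear in the diff below (error handling elided; remote_mq_gpa and the message contents are illustrative, not part of the patch):

        /* Sender: deposit a message into the remote partition's queue. */
        struct xpc_activate_mq_msghdr_uv msg;   /* header fields set by caller */
        enum xp_retval ret;

        ret = xpc_send_gru_msg(remote_mq_gpa, &msg, sizeof(msg));

        /* Receiver (IRQ handler): drain the local queue until empty. */
        struct xpc_activate_mq_msghdr_uv *msg_hdr;

        while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
                /* ... dispatch on msg_hdr->type ... */
                gru_free_message(xpc_activate_mq_uv, msg_hdr);  /* release slot */
        }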
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_uv.c')
 -rw-r--r--  drivers/misc/sgi-xp/xpc_uv.c | 781
 1 file changed, 751 insertions, 30 deletions
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index c2d4ddd6e955..689cb5c68ccf 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -14,41 +14,528 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/device.h>
 #include <asm/uv/uv_hub.h>
+#include "../sgi-gru/gru.h"
 #include "../sgi-gru/grukservices.h"
 #include "xpc.h"
 
+static atomic64_t xpc_heartbeat_uv;
 static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
 
-static void *xpc_activate_mq;
+#define XPC_ACTIVATE_MSG_SIZE_UV        (1 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MSG_SIZE_UV          (2 * GRU_CACHE_LINE_BYTES)
+
+#define XPC_ACTIVATE_MQ_SIZE_UV         (4 * XP_MAX_NPARTITIONS_UV * \
+                                         XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_NOTIFY_MQ_SIZE_UV           (4 * XP_MAX_NPARTITIONS_UV * \
+                                         XPC_NOTIFY_MSG_SIZE_UV)
+
+static void *xpc_activate_mq_uv;
+static void *xpc_notify_mq_uv;
+
+static int
+xpc_setup_partitions_sn_uv(void)
+{
+        short partid;
+        struct xpc_partition_uv *part_uv;
+
+        for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
+                part_uv = &xpc_partitions[partid].sn.uv;
+
+                spin_lock_init(&part_uv->flags_lock);
+                part_uv->remote_act_state = XPC_P_AS_INACTIVE;
+        }
+        return 0;
+}
+
+static void *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
+                     irq_handler_t irq_handler)
+{
+        int ret;
+        int nid;
+        int mq_order;
+        struct page *page;
+        void *mq;
+
+        nid = cpu_to_node(cpuid);
+        mq_order = get_order(mq_size);
+        page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+                                mq_order);
+        if (page == NULL)
+                return NULL;
+
+        mq = page_address(page);
+        ret = gru_create_message_queue(mq, mq_size);
+        if (ret != 0) {
+                dev_err(xpc_part, "gru_create_message_queue() returned "
+                        "error=%d\n", ret);
+                free_pages((unsigned long)mq, mq_order);
+                return NULL;
+        }
+
+        /* !!! Need to do some other things to set up IRQ */
+
+        ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
+        if (ret != 0) {
+                dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
+                        irq, ret);
+                free_pages((unsigned long)mq, mq_order);
+                return NULL;
+        }
+
+        /* !!! enable generation of irq when GRU mq op occurs to this mq */
+
+        /* ??? allow other partitions to access GRU mq? */
+
+        return mq;
+}
 
 static void
-xpc_send_local_activate_IRQ_uv(struct xpc_partition *part)
+xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
+{
+        /* ??? disallow other partitions to access GRU mq? */
+
+        /* !!! disable generation of irq when GRU mq op occurs to this mq */
+
+        free_irq(irq, NULL);
+
+        free_pages((unsigned long)mq, get_order(mq_size));
+}
+
+static enum xp_retval
+xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size)
 {
+        enum xp_retval xp_ret;
+        int ret;
+
+        while (1) {
+                ret = gru_send_message_gpa(mq_gpa, msg, msg_size);
+                if (ret == MQE_OK) {
+                        xp_ret = xpSuccess;
+                        break;
+                }
+
+                if (ret == MQE_QUEUE_FULL) {
+                        dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
+                                "error=MQE_QUEUE_FULL\n");
+                        /* !!! handle QLimit reached; delay & try again */
+                        /* ??? Do we add a limit to the number of retries? */
+                        (void)msleep_interruptible(10);
+                } else if (ret == MQE_CONGESTION) {
+                        dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
+                                "error=MQE_CONGESTION\n");
+                        /* !!! handle LB Overflow; simply try again */
+                        /* ??? Do we add a limit to the number of retries? */
+                } else {
+                        /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
+                        dev_err(xpc_chan, "gru_send_message_gpa() returned "
+                                "error=%d\n", ret);
+                        xp_ret = xpGruSendMqError;
+                        break;
+                }
+        }
+        return xp_ret;
+}
+
+static void
+xpc_process_activate_IRQ_rcvd_uv(void)
+{
+        unsigned long irq_flags;
+        short partid;
+        struct xpc_partition *part;
+        u8 act_state_req;
+
+        DBUG_ON(xpc_activate_IRQ_rcvd == 0);
+
+        spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+        for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
+                part = &xpc_partitions[partid];
+
+                if (part->sn.uv.act_state_req == 0)
+                        continue;
+
+                xpc_activate_IRQ_rcvd--;
+                BUG_ON(xpc_activate_IRQ_rcvd < 0);
+
+                act_state_req = part->sn.uv.act_state_req;
+                part->sn.uv.act_state_req = 0;
+                spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+                if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
+                        if (part->act_state == XPC_P_AS_INACTIVE)
+                                xpc_activate_partition(part);
+                        else if (part->act_state == XPC_P_AS_DEACTIVATING)
+                                XPC_DEACTIVATE_PARTITION(part, xpReactivating);
+
+                } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
+                        if (part->act_state == XPC_P_AS_INACTIVE)
+                                xpc_activate_partition(part);
+                        else
+                                XPC_DEACTIVATE_PARTITION(part, xpReactivating);
+
+                } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
+                        XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);
+
+                } else {
+                        BUG();
+                }
+
+                spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+                if (xpc_activate_IRQ_rcvd == 0)
+                        break;
+        }
+        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+}
+
+static irqreturn_t
+xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
+{
+        unsigned long irq_flags;
+        struct xpc_activate_mq_msghdr_uv *msg_hdr;
+        short partid;
+        struct xpc_partition *part;
+        struct xpc_partition_uv *part_uv;
+        struct xpc_openclose_args *args;
+        int wakeup_hb_checker = 0;
+
+        while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+
+                partid = msg_hdr->partid;
+                if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
+                        dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() invalid"
+                                "partid=0x%x passed in message\n", partid);
+                        gru_free_message(xpc_activate_mq_uv, msg_hdr);
+                        continue;
+                }
+                part = &xpc_partitions[partid];
+                part_uv = &part->sn.uv;
+
+                part_uv->remote_act_state = msg_hdr->act_state;
+
+                switch (msg_hdr->type) {
+                case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
+                        /* syncing of remote_act_state was just done above */
+                        break;
+
+                case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
+                        struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
+                            msg_hdr;
+                        part_uv->heartbeat = msg->heartbeat;
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
+                        struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
+                            msg_hdr;
+                        part_uv->heartbeat = msg->heartbeat;
+                        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                        part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
+                        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
+                        struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
+                            msg_hdr;
+                        part_uv->heartbeat = msg->heartbeat;
+                        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                        part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
+                        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
+                        struct xpc_activate_mq_msg_activate_req_uv *msg;
+
+                        /*
+                         * ??? Do we deal here with ts_jiffies being different
+                         * ??? if act_state != XPC_P_AS_INACTIVE instead of
+                         * ??? below?
+                         */
+                        msg = (struct xpc_activate_mq_msg_activate_req_uv *)
+                            msg_hdr;
+                        spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
+                                          irq_flags);
+                        if (part_uv->act_state_req == 0)
+                                xpc_activate_IRQ_rcvd++;
+                        part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
+                        part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
+                        part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
+                        part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa;
+                        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
+                                               irq_flags);
+                        wakeup_hb_checker++;
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
+                        struct xpc_activate_mq_msg_deactivate_req_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_deactivate_req_uv *)
+                            msg_hdr;
+                        spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
+                                          irq_flags);
+                        if (part_uv->act_state_req == 0)
+                                xpc_activate_IRQ_rcvd++;
+                        part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
+                        part_uv->reason = msg->reason;
+                        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
+                                               irq_flags);
+                        wakeup_hb_checker++;
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
+                        struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_chctl_closerequest_uv
+                               *)msg_hdr;
+                        args = &part->remote_openclose_args[msg->ch_number];
+                        args->reason = msg->reason;
+
+                        spin_lock_irqsave(&part->chctl_lock, irq_flags);
+                        part->chctl.flags[msg->ch_number] |=
+                            XPC_CHCTL_CLOSEREQUEST;
+                        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+                        xpc_wakeup_channel_mgr(part);
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
+                        struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_chctl_closereply_uv *)
+                            msg_hdr;
+
+                        spin_lock_irqsave(&part->chctl_lock, irq_flags);
+                        part->chctl.flags[msg->ch_number] |=
+                            XPC_CHCTL_CLOSEREPLY;
+                        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+                        xpc_wakeup_channel_mgr(part);
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
+                        struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_chctl_openrequest_uv
+                               *)msg_hdr;
+                        args = &part->remote_openclose_args[msg->ch_number];
+                        args->msg_size = msg->msg_size;
+                        args->local_nentries = msg->local_nentries;
+
+                        spin_lock_irqsave(&part->chctl_lock, irq_flags);
+                        part->chctl.flags[msg->ch_number] |=
+                            XPC_CHCTL_OPENREQUEST;
+                        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+                        xpc_wakeup_channel_mgr(part);
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
+                        struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+
+                        msg = (struct xpc_activate_mq_msg_chctl_openreply_uv *)
+                            msg_hdr;
+                        args = &part->remote_openclose_args[msg->ch_number];
+                        args->remote_nentries = msg->remote_nentries;
+                        args->local_nentries = msg->local_nentries;
+                        args->local_msgqueue_pa = msg->local_notify_mq_gpa;
+
+                        spin_lock_irqsave(&part->chctl_lock, irq_flags);
+                        part->chctl.flags[msg->ch_number] |=
+                            XPC_CHCTL_OPENREPLY;
+                        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+                        xpc_wakeup_channel_mgr(part);
+                        break;
+                }
+                case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
+                        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                        part_uv->flags |= XPC_P_ENGAGED_UV;
+                        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                        break;
+
+                case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
+                        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+                        part_uv->flags &= ~XPC_P_ENGAGED_UV;
+                        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+                        break;
+
+                default:
+                        dev_err(xpc_part, "received unknown activate_mq msg "
+                                "type=%d from partition=%d\n", msg_hdr->type,
+                                partid);
+                }
+
+                if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
+                    part->remote_rp_ts_jiffies != 0) {
+                        /*
+                         * ??? Does what we do here need to be sensitive to
+                         * ??? act_state or remote_act_state?
+                         */
+                        spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
+                                          irq_flags);
+                        if (part_uv->act_state_req == 0)
+                                xpc_activate_IRQ_rcvd++;
+                        part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
+                        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
+                                               irq_flags);
+                        wakeup_hb_checker++;
+                }
+
+                gru_free_message(xpc_activate_mq_uv, msg_hdr);
+        }
+
+        if (wakeup_hb_checker)
+                wake_up_interruptible(&xpc_activate_IRQ_wq);
+
+        return IRQ_HANDLED;
+}
+
+static enum xp_retval
+xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
+                         int msg_type)
+{
+        struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
+
+        DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);
+
+        msg_hdr->type = msg_type;
+        msg_hdr->partid = XPC_PARTID(part);
+        msg_hdr->act_state = part->act_state;
+        msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;
+
+        /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
+        return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg,
+                                msg_size);
+}
+
+static void
+xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
+                              size_t msg_size, int msg_type)
+{
+        enum xp_retval ret;
+
+        ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
+        if (unlikely(ret != xpSuccess))
+                XPC_DEACTIVATE_PARTITION(part, ret);
+}
+
+static void
+xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
+                            void *msg, size_t msg_size, int msg_type)
+{
+        struct xpc_partition *part = &xpc_partitions[ch->number];
+        enum xp_retval ret;
+
+        ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
+        if (unlikely(ret != xpSuccess)) {
+                if (irq_flags != NULL)
+                        spin_unlock_irqrestore(&ch->lock, *irq_flags);
+
+                XPC_DEACTIVATE_PARTITION(part, ret);
+
+                if (irq_flags != NULL)
+                        spin_lock_irqsave(&ch->lock, *irq_flags);
+        }
+}
+
+static void
+xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
+{
+        unsigned long irq_flags;
+        struct xpc_partition_uv *part_uv = &part->sn.uv;
+
         /*
          * !!! Make our side think that the remote parition sent an activate
-         * !!! message our way. Also do what the activate IRQ handler would
+         * !!! message our way by doing what the activate IRQ handler would
          * !!! do had one really been sent.
          */
+
+        spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+        if (part_uv->act_state_req == 0)
+                xpc_activate_IRQ_rcvd++;
+        part_uv->act_state_req = act_state_req;
+        spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+        wake_up_interruptible(&xpc_activate_IRQ_wq);
 }
 
 static enum xp_retval
-xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp)
+xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
+                                  size_t *len)
 {
-        /* !!! need to have established xpc_activate_mq earlier */
-        rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq);
-        return xpSuccess;
+        /* !!! call the UV version of sn_partition_reserved_page_pa() */
+        return xpUnsupported;
+}
+
+static int
+xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
+{
+        rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+        return 0;
+}
+
+static void
+xpc_send_heartbeat_uv(int msg_type)
+{
+        short partid;
+        struct xpc_partition *part;
+        struct xpc_activate_mq_msg_heartbeat_req_uv msg;
+
+        /*
+         * !!! On uv we're broadcasting a heartbeat message every 5 seconds.
+         * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20
+         * !!! seconds. This is an increase in numalink traffic.
+         * ??? Is this good?
+         */
+
+        msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv);
+
+        partid = find_first_bit(xpc_heartbeating_to_mask_uv,
+                                XP_MAX_NPARTITIONS_UV);
+
+        while (partid < XP_MAX_NPARTITIONS_UV) {
+                part = &xpc_partitions[partid];
+
+                xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+                                              msg_type);
+
+                partid = find_next_bit(xpc_heartbeating_to_mask_uv,
+                                       XP_MAX_NPARTITIONS_UV, partid + 1);
+        }
 }
 
 static void
 xpc_increment_heartbeat_uv(void)
 {
-        /* !!! send heartbeat msg to xpc_heartbeating_to_mask partids */
+        xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV);
+}
+
+static void
+xpc_offline_heartbeat_uv(void)
+{
+        xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
+}
+
+static void
+xpc_online_heartbeat_uv(void)
+{
+        xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV);
 }
 
 static void
 xpc_heartbeat_init_uv(void)
 {
+        atomic64_set(&xpc_heartbeat_uv, 0);
         bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
         xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0];
 }
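
Note: xpc_send_gru_msg() above retries forever on MQE_QUEUE_FULL and MQE_CONGESTION, and its ??? comments leave open whether the retries should be capped. A possible bounded variant, purely a sketch (the retry-limit macro and its value are invented here, not part of the patch):

        #define XPC_SEND_RETRY_LIMIT_UV 100     /* hypothetical cap */

        int retries = 0;

        while (1) {
                ret = gru_send_message_gpa(mq_gpa, msg, msg_size);
                if (ret == MQE_OK)
                        return xpSuccess;
                if (ret != MQE_QUEUE_FULL && ret != MQE_CONGESTION)
                        return xpGruSendMqError;        /* unexpected error */
                if (++retries > XPC_SEND_RETRY_LIMIT_UV)
                        return xpGruSendMqError;        /* give up; caller deactivates */
                if (ret == MQE_QUEUE_FULL)
                        (void)msleep_interruptible(10); /* let receiver drain */
        }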
@@ -56,48 +543,94 @@ xpc_heartbeat_init_uv
 static void
 xpc_heartbeat_exit_uv(void)
 {
-        /* !!! send heartbeat_offline msg to xpc_heartbeating_to_mask partids */
+        xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV);
+}
+
+static enum xp_retval
+xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
+{
+        struct xpc_partition_uv *part_uv = &part->sn.uv;
+        enum xp_retval ret = xpNoHeartbeat;
+
+        if (part_uv->remote_act_state != XPC_P_AS_INACTIVE &&
+            part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) {
+
+                if (part_uv->heartbeat != part->last_heartbeat ||
+                    (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) {
+
+                        part->last_heartbeat = part_uv->heartbeat;
+                        ret = xpSuccess;
+                }
+        }
+        return ret;
 }
 
 static void
 xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
-                                    unsigned long remote_rp_pa, int nasid)
+                                    unsigned long remote_rp_gpa, int nasid)
 {
         short partid = remote_rp->SAL_partid;
         struct xpc_partition *part = &xpc_partitions[partid];
+        struct xpc_activate_mq_msg_activate_req_uv msg;
 
-        /*
-         * !!! Setup part structure with the bits of info we can glean from the rp:
-         * !!!   part->remote_rp_pa = remote_rp_pa;
-         * !!!   part->sn.uv.activate_mq_gpa = remote_rp->sn.activate_mq_gpa;
-         */
+        part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
+        part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
+        part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa;
+
+        /*
+         * ??? Is it a good idea to make this conditional on what is
+         * ??? potentially stale state information?
+         */
+        if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
+                msg.rp_gpa = uv_gpa(xpc_rsvd_page);
+                msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa;
+                xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+                                              XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
+        }
 
-        xpc_send_local_activate_IRQ_uv(part);
+        if (part->act_state == XPC_P_AS_INACTIVE)
+                xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
 }
 
 static void
 xpc_request_partition_reactivation_uv(struct xpc_partition *part)
 {
-        xpc_send_local_activate_IRQ_uv(part);
+        xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
+}
+
+static void
+xpc_request_partition_deactivation_uv(struct xpc_partition *part)
+{
+        struct xpc_activate_mq_msg_deactivate_req_uv msg;
+
+        /*
+         * ??? Is it a good idea to make this conditional on what is
+         * ??? potentially stale state information?
+         */
+        if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
+            part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
+
+                msg.reason = part->reason;
+                xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+                                              XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
+        }
 }
 
 /*
- * Setup the infrastructure necessary to support XPartition Communication
- * between the specified remote partition and the local one.
+ * Setup the channel structures that are uv specific.
  */
 static enum xp_retval
-xpc_setup_infrastructure_uv(struct xpc_partition *part)
+xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
 {
         /* !!! this function needs fleshing out */
         return xpUnsupported;
 }
 
 /*
- * Teardown the infrastructure necessary to support XPartition Communication
- * between the specified remote partition and the local one.
+ * Teardown the channel structures that are uv specific.
  */
 static void
-xpc_teardown_infrastructure_uv(struct xpc_partition *part)
+xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
 {
         /* !!! this function needs fleshing out */
         return;
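
Note: the heartbeat broadcast in xpc_send_heartbeat_uv() walks the xpc_heartbeating_to_mask_uv bitmap with find_first_bit()/find_next_bit(). The same walk rewritten as a single for statement, shown only to make the iteration pattern explicit (functionally equivalent, using the function's own locals):

        for (partid = find_first_bit(xpc_heartbeating_to_mask_uv,
                                     XP_MAX_NPARTITIONS_UV);
             partid < XP_MAX_NPARTITIONS_UV;
             partid = find_next_bit(xpc_heartbeating_to_mask_uv,
                                    XP_MAX_NPARTITIONS_UV, partid + 1)) {
                /* one activate-mq message per heartbeating partition */
                xpc_send_activate_IRQ_part_uv(&xpc_partitions[partid], &msg,
                                              sizeof(msg), msg_type);
        }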
@@ -106,15 +639,163 @@ xpc_teardown_infrastructure_uv(struct xpc_partition *part)
 static enum xp_retval
 xpc_make_first_contact_uv(struct xpc_partition *part)
 {
-        /* !!! this function needs fleshing out */
-        return xpUnsupported;
+        struct xpc_activate_mq_msg_uv msg;
+
+        /*
+         * We send a sync msg to get the remote partition's remote_act_state
+         * updated to our current act_state which at this point should
+         * be XPC_P_AS_ACTIVATING.
+         */
+        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+                                      XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
+
+        while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) {
+
+                dev_dbg(xpc_part, "waiting to make first contact with "
+                        "partition %d\n", XPC_PARTID(part));
+
+                /* wait a 1/4 of a second or so */
+                (void)msleep_interruptible(250);
+
+                if (part->act_state == XPC_P_AS_DEACTIVATING)
+                        return part->reason;
+        }
+
+        return xpSuccess;
 }
 
 static u64
 xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
 {
+        unsigned long irq_flags;
+        union xpc_channel_ctl_flags chctl;
+
+        spin_lock_irqsave(&part->chctl_lock, irq_flags);
+        chctl = part->chctl;
+        if (chctl.all_flags != 0)
+                part->chctl.all_flags = 0;
+
+        spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+        return chctl.all_flags;
+}
+
+static enum xp_retval
+xpc_setup_msg_structures_uv(struct xpc_channel *ch)
+{
+        /* !!! this function needs fleshing out */
+        return xpUnsupported;
+}
+
+static void
+xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
+{
+        struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+
+        ch_uv->remote_notify_mq_gpa = 0;
+
         /* !!! this function needs fleshing out */
-        return 0UL;
+}
+
+static void
+xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+        struct xpc_activate_mq_msg_chctl_closerequest_uv msg;
+
+        msg.ch_number = ch->number;
+        msg.reason = ch->reason;
+        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+                                    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
+}
+
+static void
+xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+        struct xpc_activate_mq_msg_chctl_closereply_uv msg;
+
+        msg.ch_number = ch->number;
+        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+                                    XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
+}
+
+static void
+xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+        struct xpc_activate_mq_msg_chctl_openrequest_uv msg;
+
+        msg.ch_number = ch->number;
+        msg.msg_size = ch->msg_size;
+        msg.local_nentries = ch->local_nentries;
+        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+                                    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
+}
+
+static void
+xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+        struct xpc_activate_mq_msg_chctl_openreply_uv msg;
+
+        msg.ch_number = ch->number;
+        msg.local_nentries = ch->local_nentries;
+        msg.remote_nentries = ch->remote_nentries;
+        msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv);
+        xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
+                                    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
+}
+
+static void
+xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
+                               unsigned long msgqueue_pa)
+{
+        ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa;
+}
+
+static void
+xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
+{
+        struct xpc_activate_mq_msg_uv msg;
+
+        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+                                      XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
+}
+
+static void
+xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
+{
+        struct xpc_activate_mq_msg_uv msg;
+
+        xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
+                                      XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
+}
+
+static void
+xpc_assume_partition_disengaged_uv(short partid)
+{
+        struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
+        unsigned long irq_flags;
+
+        spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+        part_uv->flags &= ~XPC_P_ENGAGED_UV;
+        spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+}
+
+static int
+xpc_partition_engaged_uv(short partid)
+{
+        return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
+}
+
+static int
+xpc_any_partition_engaged_uv(void)
+{
+        struct xpc_partition_uv *part_uv;
+        short partid;
+
+        for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
+                part_uv = &xpc_partitions[partid].sn.uv;
+                if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
+                        return 1;
+        }
+        return 0;
 }
 
 static struct xpc_msg *
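
Note: xpc_get_chctl_all_flags_uv() above is a fetch-and-clear: the channel manager snapshots every pending channel-control flag and zeroes them inside one spinlocked section, so flags posted by the activate-mq IRQ handler are consumed exactly once. The idiom in isolation, as a sketch with the types reduced to a bare u64 (the names here are illustrative, not from the patch):

        static DEFINE_SPINLOCK(demo_lock);
        static u64 demo_flags;          /* bits posted by a producer */

        static u64 demo_fetch_and_clear(void)
        {
                unsigned long irq_flags;
                u64 snapshot;

                spin_lock_irqsave(&demo_lock, irq_flags);
                snapshot = demo_flags;  /* everything posted so far */
                demo_flags = 0;         /* mark it consumed */
                spin_unlock_irqrestore(&demo_lock, irq_flags);

                return snapshot;
        }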
@@ -124,24 +805,64 @@ xpc_get_deliverable_msg_uv(struct xpc_channel *ch)
         return NULL;
 }
 
-void
+int
 xpc_init_uv(void)
 {
-        xpc_rsvd_page_init = xpc_rsvd_page_init_uv;
+        xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv;
+        xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv;
+        xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv;
+        xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv;
         xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
+        xpc_offline_heartbeat = xpc_offline_heartbeat_uv;
+        xpc_online_heartbeat = xpc_online_heartbeat_uv;
         xpc_heartbeat_init = xpc_heartbeat_init_uv;
         xpc_heartbeat_exit = xpc_heartbeat_exit_uv;
+        xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv;
+
         xpc_request_partition_activation = xpc_request_partition_activation_uv;
         xpc_request_partition_reactivation =
             xpc_request_partition_reactivation_uv;
-        xpc_setup_infrastructure = xpc_setup_infrastructure_uv;
-        xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv;
+        xpc_request_partition_deactivation =
+            xpc_request_partition_deactivation_uv;
+
+        xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
+        xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;
+
         xpc_make_first_contact = xpc_make_first_contact_uv;
+
         xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv;
+        xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv;
+        xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv;
+        xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv;
+        xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv;
+
+        xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv;
+
+        xpc_setup_msg_structures = xpc_setup_msg_structures_uv;
+        xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv;
+
+        xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv;
+        xpc_indicate_partition_disengaged =
+            xpc_indicate_partition_disengaged_uv;
+        xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv;
+        xpc_partition_engaged = xpc_partition_engaged_uv;
+        xpc_any_partition_engaged = xpc_any_partition_engaged_uv;
+
         xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv;
+
+        /* ??? The cpuid argument's value is 0, is that what we want? */
+        /* !!! The irq argument's value isn't correct. */
+        xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
+                                                  xpc_handle_activate_IRQ_uv);
+        if (xpc_activate_mq_uv == NULL)
+                return -ENOMEM;
+
+        return 0;
 }
 
 void
 xpc_exit_uv(void)
 {
+        /* !!! The irq argument's value isn't correct. */
+        xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
 }
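
The init/exit pair above completes the patch's dispatch story: arch-neutral XPC code calls through globals such as xpc_increment_heartbeat, and xpc_init_uv() (like its sn2 counterpart) points them at the UV implementations at load time. The pattern in miniature, using one of the patch's own hooks (the extern declaration is assumed to live in xpc.h; bodies elided):

        /* xpc.h: one global hook per operation */
        extern void (*xpc_increment_heartbeat)(void);

        /* xpc_uv.c: backend selected once, at initialization */
        int
        xpc_init_uv(void)
        {
                xpc_increment_heartbeat = xpc_increment_heartbeat_uv;
                /* ... remaining hooks, then allocate the activate mq ... */
                return 0;
        }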