author		Dean Nelson <dcn@sgi.com>		2008-07-30 01:34:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:50 -0400
commit		bd3e64c1759e4930315ebf022611468ee9621486
tree		314f4f8a80788b181aa03714d952854c8e2a1866	/drivers/misc/sgi-xp/xpc_uv.c
parent		5b8669dfd110a62a74eea525a009342f73987ea0
sgi-xp: setup the notify GRU message queue
Set up the notify GRU message queue that is used to send user messages
on UV systems.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_uv.c')
-rw-r--r--	drivers/misc/sgi-xp/xpc_uv.c	951
1 file changed, 763 insertions, 188 deletions
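For orientation before the hunks: the patch adds a second GRU message queue (the "notify" queue) that carries user payloads between partitions, using a slot-numbering scheme and zero-size ACK messages. Below is a condensed sketch of the header convention the new code relies on; the real struct xpc_notify_mq_msghdr_uv is defined in xpc.h, so the field types here are assumptions, not the actual definition.

/* Hedged sketch only; see xpc.h for the real xpc_notify_mq_msghdr_uv. */
struct notify_mq_msghdr_sketch {
	short partid;			/* sending partition's id */
	short ch_number;		/* channel the payload belongs to */
	unsigned short size;		/* 0 means ACK, else header + payload */
	unsigned int msg_slot_number;	/* grows by nentries at each reuse */
};

/* Both sides map a message number onto a fixed ring of slots: */
static inline int slot_index_sketch(unsigned int msg_slot_number, int nentries)
{
	return msg_slot_number % nentries;
}

A send slot is recycled only after the receiver's zero-size ACK returns, which is why xpc_received_payload_uv() below advances msg_slot_number by ch->remote_nentries once the ACK has been sent.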
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 689cb5c68ccf..1ac694c01623 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -66,8 +66,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
 	mq_order = get_order(mq_size);
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
 				mq_order);
-	if (page == NULL)
+	if (page == NULL) {
+		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
+			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
 		return NULL;
+	}
 
 	mq = page_address(page);
 	ret = gru_create_message_queue(mq, mq_size);
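xpc_create_gru_mq_uv() sizes its queue in whole pages, so get_order() decides how much memory alloc_pages_node() actually hands back. A userspace restatement of that rounding, assuming 4 KiB pages (PAGE_SHIFT and the sample size are illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

/* Userspace restatement of get_order(): smallest order such that
 * (1 << order) pages cover size bytes. */
static int order_for(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("%d\n", order_for(40 * 1024));	/* prints 4: 16 pages, 64 KiB */
	return 0;
}

A 40 KiB request therefore consumes 64 KiB of physically contiguous memory; the queue sizes are chosen with that rounding in mind.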
@@ -193,202 +196,226 @@ xpc_process_activate_IRQ_rcvd_uv(void)
 
 }
 
-static irqreturn_t
-xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
-{
-	unsigned long irq_flags;
-	struct xpc_activate_mq_msghdr_uv *msg_hdr;
-	short partid;
-	struct xpc_partition *part;
-	struct xpc_partition_uv *part_uv;
-	struct xpc_openclose_args *args;
-	int wakeup_hb_checker = 0;
-
-	while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
-
-		partid = msg_hdr->partid;
-		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
-			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() invalid"
-				"partid=0x%x passed in message\n", partid);
-			gru_free_message(xpc_activate_mq_uv, msg_hdr);
-			continue;
-		}
-		part = &xpc_partitions[partid];
-		part_uv = &part->sn.uv;
-
-		part_uv->remote_act_state = msg_hdr->act_state;
-
-		switch (msg_hdr->type) {
-		case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
-			/* syncing of remote_act_state was just done above */
-			break;
-
-		case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
-			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
-			    msg_hdr;
-			part_uv->heartbeat = msg->heartbeat;
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
-			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
-			    msg_hdr;
-			part_uv->heartbeat = msg->heartbeat;
-			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
-			part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
-			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
-			struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)
-			    msg_hdr;
-			part_uv->heartbeat = msg->heartbeat;
-			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
-			part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
-			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
-			struct xpc_activate_mq_msg_activate_req_uv *msg;
-
-			/*
-			 * ??? Do we deal here with ts_jiffies being different
-			 * ??? if act_state != XPC_P_AS_INACTIVE instead of
-			 * ??? below?
-			 */
-			msg = (struct xpc_activate_mq_msg_activate_req_uv *)
-			    msg_hdr;
-			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
-					  irq_flags);
-			if (part_uv->act_state_req == 0)
-				xpc_activate_IRQ_rcvd++;
-			part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
-			part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
-			part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
-			part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa;
-			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
-					       irq_flags);
-			wakeup_hb_checker++;
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
-			struct xpc_activate_mq_msg_deactivate_req_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_deactivate_req_uv *)
-			    msg_hdr;
-			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
-					  irq_flags);
-			if (part_uv->act_state_req == 0)
-				xpc_activate_IRQ_rcvd++;
-			part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
-			part_uv->reason = msg->reason;
-			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
-					       irq_flags);
-			wakeup_hb_checker++;
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
-			struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_chctl_closerequest_uv
-			       *)msg_hdr;
-			args = &part->remote_openclose_args[msg->ch_number];
-			args->reason = msg->reason;
-
-			spin_lock_irqsave(&part->chctl_lock, irq_flags);
-			part->chctl.flags[msg->ch_number] |=
-			    XPC_CHCTL_CLOSEREQUEST;
-			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
-
-			xpc_wakeup_channel_mgr(part);
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
-			struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_chctl_closereply_uv *)
-			    msg_hdr;
-
-			spin_lock_irqsave(&part->chctl_lock, irq_flags);
-			part->chctl.flags[msg->ch_number] |=
-			    XPC_CHCTL_CLOSEREPLY;
-			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
-
-			xpc_wakeup_channel_mgr(part);
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
-			struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_chctl_openrequest_uv
-			       *)msg_hdr;
-			args = &part->remote_openclose_args[msg->ch_number];
-			args->msg_size = msg->msg_size;
-			args->local_nentries = msg->local_nentries;
-
-			spin_lock_irqsave(&part->chctl_lock, irq_flags);
-			part->chctl.flags[msg->ch_number] |=
-			    XPC_CHCTL_OPENREQUEST;
-			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
-
-			xpc_wakeup_channel_mgr(part);
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
-			struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
-
-			msg = (struct xpc_activate_mq_msg_chctl_openreply_uv *)
-			    msg_hdr;
-			args = &part->remote_openclose_args[msg->ch_number];
-			args->remote_nentries = msg->remote_nentries;
-			args->local_nentries = msg->local_nentries;
-			args->local_msgqueue_pa = msg->local_notify_mq_gpa;
-
-			spin_lock_irqsave(&part->chctl_lock, irq_flags);
-			part->chctl.flags[msg->ch_number] |=
-			    XPC_CHCTL_OPENREPLY;
-			spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
-
-			xpc_wakeup_channel_mgr(part);
-			break;
-		}
-		case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
-			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
-			part_uv->flags |= XPC_P_ENGAGED_UV;
-			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
-			break;
-
-		case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
-			spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
-			part_uv->flags &= ~XPC_P_ENGAGED_UV;
-			spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
-			break;
-
-		default:
-			dev_err(xpc_part, "received unknown activate_mq msg "
-				"type=%d from partition=%d\n", msg_hdr->type,
-				partid);
-		}
-
-		if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
-		    part->remote_rp_ts_jiffies != 0) {
-			/*
-			 * ??? Does what we do here need to be sensitive to
-			 * ??? act_state or remote_act_state?
-			 */
-			spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock,
-					  irq_flags);
-			if (part_uv->act_state_req == 0)
-				xpc_activate_IRQ_rcvd++;
-			part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
-			spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock,
-					       irq_flags);
-			wakeup_hb_checker++;
-		}
+static void
+xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
+			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
+			      int *wakeup_hb_checker)
+{
+	unsigned long irq_flags;
+	struct xpc_partition_uv *part_uv = &part->sn.uv;
+	struct xpc_openclose_args *args;
+
+	part_uv->remote_act_state = msg_hdr->act_state;
+
+	switch (msg_hdr->type) {
+	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
+		/* syncing of remote_act_state was just done above */
+		break;
+
+	case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
+		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
+
+		msg = container_of(msg_hdr,
+				   struct xpc_activate_mq_msg_heartbeat_req_uv,
+				   hdr);
+		part_uv->heartbeat = msg->heartbeat;
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: {
+		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
+
+		msg = container_of(msg_hdr,
+				   struct xpc_activate_mq_msg_heartbeat_req_uv,
+				   hdr);
+		part_uv->heartbeat = msg->heartbeat;
+
+		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+		part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV;
+		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: {
+		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;
+
+		msg = container_of(msg_hdr,
+				   struct xpc_activate_mq_msg_heartbeat_req_uv,
+				   hdr);
+		part_uv->heartbeat = msg->heartbeat;
+
+		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+		part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV;
+		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
+		struct xpc_activate_mq_msg_activate_req_uv *msg;
+
+		/*
+		 * ??? Do we deal here with ts_jiffies being different
+		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
+		 * ??? below?
+		 */
+		msg = container_of(msg_hdr, struct
+				   xpc_activate_mq_msg_activate_req_uv, hdr);
+
+		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+		if (part_uv->act_state_req == 0)
+			xpc_activate_IRQ_rcvd++;
+		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
+		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
+		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
+		part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa;
+		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+		(*wakeup_hb_checker)++;
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
+		struct xpc_activate_mq_msg_deactivate_req_uv *msg;
+
+		msg = container_of(msg_hdr, struct
+				   xpc_activate_mq_msg_deactivate_req_uv, hdr);
+
+		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+		if (part_uv->act_state_req == 0)
+			xpc_activate_IRQ_rcvd++;
+		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
+		part_uv->reason = msg->reason;
+		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+		(*wakeup_hb_checker)++;
+		return;
+	}
+	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
+		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
+
+		msg = container_of(msg_hdr, struct
+				   xpc_activate_mq_msg_chctl_closerequest_uv,
+				   hdr);
+		args = &part->remote_openclose_args[msg->ch_number];
+		args->reason = msg->reason;
+
+		spin_lock_irqsave(&part->chctl_lock, irq_flags);
+		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
+		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+		xpc_wakeup_channel_mgr(part);
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
+		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
+
+		msg = container_of(msg_hdr, struct
+				   xpc_activate_mq_msg_chctl_closereply_uv,
+				   hdr);
+
+		spin_lock_irqsave(&part->chctl_lock, irq_flags);
+		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
+		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+		xpc_wakeup_channel_mgr(part);
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
+		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
+
+		msg = container_of(msg_hdr, struct
+				   xpc_activate_mq_msg_chctl_openrequest_uv,
+				   hdr);
+		args = &part->remote_openclose_args[msg->ch_number];
+		args->entry_size = msg->entry_size;
+		args->local_nentries = msg->local_nentries;
+
+		spin_lock_irqsave(&part->chctl_lock, irq_flags);
+		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
+		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+		xpc_wakeup_channel_mgr(part);
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
+		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
+
+		msg = container_of(msg_hdr, struct
+				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
+		args = &part->remote_openclose_args[msg->ch_number];
+		args->remote_nentries = msg->remote_nentries;
+		args->local_nentries = msg->local_nentries;
+		args->local_msgqueue_pa = msg->local_notify_mq_gpa;
+
+		spin_lock_irqsave(&part->chctl_lock, irq_flags);
+		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
+		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+		xpc_wakeup_channel_mgr(part);
+		break;
+	}
+	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
+		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+		part_uv->flags |= XPC_P_ENGAGED_UV;
+		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+		break;
+
+	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
+		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+		part_uv->flags &= ~XPC_P_ENGAGED_UV;
+		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+		break;
+
+	default:
+		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
+			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));
+
+		/* get hb checker to deactivate from the remote partition */
+		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+		if (part_uv->act_state_req == 0)
+			xpc_activate_IRQ_rcvd++;
+		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
+		part_uv->reason = xpBadMsgType;
+		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+		(*wakeup_hb_checker)++;
+		return;
+	}
+
+	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
+	    part->remote_rp_ts_jiffies != 0) {
+		/*
+		 * ??? Does what we do here need to be sensitive to
+		 * ??? act_state or remote_act_state?
+		 */
+		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+		if (part_uv->act_state_req == 0)
+			xpc_activate_IRQ_rcvd++;
+		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
+		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+		(*wakeup_hb_checker)++;
+	}
+}
+
+static irqreturn_t
+xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
+{
+	struct xpc_activate_mq_msghdr_uv *msg_hdr;
+	short partid;
+	struct xpc_partition *part;
+	int wakeup_hb_checker = 0;
+
+	while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+
+		partid = msg_hdr->partid;
+		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
+			dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
+				"received invalid partid=0x%x in message\n",
+				partid);
+		} else {
+			part = &xpc_partitions[partid];
+			if (xpc_part_ref(part)) {
+				xpc_handle_activate_mq_msg_uv(part, msg_hdr,
+							    &wakeup_hb_checker);
+				xpc_part_deref(part);
+			}
 		}
 
 		gru_free_message(xpc_activate_mq_uv, msg_hdr);
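The rewritten handler above replaces raw casts of msg_hdr with container_of(), which recovers the enclosing message struct from its embedded header. A self-contained sketch of that pattern with hypothetical type names (not the XPC ones); the kernel provides container_of() in <linux/kernel.h>, so it is redefined here only to keep the sketch standalone:

#include <stddef.h>

struct msg_hdr {
	int type;		/* discriminates the concrete message type */
};

struct heartbeat_msg {
	struct msg_hdr hdr;	/* common header embedded in every message */
	unsigned long heartbeat;
};

/* Kernel's container_of(), reduced to its offsetof() core. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void dispatch(struct msg_hdr *hdr)
{
	switch (hdr->type) {
	case 1: {		/* a heartbeat message, say */
		struct heartbeat_msg *msg =
		    container_of(hdr, struct heartbeat_msg, hdr);
		(void)msg->heartbeat;	/* ... consume the payload ... */
		break;
	}
	default:
		break;
	}
}

Unlike the old cast, container_of() stays correct even if hdr is not the first member of the message struct.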
@@ -616,14 +643,82 @@ xpc_request_partition_deactivation_uv(struct xpc_partition *part)
 	}
 }
 
+static void
+xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
+{
+	/* nothing needs to be done */
+	return;
+}
+
+static void
+xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
+{
+	head->first = NULL;
+	head->last = NULL;
+	spin_lock_init(&head->lock);
+	head->n_entries = 0;
+}
+
+static void *
+xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
+{
+	unsigned long irq_flags;
+	struct xpc_fifo_entry_uv *first;
+
+	spin_lock_irqsave(&head->lock, irq_flags);
+	first = head->first;
+	if (head->first != NULL) {
+		head->first = first->next;
+		if (head->first == NULL)
+			head->last = NULL;
+
+		head->n_entries--;
+		BUG_ON(head->n_entries < 0);
+
+		first->next = NULL;
+	}
+	spin_unlock_irqrestore(&head->lock, irq_flags);
+	return first;
+}
+
+static void
+xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
+		      struct xpc_fifo_entry_uv *last)
+{
+	unsigned long irq_flags;
+
+	last->next = NULL;
+	spin_lock_irqsave(&head->lock, irq_flags);
+	if (head->last != NULL)
+		head->last->next = last;
+	else
+		head->first = last;
+	head->last = last;
+	head->n_entries++;
+	spin_unlock_irqrestore(&head->lock, irq_flags);
+}
+
+static int
+xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
+{
+	return head->n_entries;
+}
+
 /*
  * Setup the channel structures that are uv specific.
  */
 static enum xp_retval
 xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
 {
-	/* !!! this function needs fleshing out */
-	return xpUnsupported;
+	struct xpc_channel_uv *ch_uv;
+	int ch_number;
+
+	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
+		ch_uv = &part->channels[ch_number].sn.uv;
+
+		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
+	}
+
+	return xpSuccess;
 }
 
 /*
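The FIFO above is intrusive: callers embed a struct xpc_fifo_entry_uv inside their own structure, get that embedded entry back from xpc_get_fifo_entry_uv(), and then recover the enclosing object, exactly as xpc_allocate_msg_slot_uv() later in this diff does with container_of(). A self-contained sketch of just the recovery step, with a hypothetical slot type:

#include <stddef.h>
#include <stdio.h>

/* Entries carry no payload of their own; users embed one, as this
 * diff embeds "next" in struct xpc_send_msg_slot_uv. */
struct fifo_entry {
	struct fifo_entry *next;
};

struct slot {
	int slot_number;
	struct fifo_entry next;		/* need not be the first member */
};

int main(void)
{
	struct slot s = { .slot_number = 7 };
	struct fifo_entry *e = &s.next;	/* what the FIFO hands back */

	/* container_of(e, struct slot, next), spelled out with offsetof: */
	struct slot *owner =
	    (struct slot *)((char *)e - offsetof(struct slot, next));

	printf("%d\n", owner->slot_number);	/* prints 7 */
	return 0;
}

Because the linkage lives inside the slot itself, the free list needs no per-entry allocations of its own.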
@@ -632,7 +727,7 @@ xpc_setup_ch_structures_sn_uv(struct xpc_partition *part)
 static void
 xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part)
 {
-	/* !!! this function needs fleshing out */
+	/* nothing needs to be done */
 	return;
 }
 
@@ -680,20 +775,114 @@ xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
 }
 
 static enum xp_retval
+xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
+{
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+	struct xpc_send_msg_slot_uv *msg_slot;
+	unsigned long irq_flags;
+	int nentries;
+	int entry;
+	size_t nbytes;
+
+	for (nentries = ch->local_nentries; nentries > 0; nentries--) {
+		nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
+		ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+		if (ch_uv->send_msg_slots == NULL)
+			continue;
+
+		for (entry = 0; entry < nentries; entry++) {
+			msg_slot = &ch_uv->send_msg_slots[entry];
+
+			msg_slot->msg_slot_number = entry;
+			xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
+					      &msg_slot->next);
+		}
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->local_nentries)
+			ch->local_nentries = nentries;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpSuccess;
+	}
+
+	return xpNoMemory;
+}
+
+static enum xp_retval
+xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
+{
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+	struct xpc_notify_mq_msg_uv *msg_slot;
+	unsigned long irq_flags;
+	int nentries;
+	int entry;
+	size_t nbytes;
+
+	for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
+		nbytes = nentries * ch->entry_size;
+		ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
+		if (ch_uv->recv_msg_slots == NULL)
+			continue;
+
+		for (entry = 0; entry < nentries; entry++) {
+			msg_slot = ch_uv->recv_msg_slots + entry *
+			    ch->entry_size;
+
+			msg_slot->hdr.msg_slot_number = entry;
+		}
+
+		spin_lock_irqsave(&ch->lock, irq_flags);
+		if (nentries < ch->remote_nentries)
+			ch->remote_nentries = nentries;
+		spin_unlock_irqrestore(&ch->lock, irq_flags);
+		return xpSuccess;
+	}
+
+	return xpNoMemory;
+}
+
+/*
+ * Allocate msg_slots associated with the channel.
+ */
+static enum xp_retval
 xpc_setup_msg_structures_uv(struct xpc_channel *ch)
 {
-	/* !!! this function needs fleshing out */
-	return xpUnsupported;
+	enum xp_retval ret;
+	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
+
+	DBUG_ON(ch->flags & XPC_C_SETUP);
+
+	ret = xpc_allocate_send_msg_slot_uv(ch);
+	if (ret == xpSuccess) {
+
+		ret = xpc_allocate_recv_msg_slot_uv(ch);
+		if (ret != xpSuccess) {
+			kfree(ch_uv->send_msg_slots);
+			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+		}
+	}
+	return ret;
 }
 
+/*
+ * Free up msg_slots and clear other stuff that were setup for the specified
+ * channel.
+ */
 static void
 xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
 {
 	struct xpc_channel_uv *ch_uv = &ch->sn.uv;
 
+	DBUG_ON(!spin_is_locked(&ch->lock));
+
 	ch_uv->remote_notify_mq_gpa = 0;
 
-	/* !!! this function needs fleshing out */
+	if (ch->flags & XPC_C_SETUP) {
+		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
+		kfree(ch_uv->send_msg_slots);
+		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
+		kfree(ch_uv->recv_msg_slots);
+	}
 }
 
 static void
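xpc_allocate_send_msg_slot_uv() above retries kzalloc() with progressively fewer entries rather than failing outright, then shrinks ch->local_nentries to whatever actually fit. The same retreat-on-pressure loop in isolation, with userspace stand-ins for kzalloc() and the channel fields:

#include <stdlib.h>

struct slot { int slot_number; };

/* Try the requested count first, then back off one entry at a time. */
static struct slot *alloc_slots(int *nentries_inout, size_t slot_size)
{
	int nentries;
	struct slot *slots;

	for (nentries = *nentries_inout; nentries > 0; nentries--) {
		slots = calloc(nentries, slot_size);	/* kzalloc() stand-in */
		if (slots == NULL)
			continue;	/* allocation failed: retry smaller */

		*nentries_inout = nentries;	/* may shrink the ring */
		return slots;
	}
	return NULL;	/* maps to xpNoMemory in the real code */
}

The requested ring size is thus treated as an upper bound rather than a contract; the peer learns the final nentries through the open handshake.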
@@ -723,7 +912,7 @@ xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
 	struct xpc_activate_mq_msg_chctl_openrequest_uv msg;
 
 	msg.ch_number = ch->number;
-	msg.msg_size = ch->msg_size;
+	msg.entry_size = ch->entry_size;
 	msg.local_nentries = ch->local_nentries;
 	xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
 				    XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
@@ -743,6 +932,18 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
 }
 
 static void
+xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&part->chctl_lock, irq_flags);
+	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
+	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
+
+	xpc_wakeup_channel_mgr(part);
+}
+
+static void
 xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
 			       unsigned long msgqueue_pa)
 {
@@ -798,11 +999,358 @@ xpc_any_partition_engaged_uv(void)
 	return 0;
 }
 
-static struct xpc_msg *
-xpc_get_deliverable_msg_uv(struct xpc_channel *ch)
+static enum xp_retval
+xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
+			 struct xpc_send_msg_slot_uv **address_of_msg_slot)
+{
+	enum xp_retval ret;
+	struct xpc_send_msg_slot_uv *msg_slot;
+	struct xpc_fifo_entry_uv *entry;
+
+	while (1) {
+		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
+		if (entry != NULL)
+			break;
+
+		if (flags & XPC_NOWAIT)
+			return xpNoWait;
+
+		ret = xpc_allocate_msg_wait(ch);
+		if (ret != xpInterrupted && ret != xpTimeout)
+			return ret;
+	}
+
+	msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
+	*address_of_msg_slot = msg_slot;
+	return xpSuccess;
+}
+
+static void
+xpc_free_msg_slot_uv(struct xpc_channel *ch,
+		     struct xpc_send_msg_slot_uv *msg_slot)
+{
+	xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);
+
+	/* wakeup anyone waiting for a free msg slot */
+	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
+		wake_up(&ch->msg_allocate_wq);
+}
+
+static void
+xpc_notify_sender_uv(struct xpc_channel *ch,
+		     struct xpc_send_msg_slot_uv *msg_slot,
+		     enum xp_retval reason)
+{
+	xpc_notify_func func = msg_slot->func;
+
+	if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {
+
+		atomic_dec(&ch->n_to_notify);
+
+		dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
+			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
+			msg_slot->msg_slot_number, ch->partid, ch->number);
+
+		func(reason, ch->partid, ch->number, msg_slot->key);
+
+		dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
+			"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
+			msg_slot->msg_slot_number, ch->partid, ch->number);
+	}
+}
+
+static void
+xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
+			    struct xpc_notify_mq_msg_uv *msg)
+{
+	struct xpc_send_msg_slot_uv *msg_slot;
+	int entry = msg->hdr.msg_slot_number % ch->local_nentries;
+
+	msg_slot = &ch->sn.uv.send_msg_slots[entry];
+
+	BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
+	msg_slot->msg_slot_number += ch->local_nentries;
+
+	if (msg_slot->func != NULL)
+		xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);
+
+	xpc_free_msg_slot_uv(ch, msg_slot);
+}
+
+static void
+xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
+			    struct xpc_notify_mq_msg_uv *msg)
+{
+	struct xpc_partition_uv *part_uv = &part->sn.uv;
+	struct xpc_channel *ch;
+	struct xpc_channel_uv *ch_uv;
+	struct xpc_notify_mq_msg_uv *msg_slot;
+	unsigned long irq_flags;
+	int ch_number = msg->hdr.ch_number;
+
+	if (unlikely(ch_number >= part->nchannels)) {
+		dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
+			"channel number=0x%x in message from partid=%d\n",
+			ch_number, XPC_PARTID(part));
+
+		/* get hb checker to deactivate from the remote partition */
+		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+		if (part_uv->act_state_req == 0)
+			xpc_activate_IRQ_rcvd++;
+		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
+		part_uv->reason = xpBadChannelNumber;
+		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
+
+		wake_up_interruptible(&xpc_activate_IRQ_wq);
+		return;
+	}
+
+	ch = &part->channels[ch_number];
+	xpc_msgqueue_ref(ch);
+
+	if (!(ch->flags & XPC_C_CONNECTED)) {
+		xpc_msgqueue_deref(ch);
+		return;
+	}
+
+	/* see if we're really dealing with an ACK for a previously sent msg */
+	if (msg->hdr.size == 0) {
+		xpc_handle_notify_mq_ack_uv(ch, msg);
+		xpc_msgqueue_deref(ch);
+		return;
+	}
+
+	/* we're dealing with a normal message sent via the notify_mq */
+	ch_uv = &ch->sn.uv;
+
+	msg_slot = (struct xpc_notify_mq_msg_uv *)((u64)ch_uv->recv_msg_slots +
+		    (msg->hdr.msg_slot_number % ch->remote_nentries) *
+		    ch->entry_size);
+
+	BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
+	BUG_ON(msg_slot->hdr.size != 0);
+
+	memcpy(msg_slot, msg, msg->hdr.size);
+
+	xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);
+
+	if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
+		/*
+		 * If there is an existing idle kthread get it to deliver
+		 * the payload, otherwise we'll have to get the channel mgr
+		 * for this partition to create a kthread to do the delivery.
+		 */
+		if (atomic_read(&ch->kthreads_idle) > 0)
+			wake_up_nr(&ch->idle_wq, 1);
+		else
+			xpc_send_chctl_local_msgrequest_uv(part, ch->number);
+	}
+	xpc_msgqueue_deref(ch);
+}
+
+static irqreturn_t
+xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
+{
+	struct xpc_notify_mq_msg_uv *msg;
+	short partid;
+	struct xpc_partition *part;
+
+	while ((msg = gru_get_next_message(xpc_notify_mq_uv)) != NULL) {
+
+		partid = msg->hdr.partid;
+		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
+			dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
+				"invalid partid=0x%x in message\n", partid);
+		} else {
+			part = &xpc_partitions[partid];
+
+			if (xpc_part_ref(part)) {
+				xpc_handle_notify_mq_msg_uv(part, msg);
+				xpc_part_deref(part);
+			}
+		}
+
+		gru_free_message(xpc_notify_mq_uv, msg);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int
+xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
+{
+	return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
+}
+
+static void
+xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
+{
+	struct xpc_channel *ch = &part->channels[ch_number];
+	int ndeliverable_payloads;
+
+	xpc_msgqueue_ref(ch);
+
+	ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);
+
+	if (ndeliverable_payloads > 0 &&
+	    (ch->flags & XPC_C_CONNECTED) &&
+	    (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {
+
+		xpc_activate_kthreads(ch, ndeliverable_payloads);
+	}
+
+	xpc_msgqueue_deref(ch);
+}
+
+static enum xp_retval
+xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
+		    u16 payload_size, u8 notify_type, xpc_notify_func func,
+		    void *key)
+{
+	enum xp_retval ret = xpSuccess;
+	struct xpc_send_msg_slot_uv *msg_slot = NULL;
+	struct xpc_notify_mq_msg_uv *msg;
+	u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
+	size_t msg_size;
+
+	DBUG_ON(notify_type != XPC_N_CALL);
+
+	msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
+	if (msg_size > ch->entry_size)
+		return xpPayloadTooBig;
+
+	xpc_msgqueue_ref(ch);
+
+	if (ch->flags & XPC_C_DISCONNECTING) {
+		ret = ch->reason;
+		goto out_1;
+	}
+	if (!(ch->flags & XPC_C_CONNECTED)) {
+		ret = xpNotConnected;
+		goto out_1;
+	}
+
+	ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
+	if (ret != xpSuccess)
+		goto out_1;
+
+	if (func != NULL) {
+		atomic_inc(&ch->n_to_notify);
+
+		msg_slot->key = key;
+		wmb(); /* a non-NULL func must hit memory after the key */
+		msg_slot->func = func;
+
+		if (ch->flags & XPC_C_DISCONNECTING) {
+			ret = ch->reason;
+			goto out_2;
+		}
+	}
+
+	msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
+	msg->hdr.partid = xp_partition_id;
+	msg->hdr.ch_number = ch->number;
+	msg->hdr.size = msg_size;
+	msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
+	memcpy(&msg->payload, payload, payload_size);
+
+	ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, msg_size);
+	if (ret == xpSuccess)
+		goto out_1;
+
+	XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
+out_2:
+	if (func != NULL) {
+		/*
+		 * Try to NULL the msg_slot's func field. If we fail, then
+		 * xpc_notify_senders_of_disconnect_uv() beat us to it, in which
+		 * case we need to pretend we succeeded to send the message
+		 * since the user will get a callout for the disconnect error
+		 * by xpc_notify_senders_of_disconnect_uv(), and to also get an
+		 * error returned here will confuse them. Additionally, since
+		 * in this case the channel is being disconnected we don't need
+		 * to put the msg_slot back on the free list.
+		 */
+		if (cmpxchg(&msg_slot->func, func, NULL) != func) {
+			ret = xpSuccess;
+			goto out_1;
+		}
+
+		msg_slot->key = NULL;
+		atomic_dec(&ch->n_to_notify);
+	}
+	xpc_free_msg_slot_uv(ch, msg_slot);
+out_1:
+	xpc_msgqueue_deref(ch);
+	return ret;
+}
+
+/*
+ * Tell the callers of xpc_send_notify() that the status of their payloads
+ * is unknown because the channel is now disconnecting.
+ *
+ * We don't worry about putting these msg_slots on the free list since the
+ * msg_slots themselves are about to be kfree'd.
+ */
+static void
+xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
+{
+	struct xpc_send_msg_slot_uv *msg_slot;
+	int entry;
+
+	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
+
+	for (entry = 0; entry < ch->local_nentries; entry++) {
+
+		if (atomic_read(&ch->n_to_notify) == 0)
+			break;
+
+		msg_slot = &ch->sn.uv.send_msg_slots[entry];
+		if (msg_slot->func != NULL)
+			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
+	}
+}
+
+/*
+ * Get the next deliverable message's payload.
+ */
+static void *
+xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
+{
+	struct xpc_fifo_entry_uv *entry;
+	struct xpc_notify_mq_msg_uv *msg;
+	void *payload = NULL;
+
+	if (!(ch->flags & XPC_C_DISCONNECTING)) {
+		entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
+		if (entry != NULL) {
+			msg = container_of(entry, struct xpc_notify_mq_msg_uv,
+					   hdr.u.next);
+			payload = &msg->payload;
+		}
+	}
+	return payload;
+}
+
+static void
+xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
 {
-	/* !!! this function needs fleshing out */
-	return NULL;
+	struct xpc_notify_mq_msg_uv *msg;
+	enum xp_retval ret;
+
+	msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);
+
+	/* return an ACK to the sender of this message */
+
+	msg->hdr.partid = xp_partition_id;
+	msg->hdr.size = 0;	/* size of zero indicates this is an ACK */
+
+	ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg,
+			       sizeof(struct xpc_notify_mq_msghdr_uv));
+	if (ret != xpSuccess)
+		XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
+
+	msg->hdr.msg_slot_number += ch->remote_nentries;
 }
 
 int
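A worked example of the slot-number arithmetic used by xpc_handle_notify_mq_ack_uv() and xpc_received_payload_uv() above, assuming a ring of 4 slots: the number always maps back to the same slot index, while growing by nentries per reuse so a stale or duplicated ACK trips the BUG_ON comparison.

#include <assert.h>

int main(void)
{
	int nentries = 4;			/* ring size (local_nentries) */
	unsigned int msg_slot_number = 2;	/* slot 2, first use */

	/* ACK arrives: the message number maps back onto the ring... */
	assert(msg_slot_number % nentries == 2);

	/* ...and the slot is re-armed for its next use (2 -> 6 -> 10). */
	msg_slot_number += nentries;
	assert(msg_slot_number == 6);
	assert(msg_slot_number % nentries == 2);	/* same slot index */

	return 0;
}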
@@ -824,6 +1372,8 @@ xpc_init_uv(void)
 	    xpc_request_partition_reactivation_uv;
 	xpc_request_partition_deactivation =
 	    xpc_request_partition_deactivation_uv;
+	xpc_cancel_partition_deactivation_request =
+	    xpc_cancel_partition_deactivation_request_uv;
 
 	xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv;
 	xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv;
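The assignments above follow the file's backend-selection pattern: the generic XPC layer calls through global function pointers that xpc_init_uv() (or its sn2 counterpart) binds to hardware-specific implementations at init time. A toy sketch of the idea, with illustrative names only (the real hooks are declared in xpc.h):

enum xp_retval_sketch { SK_SUCCESS, SK_UNSUPPORTED };

static enum xp_retval_sketch setup_ch_structures_sn_uv_sketch(void)
{
	return SK_SUCCESS;	/* uv-specific work would happen here */
}

/* Generic-layer hook: bound once at init, called everywhere else. */
static enum xp_retval_sketch (*setup_ch_structures_sn_sketch)(void);

static int init_uv_sketch(void)
{
	setup_ch_structures_sn_sketch = setup_ch_structures_sn_uv_sketch;
	return 0;
}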
@@ -848,7 +1398,18 @@ xpc_init_uv(void)
 	xpc_partition_engaged = xpc_partition_engaged_uv;
 	xpc_any_partition_engaged = xpc_any_partition_engaged_uv;
 
-	xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv;
+	xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv;
+	xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv;
+	xpc_send_payload = xpc_send_payload_uv;
+	xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv;
+	xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv;
+	xpc_received_payload = xpc_received_payload_uv;
+
+	if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
+		dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
+			XPC_MSG_HDR_MAX_SIZE);
+		return -E2BIG;
+	}
 
 	/* ??? The cpuid argument's value is 0, is that what we want? */
 	/* !!! The irq argument's value isn't correct. */
@@ -857,6 +1418,17 @@ xpc_init_uv(void)
 	if (xpc_activate_mq_uv == NULL)
 		return -ENOMEM;
 
+	/* ??? The cpuid argument's value is 0, is that what we want? */
+	/* !!! The irq argument's value isn't correct. */
+	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
+						xpc_handle_notify_IRQ_uv);
+	if (xpc_notify_mq_uv == NULL) {
+		/* !!! The irq argument's value isn't correct. */
+		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
+				      XPC_ACTIVATE_MQ_SIZE_UV, 0);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -864,5 +1436,8 @@ void
 xpc_exit_uv(void)
 {
 	/* !!! The irq argument's value isn't correct. */
+	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
+
+	/* !!! The irq argument's value isn't correct. */
 	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
 }