Diffstat (limited to 'drivers/misc/sgi-xp/xpc_channel.c')
 -rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	186
 1 file changed, 93 insertions, 93 deletions
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index bfcb9ea968e9..9c90c2d55c08 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -53,7 +53,7 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
  * Set up the initial values for the XPartition Communication channels.
  */
 static void
-xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
+xpc_initialize_channels(struct xpc_partition *part, short partid)
 {
 	int ch_number;
 	struct xpc_channel *ch;
@@ -90,12 +90,12 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid)
  * Setup the infrastructure necessary to support XPartition Communication
  * between the specified remote partition and the local one.
  */
-enum xpc_retval
+enum xp_retval
 xpc_setup_infrastructure(struct xpc_partition *part)
 {
 	int ret, cpuid;
 	struct timer_list *timer;
-	partid_t partid = XPC_PARTID(part);
+	short partid = XPC_PARTID(part);

 	/*
 	 * Zero out MOST of the entry for this partition. Only the fields
@@ -114,7 +114,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 				 GFP_KERNEL);
 	if (part->channels == NULL) {
 		dev_err(xpc_chan, "can't get memory for channels\n");
-		return xpcNoMemory;
+		return xpNoMemory;
 	}

 	part->nchannels = XPC_NCHANNELS;
@@ -129,7 +129,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		part->channels = NULL;
 		dev_err(xpc_chan, "can't get memory for local get/put "
 			"values\n");
-		return xpcNoMemory;
+		return xpNoMemory;
 	}

 	part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE,
@@ -143,7 +143,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		part->local_GPs = NULL;
 		kfree(part->channels);
 		part->channels = NULL;
-		return xpcNoMemory;
+		return xpNoMemory;
 	}

 	/* allocate all the required open and close args */
@@ -159,7 +159,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		part->local_GPs = NULL;
 		kfree(part->channels);
 		part->channels = NULL;
-		return xpcNoMemory;
+		return xpNoMemory;
 	}

 	part->remote_openclose_args =
@@ -175,7 +175,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		part->local_GPs = NULL;
 		kfree(part->channels);
 		part->channels = NULL;
-		return xpcNoMemory;
+		return xpNoMemory;
 	}

 	xpc_initialize_channels(part, partid);
@@ -209,7 +209,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 		part->local_GPs = NULL;
 		kfree(part->channels);
 		part->channels = NULL;
-		return xpcLackOfResources;
+		return xpLackOfResources;
 	}

 	/* Setup a timer to check for dropped IPIs */
@@ -243,7 +243,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
 	xpc_vars_part[partid].nchannels = part->nchannels;
 	xpc_vars_part[partid].magic = XPC_VP_MAGIC1;

-	return xpcSuccess;
+	return xpSuccess;
 }

 /*
@@ -254,7 +254,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
  * dst must be a cacheline aligned virtual address on this partition.
  * cnt must be an cacheline sized
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 			   const void *src, size_t cnt)
 {
@@ -270,7 +270,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
 	bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt,
 			      (BTE_NORMAL | BTE_WACQUIRE), NULL);
 	if (bte_ret == BTE_SUCCESS)
-		return xpcSuccess;
+		return xpSuccess;

 	dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n",
 		XPC_PARTID(part), bte_ret);
@@ -282,7 +282,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst,
  * Pull the remote per partition specific variables from the specified
  * partition.
  */
-enum xpc_retval
+enum xp_retval
 xpc_pull_remote_vars_part(struct xpc_partition *part)
 {
 	u8 buffer[L1_CACHE_BYTES * 2];
@@ -290,8 +290,8 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 	    (struct xpc_vars_part *)L1_CACHE_ALIGN((u64)buffer);
 	struct xpc_vars_part *pulled_entry;
 	u64 remote_entry_cacheline_pa, remote_entry_pa;
-	partid_t partid = XPC_PARTID(part);
-	enum xpc_retval ret;
+	short partid = XPC_PARTID(part);
+	enum xp_retval ret;

 	/* pull the cacheline that contains the variables we're interested in */

@@ -311,7 +311,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 	ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline,
 					 (void *)remote_entry_cacheline_pa,
 					 L1_CACHE_BYTES);
-	if (ret != xpcSuccess) {
+	if (ret != xpSuccess) {
 		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
 			"partition %d, ret=%d\n", partid, ret);
 		return ret;
@@ -326,11 +326,11 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 			dev_dbg(xpc_chan, "partition %d's XPC vars_part for "
 				"partition %d has bad magic value (=0x%lx)\n",
 				partid, sn_partition_id, pulled_entry->magic);
-			return xpcBadMagic;
+			return xpBadMagic;
 		}

 		/* they've not been initialized yet */
-		return xpcRetry;
+		return xpRetry;
 	}

 	if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) {
@@ -344,7 +344,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 			dev_err(xpc_chan, "partition %d's XPC vars_part for "
 				"partition %d are not valid\n", partid,
 				sn_partition_id);
-			return xpcInvalidAddress;
+			return xpInvalidAddress;
 		}

 		/* the variables we imported look to be valid */
@@ -366,9 +366,9 @@ xpc_pull_remote_vars_part(struct xpc_partition *part)
 	}

 	if (pulled_entry->magic == XPC_VP_MAGIC1)
-		return xpcRetry;
+		return xpRetry;

-	return xpcSuccess;
+	return xpSuccess;
 }

 /*
@@ -379,7 +379,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
 {
 	unsigned long irq_flags;
 	u64 IPI_amo;
-	enum xpc_retval ret;
+	enum xp_retval ret;

 	/*
 	 * See if there are any IPI flags to be handled.
@@ -398,7 +398,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
 						 (void *)part->
 						 remote_openclose_args_pa,
 						 XPC_OPENCLOSE_ARGS_SIZE);
-		if (ret != xpcSuccess) {
+		if (ret != xpSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);

 			dev_dbg(xpc_chan, "failed to pull openclose args from "
@@ -414,7 +414,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
 		ret = xpc_pull_remote_cachelines(part, part->remote_GPs,
 						 (void *)part->remote_GPs_pa,
 						 XPC_GP_SIZE);
-		if (ret != xpcSuccess) {
+		if (ret != xpSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);

 			dev_dbg(xpc_chan, "failed to pull GPs from partition "
@@ -431,7 +431,7 @@ xpc_get_IPI_flags(struct xpc_partition *part)
 /*
  * Allocate the local message queue and the notify queue.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 {
 	unsigned long irq_flags;
@@ -464,18 +464,18 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch)
 			ch->local_nentries = nentries;
 		}
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return xpcSuccess;
+		return xpSuccess;
 	}

 	dev_dbg(xpc_chan, "can't get memory for local message queue and notify "
 		"queue, partid=%d, channel=%d\n", ch->partid, ch->number);
-	return xpcNoMemory;
+	return xpNoMemory;
 }

 /*
  * Allocate the cached remote message queue.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 {
 	unsigned long irq_flags;
@@ -502,12 +502,12 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
 			ch->remote_nentries = nentries;
 		}
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
-		return xpcSuccess;
+		return xpSuccess;
 	}

 	dev_dbg(xpc_chan, "can't get memory for cached remote message queue, "
 		"partid=%d, channel=%d\n", ch->partid, ch->number);
-	return xpcNoMemory;
+	return xpNoMemory;
 }

 /*
@@ -515,20 +515,20 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch)
  *
  * Note: Assumes all of the channel sizes are filled in.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_allocate_msgqueues(struct xpc_channel *ch)
 {
 	unsigned long irq_flags;
-	enum xpc_retval ret;
+	enum xp_retval ret;

 	DBUG_ON(ch->flags & XPC_C_SETUP);

 	ret = xpc_allocate_local_msgqueue(ch);
-	if (ret != xpcSuccess)
+	if (ret != xpSuccess)
 		return ret;

 	ret = xpc_allocate_remote_msgqueue(ch);
-	if (ret != xpcSuccess) {
+	if (ret != xpSuccess) {
 		kfree(ch->local_msgqueue_base);
 		ch->local_msgqueue = NULL;
 		kfree(ch->notify_queue);
@@ -540,7 +540,7 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 	ch->flags |= XPC_C_SETUP;
 	spin_unlock_irqrestore(&ch->lock, irq_flags);

-	return xpcSuccess;
+	return xpSuccess;
 }

 /*
@@ -552,7 +552,7 @@ xpc_allocate_msgqueues(struct xpc_channel *ch)
 static void
 xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 {
-	enum xpc_retval ret;
+	enum xp_retval ret;

 	DBUG_ON(!spin_is_locked(&ch->lock));

@@ -568,7 +568,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
 		ret = xpc_allocate_msgqueues(ch);
 		spin_lock_irqsave(&ch->lock, *irq_flags);

-		if (ret != xpcSuccess)
+		if (ret != xpSuccess)
 			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

 		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
@@ -603,7 +603,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
  * Notify those who wanted to be notified upon delivery of their message.
  */
 static void
-xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put)
+xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put)
 {
 	struct xpc_notify *notify;
 	u8 notify_type;
@@ -748,7 +748,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)

 	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
 		spin_unlock_irqrestore(&ch->lock, *irq_flags);
-		xpc_disconnect_callout(ch, xpcDisconnected);
+		xpc_disconnect_callout(ch, xpDisconnected);
 		spin_lock_irqsave(&ch->lock, *irq_flags);
 	}

@@ -791,7 +791,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number,
 	struct xpc_openclose_args *args =
 	    &part->remote_openclose_args[ch_number];
 	struct xpc_channel *ch = &part->channels[ch_number];
-	enum xpc_retval reason;
+	enum xp_retval reason;

 	spin_lock_irqsave(&ch->lock, irq_flags);

@@ -871,10 +871,10 @@ again:

 		if (!(ch->flags & XPC_C_DISCONNECTING)) {
 			reason = args->reason;
-			if (reason <= xpcSuccess || reason > xpcUnknownReason)
-				reason = xpcUnknownReason;
-			else if (reason == xpcUnregistering)
-				reason = xpcOtherUnregistering;
+			if (reason <= xpSuccess || reason > xpUnknownReason)
+				reason = xpUnknownReason;
+			else if (reason == xpUnregistering)
+				reason = xpOtherUnregistering;

 			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

@@ -961,7 +961,7 @@ again:

 		if (ch->flags & XPC_C_OPENREQUEST) {
 			if (args->msg_size != ch->msg_size) {
-				XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
 						       &irq_flags);
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
 				return;
@@ -991,7 +991,7 @@ again:
 			return;
 		}
 		if (!(ch->flags & XPC_C_OPENREQUEST)) {
-			XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError,
+			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
 					       &irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
 			return;
@@ -1042,18 +1042,18 @@ again:
 /*
  * Attempt to establish a channel connection to a remote partition.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_connect_channel(struct xpc_channel *ch)
 {
 	unsigned long irq_flags;
 	struct xpc_registration *registration = &xpc_registrations[ch->number];

 	if (mutex_trylock(&registration->mutex) == 0)
-		return xpcRetry;
+		return xpRetry;

 	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
 		mutex_unlock(&registration->mutex);
-		return xpcUnregistered;
+		return xpUnregistered;
 	}

 	spin_lock_irqsave(&ch->lock, irq_flags);
@@ -1095,10 +1095,10 @@ xpc_connect_channel(struct xpc_channel *ch)
 			 * the channel lock as needed.
 			 */
 			mutex_unlock(&registration->mutex);
-			XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes,
+			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
 					       &irq_flags);
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
-			return xpcUnequalMsgSizes;
+			return xpUnequalMsgSizes;
 		}
 	} else {
 		ch->msg_size = registration->msg_size;
@@ -1120,7 +1120,7 @@ xpc_connect_channel(struct xpc_channel *ch)

 	spin_unlock_irqrestore(&ch->lock, irq_flags);

-	return xpcSuccess;
+	return xpSuccess;
 }

 /*
@@ -1203,7 +1203,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number)
 			 * Notify senders that messages sent have been
 			 * received and delivered by the other side.
 			 */
-			xpc_notify_senders(ch, xpcMsgDelivered,
+			xpc_notify_senders(ch, xpMsgDelivered,
 					   ch->remote_GP.get);
 		}

@@ -1335,7 +1335,7 @@ xpc_process_channel_activity(struct xpc_partition *part)
  * at the same time.
  */
 void
-xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
+xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
 {
 	unsigned long irq_flags;
 	int ch_number;
@@ -1375,7 +1375,7 @@ xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason)
 void
 xpc_teardown_infrastructure(struct xpc_partition *part)
 {
-	partid_t partid = XPC_PARTID(part);
+	short partid = XPC_PARTID(part);

 	/*
 	 * We start off by making this partition inaccessible to local
@@ -1428,7 +1428,7 @@ xpc_teardown_infrastructure(struct xpc_partition *part)
 void
 xpc_initiate_connect(int ch_number)
 {
-	partid_t partid;
+	short partid;
 	struct xpc_partition *part;
 	struct xpc_channel *ch;

@@ -1456,13 +1456,13 @@ xpc_connected_callout(struct xpc_channel *ch)
 	/* let the registerer know that a connection has been established */

 	if (ch->func != NULL) {
-		dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, "
+		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
 			"partid=%d, channel=%d\n", ch->partid, ch->number);

-		ch->func(xpcConnected, ch->partid, ch->number,
+		ch->func(xpConnected, ch->partid, ch->number,
 			 (void *)(u64)ch->local_nentries, ch->key);

-		dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, "
+		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
 			"partid=%d, channel=%d\n", ch->partid, ch->number);
 	}
 }
@@ -1484,7 +1484,7 @@ void
 xpc_initiate_disconnect(int ch_number)
 {
 	unsigned long irq_flags;
-	partid_t partid;
+	short partid;
 	struct xpc_partition *part;
 	struct xpc_channel *ch;

@@ -1503,7 +1503,7 @@ xpc_initiate_disconnect(int ch_number)
 		if (!(ch->flags & XPC_C_DISCONNECTED)) {
 			ch->flags |= XPC_C_WDISCONNECT;

-			XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering,
+			XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
 					       &irq_flags);
 		}

@@ -1528,7 +1528,7 @@ xpc_initiate_disconnect(int ch_number)
  */
 void
 xpc_disconnect_channel(const int line, struct xpc_channel *ch,
-		       enum xpc_retval reason, unsigned long *irq_flags)
+		       enum xp_retval reason, unsigned long *irq_flags)
 {
 	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

@@ -1563,7 +1563,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,

 	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
 		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
-		/* start a kthread that will do the xpcDisconnecting callout */
+		/* start a kthread that will do the xpDisconnecting callout */
 		xpc_create_kthreads(ch, 1, 1);
 	}

@@ -1575,7 +1575,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 }

 void
-xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
+xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
 {
 	/*
 	 * Let the channel's registerer know that the channel is being
@@ -1598,13 +1598,13 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
-static enum xpc_retval
+static enum xp_retval
 xpc_allocate_msg_wait(struct xpc_channel *ch)
 {
-	enum xpc_retval ret;
+	enum xp_retval ret;

 	if (ch->flags & XPC_C_DISCONNECTING) {
-		DBUG_ON(ch->reason == xpcInterrupted);
+		DBUG_ON(ch->reason == xpInterrupted);
 		return ch->reason;
 	}

@@ -1614,11 +1614,11 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)

 	if (ch->flags & XPC_C_DISCONNECTING) {
 		ret = ch->reason;
-		DBUG_ON(ch->reason == xpcInterrupted);
+		DBUG_ON(ch->reason == xpInterrupted);
 	} else if (ret == 0) {
-		ret = xpcTimeout;
+		ret = xpTimeout;
 	} else {
-		ret = xpcInterrupted;
+		ret = xpInterrupted;
 	}

 	return ret;
@@ -1628,12 +1628,12 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
  * Allocate an entry for a message from the message queue associated with the
  * specified channel.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 		 struct xpc_msg **address_of_msg)
 {
 	struct xpc_msg *msg;
-	enum xpc_retval ret;
+	enum xp_retval ret;
 	s64 put;

 	/* this reference will be dropped in xpc_send_msg() */
@@ -1645,7 +1645,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 	}
 	if (!(ch->flags & XPC_C_CONNECTED)) {
 		xpc_msgqueue_deref(ch);
-		return xpcNotConnected;
+		return xpNotConnected;
 	}

 	/*
@@ -1653,7 +1653,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 	 * If none are available, we'll make sure that we grab the latest
 	 * GP values.
 	 */
-	ret = xpcTimeout;
+	ret = xpTimeout;

 	while (1) {

@@ -1683,16 +1683,16 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
 		 * that will cause the IPI handler to fetch the latest
 		 * GP values as if an IPI was sent by the other side.
 		 */
-		if (ret == xpcTimeout)
+		if (ret == xpTimeout)
 			xpc_IPI_send_local_msgrequest(ch);

 		if (flags & XPC_NOWAIT) {
 			xpc_msgqueue_deref(ch);
-			return xpcNoWait;
+			return xpNoWait;
 		}

 		ret = xpc_allocate_msg_wait(ch);
-		if (ret != xpcInterrupted && ret != xpcTimeout) {
+		if (ret != xpInterrupted && ret != xpTimeout) {
 			xpc_msgqueue_deref(ch);
 			return ret;
 		}
@@ -1711,7 +1711,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,

 	*address_of_msg = msg;

-	return xpcSuccess;
+	return xpSuccess;
 }

 /*
@@ -1727,11 +1727,11 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags,
  * payload - address of the allocated payload area pointer (filled in on
  *	return) in which the user-defined message is constructed.
  */
-enum xpc_retval
-xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload)
+enum xp_retval
+xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
-	enum xpc_retval ret = xpcUnknownReason;
+	enum xp_retval ret = xpUnknownReason;
 	struct xpc_msg *msg = NULL;

 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
@@ -1814,11 +1814,11 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put)
  * local message queue's Put value and sends an IPI to the partition the
  * message is being sent to.
  */
-static enum xpc_retval
+static enum xp_retval
 xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
 	     xpc_notify_func func, void *key)
 {
-	enum xpc_retval ret = xpcSuccess;
+	enum xp_retval ret = xpSuccess;
 	struct xpc_notify *notify = notify;
 	s64 put, msg_number = msg->number;

@@ -1908,12 +1908,12 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type,
  * payload - pointer to the payload area allocated via
  *	xpc_initiate_allocate().
  */
-enum xpc_retval
-xpc_initiate_send(partid_t partid, int ch_number, void *payload)
+enum xp_retval
+xpc_initiate_send(short partid, int ch_number, void *payload)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
 	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
-	enum xpc_retval ret;
+	enum xp_retval ret;

 	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
 		partid, ch_number);
@@ -1957,13 +1957,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload)
  *	receipt. THIS FUNCTION MUST BE NON-BLOCKING.
  * key - user-defined key to be passed to the function when it's called.
  */
-enum xpc_retval
-xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload,
+enum xp_retval
+xpc_initiate_send_notify(short partid, int ch_number, void *payload,
 			 xpc_notify_func func, void *key)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
 	struct xpc_msg *msg = XPC_MSG_ADDRESS(payload);
-	enum xpc_retval ret;
+	enum xp_retval ret;

 	dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg,
 		partid, ch_number);
@@ -1985,7 +1985,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)
 	struct xpc_msg *remote_msg, *msg;
 	u32 msg_index, nmsgs;
 	u64 msg_offset;
-	enum xpc_retval ret;
+	enum xp_retval ret;

 	if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) {
 		/* we were interrupted by a signal */
@@ -2012,7 +2012,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get)

 		ret = xpc_pull_remote_cachelines(part, msg, remote_msg,
 						 nmsgs * ch->msg_size);
-		if (ret != xpcSuccess) {
+		if (ret != xpSuccess) {

 			dev_dbg(xpc_chan, "failed to pull %d msgs starting with"
 				" msg %ld from partition %d, channel=%d, "
@@ -2112,7 +2112,7 @@ xpc_deliver_msg(struct xpc_channel *ch)
 			ch->number);

 		/* deliver the message to its intended recipient */
-		ch->func(xpcMsgReceived, ch->partid, ch->number,
+		ch->func(xpMsgReceived, ch->partid, ch->number,
 			 &msg->payload, ch->key);

 		dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, "
@@ -2203,7 +2203,7 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags)
  *	xpc_initiate_allocate().
  */
 void
-xpc_initiate_received(partid_t partid, int ch_number, void *payload)
+xpc_initiate_received(short partid, int ch_number, void *payload)
 {
 	struct xpc_partition *part = &xpc_partitions[partid];
 	struct xpc_channel *ch;
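
The hunks above are a mechanical rename: "enum xpc_retval" becomes "enum xp_retval", its values drop the "xpc" prefix (xpcSuccess -> xpSuccess, xpcNoMemory -> xpNoMemory, ...), and "partid_t" becomes "short". The stand-alone C sketch below is illustrative only and is not part of the patch; it shows the caller-side convention the rename touches (functions return an xp_retval that is compared against xpSuccess). The enum values and the demo function here are assumed stand-ins; the real definitions live in the sgi-xp headers.

/* illustrative sketch, not part of the patch */
#include <stdio.h>

enum xp_retval {
	xpSuccess = 0,	/* assumed stand-in subset of the real enum */
	xpNotConnected,
	xpNoMemory,
	xpTimeout,
};

/* stand-in for a channel operation that now returns enum xp_retval */
static enum xp_retval
demo_channel_op(int have_memory)
{
	return have_memory ? xpSuccess : xpNoMemory;
}

int
main(void)
{
	enum xp_retval ret = demo_channel_op(0);

	/* callers test against xpSuccess, as in the hunks above */
	if (ret != xpSuccess)
		printf("channel op failed, ret=%d\n", ret);

	return 0;
}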