diff options
author | Dean Nelson <dcn@sgi.com> | 2008-04-22 15:50:17 -0400 |
---|---|---|
committer | Tony Luck <tony.luck@intel.com> | 2008-04-22 18:08:55 -0400 |
commit | 2c2b94f93f4732c3b9703ce62627e6187e7d6128 (patch) | |
tree | 47fbdee38bc7cf0eec8c7c254a6c1c045ebbdb7e /drivers/misc/sgi-xp/xpc_channel.c | |
parent | 35190506b1a18eda7df24b285fdcd94dec7800ef (diff) |
[IA64] run drivers/misc/sgi-xp through scripts/checkpatch.pl
Addressed issues raised by scripts/checkpatch.pl. Removed unnecessary curly
braces. Eliminated uses of volatiles and use of kernel_thread() and daemonize().
Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'drivers/misc/sgi-xp/xpc_channel.c')
-rw-r--r-- | drivers/misc/sgi-xp/xpc_channel.c | 192 |
1 file changed, 75 insertions, 117 deletions
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 15cb91a82102..bfcb9ea968e9 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c | |||
@@ -33,19 +33,19 @@ xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) | |||
33 | { | 33 | { |
34 | /* see if kzalloc will give us cachline aligned memory by default */ | 34 | /* see if kzalloc will give us cachline aligned memory by default */ |
35 | *base = kzalloc(size, flags); | 35 | *base = kzalloc(size, flags); |
36 | if (*base == NULL) { | 36 | if (*base == NULL) |
37 | return NULL; | 37 | return NULL; |
38 | } | 38 | |
39 | if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) { | 39 | if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) |
40 | return *base; | 40 | return *base; |
41 | } | 41 | |
42 | kfree(*base); | 42 | kfree(*base); |
43 | 43 | ||
44 | /* nope, we'll have to do it ourselves */ | 44 | /* nope, we'll have to do it ourselves */ |
45 | *base = kzalloc(size + L1_CACHE_BYTES, flags); | 45 | *base = kzalloc(size + L1_CACHE_BYTES, flags); |
46 | if (*base == NULL) { | 46 | if (*base == NULL) |
47 | return NULL; | 47 | return NULL; |
48 | } | 48 | |
49 | return (void *)L1_CACHE_ALIGN((u64)*base); | 49 | return (void *)L1_CACHE_ALIGN((u64)*base); |
50 | } | 50 | } |
51 | 51 | ||
@@ -264,15 +264,13 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, | |||
264 | DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); | 264 | DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); |
265 | DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); | 265 | DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); |
266 | 266 | ||
267 | if (part->act_state == XPC_P_DEACTIVATING) { | 267 | if (part->act_state == XPC_P_DEACTIVATING) |
268 | return part->reason; | 268 | return part->reason; |
269 | } | ||
270 | 269 | ||
271 | bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt, | 270 | bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt, |
272 | (BTE_NORMAL | BTE_WACQUIRE), NULL); | 271 | (BTE_NORMAL | BTE_WACQUIRE), NULL); |
273 | if (bte_ret == BTE_SUCCESS) { | 272 | if (bte_ret == BTE_SUCCESS) |
274 | return xpcSuccess; | 273 | return xpcSuccess; |
275 | } | ||
276 | 274 | ||
277 | dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", | 275 | dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", |
278 | XPC_PARTID(part), bte_ret); | 276 | XPC_PARTID(part), bte_ret); |
@@ -359,18 +357,16 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) | |||
359 | part->remote_IPI_nasid = pulled_entry->IPI_nasid; | 357 | part->remote_IPI_nasid = pulled_entry->IPI_nasid; |
360 | part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; | 358 | part->remote_IPI_phys_cpuid = pulled_entry->IPI_phys_cpuid; |
361 | 359 | ||
362 | if (part->nchannels > pulled_entry->nchannels) { | 360 | if (part->nchannels > pulled_entry->nchannels) |
363 | part->nchannels = pulled_entry->nchannels; | 361 | part->nchannels = pulled_entry->nchannels; |
364 | } | ||
365 | 362 | ||
366 | /* let the other side know that we've pulled their variables */ | 363 | /* let the other side know that we've pulled their variables */ |
367 | 364 | ||
368 | xpc_vars_part[partid].magic = XPC_VP_MAGIC2; | 365 | xpc_vars_part[partid].magic = XPC_VP_MAGIC2; |
369 | } | 366 | } |
370 | 367 | ||
371 | if (pulled_entry->magic == XPC_VP_MAGIC1) { | 368 | if (pulled_entry->magic == XPC_VP_MAGIC1) |
372 | return xpcRetry; | 369 | return xpcRetry; |
373 | } | ||
374 | 370 | ||
375 | return xpcSuccess; | 371 | return xpcSuccess; |
376 | } | 372 | } |
@@ -390,9 +386,10 @@ xpc_get_IPI_flags(struct xpc_partition *part) | |||
390 | */ | 386 | */ |
391 | 387 | ||
392 | spin_lock_irqsave(&part->IPI_lock, irq_flags); | 388 | spin_lock_irqsave(&part->IPI_lock, irq_flags); |
393 | if ((IPI_amo = part->local_IPI_amo) != 0) { | 389 | IPI_amo = part->local_IPI_amo; |
390 | if (IPI_amo != 0) | ||
394 | part->local_IPI_amo = 0; | 391 | part->local_IPI_amo = 0; |
395 | } | 392 | |
396 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); | 393 | spin_unlock_irqrestore(&part->IPI_lock, irq_flags); |
397 | 394 | ||
398 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { | 395 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_amo)) { |
@@ -441,20 +438,14 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch) | |||
441 | int nentries; | 438 | int nentries; |
442 | size_t nbytes; | 439 | size_t nbytes; |
443 | 440 | ||
444 | // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between | ||
445 | // >>> iterations of the for-loop, bail if set? | ||
446 | |||
447 | // >>> should we impose a minimum #of entries? like 4 or 8? | ||
448 | for (nentries = ch->local_nentries; nentries > 0; nentries--) { | 441 | for (nentries = ch->local_nentries; nentries > 0; nentries--) { |
449 | 442 | ||
450 | nbytes = nentries * ch->msg_size; | 443 | nbytes = nentries * ch->msg_size; |
451 | ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, | 444 | ch->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, |
452 | GFP_KERNEL, | 445 | GFP_KERNEL, |
453 | &ch-> | 446 | &ch->local_msgqueue_base); |
454 | local_msgqueue_base); | 447 | if (ch->local_msgqueue == NULL) |
455 | if (ch->local_msgqueue == NULL) { | ||
456 | continue; | 448 | continue; |
457 | } | ||
458 | 449 | ||
459 | nbytes = nentries * sizeof(struct xpc_notify); | 450 | nbytes = nentries * sizeof(struct xpc_notify); |
460 | ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); | 451 | ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); |
@@ -493,20 +484,14 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch) | |||
493 | 484 | ||
494 | DBUG_ON(ch->remote_nentries <= 0); | 485 | DBUG_ON(ch->remote_nentries <= 0); |
495 | 486 | ||
496 | // >>> may want to check for ch->flags & XPC_C_DISCONNECTING between | ||
497 | // >>> iterations of the for-loop, bail if set? | ||
498 | |||
499 | // >>> should we impose a minimum #of entries? like 4 or 8? | ||
500 | for (nentries = ch->remote_nentries; nentries > 0; nentries--) { | 487 | for (nentries = ch->remote_nentries; nentries > 0; nentries--) { |
501 | 488 | ||
502 | nbytes = nentries * ch->msg_size; | 489 | nbytes = nentries * ch->msg_size; |
503 | ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, | 490 | ch->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, |
504 | GFP_KERNEL, | 491 | GFP_KERNEL, |
505 | &ch-> | 492 | &ch->remote_msgqueue_base); |
506 | remote_msgqueue_base); | 493 | if (ch->remote_msgqueue == NULL) |
507 | if (ch->remote_msgqueue == NULL) { | ||
508 | continue; | 494 | continue; |
509 | } | ||
510 | 495 | ||
511 | spin_lock_irqsave(&ch->lock, irq_flags); | 496 | spin_lock_irqsave(&ch->lock, irq_flags); |
512 | if (nentries < ch->remote_nentries) { | 497 | if (nentries < ch->remote_nentries) { |
@@ -538,11 +523,12 @@ xpc_allocate_msgqueues(struct xpc_channel *ch) | |||
538 | 523 | ||
539 | DBUG_ON(ch->flags & XPC_C_SETUP); | 524 | DBUG_ON(ch->flags & XPC_C_SETUP); |
540 | 525 | ||
541 | if ((ret = xpc_allocate_local_msgqueue(ch)) != xpcSuccess) { | 526 | ret = xpc_allocate_local_msgqueue(ch); |
527 | if (ret != xpcSuccess) | ||
542 | return ret; | 528 | return ret; |
543 | } | ||
544 | 529 | ||
545 | if ((ret = xpc_allocate_remote_msgqueue(ch)) != xpcSuccess) { | 530 | ret = xpc_allocate_remote_msgqueue(ch); |
531 | if (ret != xpcSuccess) { | ||
546 | kfree(ch->local_msgqueue_base); | 532 | kfree(ch->local_msgqueue_base); |
547 | ch->local_msgqueue = NULL; | 533 | ch->local_msgqueue = NULL; |
548 | kfree(ch->notify_queue); | 534 | kfree(ch->notify_queue); |
@@ -582,12 +568,11 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
582 | ret = xpc_allocate_msgqueues(ch); | 568 | ret = xpc_allocate_msgqueues(ch); |
583 | spin_lock_irqsave(&ch->lock, *irq_flags); | 569 | spin_lock_irqsave(&ch->lock, *irq_flags); |
584 | 570 | ||
585 | if (ret != xpcSuccess) { | 571 | if (ret != xpcSuccess) |
586 | XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); | 572 | XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); |
587 | } | 573 | |
588 | if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) { | 574 | if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) |
589 | return; | 575 | return; |
590 | } | ||
591 | 576 | ||
592 | DBUG_ON(!(ch->flags & XPC_C_SETUP)); | 577 | DBUG_ON(!(ch->flags & XPC_C_SETUP)); |
593 | DBUG_ON(ch->local_msgqueue == NULL); | 578 | DBUG_ON(ch->local_msgqueue == NULL); |
@@ -599,9 +584,8 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
599 | xpc_IPI_send_openreply(ch, irq_flags); | 584 | xpc_IPI_send_openreply(ch, irq_flags); |
600 | } | 585 | } |
601 | 586 | ||
602 | if (!(ch->flags & XPC_C_ROPENREPLY)) { | 587 | if (!(ch->flags & XPC_C_ROPENREPLY)) |
603 | return; | 588 | return; |
604 | } | ||
605 | 589 | ||
606 | DBUG_ON(ch->remote_msgqueue_pa == 0); | 590 | DBUG_ON(ch->remote_msgqueue_pa == 0); |
607 | 591 | ||
@@ -719,9 +703,8 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
719 | 703 | ||
720 | DBUG_ON(!spin_is_locked(&ch->lock)); | 704 | DBUG_ON(!spin_is_locked(&ch->lock)); |
721 | 705 | ||
722 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | 706 | if (!(ch->flags & XPC_C_DISCONNECTING)) |
723 | return; | 707 | return; |
724 | } | ||
725 | 708 | ||
726 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); | 709 | DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); |
727 | 710 | ||
@@ -736,26 +719,23 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
736 | 719 | ||
737 | if (part->act_state == XPC_P_DEACTIVATING) { | 720 | if (part->act_state == XPC_P_DEACTIVATING) { |
738 | /* can't proceed until the other side disengages from us */ | 721 | /* can't proceed until the other side disengages from us */ |
739 | if (xpc_partition_engaged(1UL << ch->partid)) { | 722 | if (xpc_partition_engaged(1UL << ch->partid)) |
740 | return; | 723 | return; |
741 | } | ||
742 | 724 | ||
743 | } else { | 725 | } else { |
744 | 726 | ||
745 | /* as long as the other side is up do the full protocol */ | 727 | /* as long as the other side is up do the full protocol */ |
746 | 728 | ||
747 | if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { | 729 | if (!(ch->flags & XPC_C_RCLOSEREQUEST)) |
748 | return; | 730 | return; |
749 | } | ||
750 | 731 | ||
751 | if (!(ch->flags & XPC_C_CLOSEREPLY)) { | 732 | if (!(ch->flags & XPC_C_CLOSEREPLY)) { |
752 | ch->flags |= XPC_C_CLOSEREPLY; | 733 | ch->flags |= XPC_C_CLOSEREPLY; |
753 | xpc_IPI_send_closereply(ch, irq_flags); | 734 | xpc_IPI_send_closereply(ch, irq_flags); |
754 | } | 735 | } |
755 | 736 | ||
756 | if (!(ch->flags & XPC_C_RCLOSEREPLY)) { | 737 | if (!(ch->flags & XPC_C_RCLOSEREPLY)) |
757 | return; | 738 | return; |
758 | } | ||
759 | } | 739 | } |
760 | 740 | ||
761 | /* wake those waiting for notify completion */ | 741 | /* wake those waiting for notify completion */ |
@@ -815,9 +795,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, | |||
815 | 795 | ||
816 | spin_lock_irqsave(&ch->lock, irq_flags); | 796 | spin_lock_irqsave(&ch->lock, irq_flags); |
817 | 797 | ||
818 | again: | 798 | again: |
819 | 799 | ||
820 | if ((ch->flags & XPC_C_DISCONNECTED) && (ch->flags & XPC_C_WDISCONNECT)) { | 800 | if ((ch->flags & XPC_C_DISCONNECTED) && |
801 | (ch->flags & XPC_C_WDISCONNECT)) { | ||
821 | /* | 802 | /* |
822 | * Delay processing IPI flags until thread waiting disconnect | 803 | * Delay processing IPI flags until thread waiting disconnect |
823 | * has had a chance to see that the channel is disconnected. | 804 | * has had a chance to see that the channel is disconnected. |
@@ -890,11 +871,10 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, | |||
890 | 871 | ||
891 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | 872 | if (!(ch->flags & XPC_C_DISCONNECTING)) { |
892 | reason = args->reason; | 873 | reason = args->reason; |
893 | if (reason <= xpcSuccess || reason > xpcUnknownReason) { | 874 | if (reason <= xpcSuccess || reason > xpcUnknownReason) |
894 | reason = xpcUnknownReason; | 875 | reason = xpcUnknownReason; |
895 | } else if (reason == xpcUnregistering) { | 876 | else if (reason == xpcUnregistering) |
896 | reason = xpcOtherUnregistering; | 877 | reason = xpcOtherUnregistering; |
897 | } | ||
898 | 878 | ||
899 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); | 879 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); |
900 | 880 | ||
@@ -1068,9 +1048,8 @@ xpc_connect_channel(struct xpc_channel *ch) | |||
1068 | unsigned long irq_flags; | 1048 | unsigned long irq_flags; |
1069 | struct xpc_registration *registration = &xpc_registrations[ch->number]; | 1049 | struct xpc_registration *registration = &xpc_registrations[ch->number]; |
1070 | 1050 | ||
1071 | if (mutex_trylock(®istration->mutex) == 0) { | 1051 | if (mutex_trylock(®istration->mutex) == 0) |
1072 | return xpcRetry; | 1052 | return xpcRetry; |
1073 | } | ||
1074 | 1053 | ||
1075 | if (!XPC_CHANNEL_REGISTERED(ch->number)) { | 1054 | if (!XPC_CHANNEL_REGISTERED(ch->number)) { |
1076 | mutex_unlock(®istration->mutex); | 1055 | mutex_unlock(®istration->mutex); |
@@ -1159,7 +1138,7 @@ xpc_clear_local_msgqueue_flags(struct xpc_channel *ch) | |||
1159 | (get % ch->local_nentries) * | 1138 | (get % ch->local_nentries) * |
1160 | ch->msg_size); | 1139 | ch->msg_size); |
1161 | msg->flags = 0; | 1140 | msg->flags = 0; |
1162 | } while (++get < (volatile s64)ch->remote_GP.get); | 1141 | } while (++get < ch->remote_GP.get); |
1163 | } | 1142 | } |
1164 | 1143 | ||
1165 | /* | 1144 | /* |
@@ -1177,7 +1156,7 @@ xpc_clear_remote_msgqueue_flags(struct xpc_channel *ch) | |||
1177 | (put % ch->remote_nentries) * | 1156 | (put % ch->remote_nentries) * |
1178 | ch->msg_size); | 1157 | ch->msg_size); |
1179 | msg->flags = 0; | 1158 | msg->flags = 0; |
1180 | } while (++put < (volatile s64)ch->remote_GP.put); | 1159 | } while (++put < ch->remote_GP.put); |
1181 | } | 1160 | } |
1182 | 1161 | ||
1183 | static void | 1162 | static void |
@@ -1244,9 +1223,8 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) | |||
1244 | * If anyone was waiting for message queue entries to become | 1223 | * If anyone was waiting for message queue entries to become |
1245 | * available, wake them up. | 1224 | * available, wake them up. |
1246 | */ | 1225 | */ |
1247 | if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { | 1226 | if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) |
1248 | wake_up(&ch->msg_allocate_wq); | 1227 | wake_up(&ch->msg_allocate_wq); |
1249 | } | ||
1250 | } | 1228 | } |
1251 | 1229 | ||
1252 | /* | 1230 | /* |
@@ -1273,9 +1251,8 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) | |||
1273 | "delivered=%d, partid=%d, channel=%d\n", | 1251 | "delivered=%d, partid=%d, channel=%d\n", |
1274 | nmsgs_sent, ch->partid, ch->number); | 1252 | nmsgs_sent, ch->partid, ch->number); |
1275 | 1253 | ||
1276 | if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { | 1254 | if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) |
1277 | xpc_activate_kthreads(ch, nmsgs_sent); | 1255 | xpc_activate_kthreads(ch, nmsgs_sent); |
1278 | } | ||
1279 | } | 1256 | } |
1280 | } | 1257 | } |
1281 | 1258 | ||
@@ -1310,9 +1287,8 @@ xpc_process_channel_activity(struct xpc_partition *part) | |||
1310 | 1287 | ||
1311 | IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); | 1288 | IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number); |
1312 | 1289 | ||
1313 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) { | 1290 | if (XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(IPI_flags)) |
1314 | xpc_process_openclose_IPI(part, ch_number, IPI_flags); | 1291 | xpc_process_openclose_IPI(part, ch_number, IPI_flags); |
1315 | } | ||
1316 | 1292 | ||
1317 | ch_flags = ch->flags; /* need an atomic snapshot of flags */ | 1293 | ch_flags = ch->flags; /* need an atomic snapshot of flags */ |
1318 | 1294 | ||
@@ -1323,9 +1299,8 @@ xpc_process_channel_activity(struct xpc_partition *part) | |||
1323 | continue; | 1299 | continue; |
1324 | } | 1300 | } |
1325 | 1301 | ||
1326 | if (part->act_state == XPC_P_DEACTIVATING) { | 1302 | if (part->act_state == XPC_P_DEACTIVATING) |
1327 | continue; | 1303 | continue; |
1328 | } | ||
1329 | 1304 | ||
1330 | if (!(ch_flags & XPC_C_CONNECTED)) { | 1305 | if (!(ch_flags & XPC_C_CONNECTED)) { |
1331 | if (!(ch_flags & XPC_C_OPENREQUEST)) { | 1306 | if (!(ch_flags & XPC_C_OPENREQUEST)) { |
@@ -1345,9 +1320,8 @@ xpc_process_channel_activity(struct xpc_partition *part) | |||
1345 | * from the other partition. | 1320 | * from the other partition. |
1346 | */ | 1321 | */ |
1347 | 1322 | ||
1348 | if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) { | 1323 | if (XPC_ANY_MSG_IPI_FLAGS_SET(IPI_flags)) |
1349 | xpc_process_msg_IPI(part, ch_number); | 1324 | xpc_process_msg_IPI(part, ch_number); |
1350 | } | ||
1351 | } | 1325 | } |
1352 | } | 1326 | } |
1353 | 1327 | ||
@@ -1560,9 +1534,9 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, | |||
1560 | 1534 | ||
1561 | DBUG_ON(!spin_is_locked(&ch->lock)); | 1535 | DBUG_ON(!spin_is_locked(&ch->lock)); |
1562 | 1536 | ||
1563 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { | 1537 | if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) |
1564 | return; | 1538 | return; |
1565 | } | 1539 | |
1566 | DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); | 1540 | DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); |
1567 | 1541 | ||
1568 | dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", | 1542 | dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n", |
@@ -1578,9 +1552,8 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, | |||
1578 | 1552 | ||
1579 | xpc_IPI_send_closerequest(ch, irq_flags); | 1553 | xpc_IPI_send_closerequest(ch, irq_flags); |
1580 | 1554 | ||
1581 | if (channel_was_connected) { | 1555 | if (channel_was_connected) |
1582 | ch->flags |= XPC_C_WASCONNECTED; | 1556 | ch->flags |= XPC_C_WASCONNECTED; |
1583 | } | ||
1584 | 1557 | ||
1585 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | 1558 | spin_unlock_irqrestore(&ch->lock, *irq_flags); |
1586 | 1559 | ||
@@ -1595,9 +1568,8 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, | |||
1595 | } | 1568 | } |
1596 | 1569 | ||
1597 | /* wake those waiting to allocate an entry from the local msg queue */ | 1570 | /* wake those waiting to allocate an entry from the local msg queue */ |
1598 | if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) { | 1571 | if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) |
1599 | wake_up(&ch->msg_allocate_wq); | 1572 | wake_up(&ch->msg_allocate_wq); |
1600 | } | ||
1601 | 1573 | ||
1602 | spin_lock_irqsave(&ch->lock, *irq_flags); | 1574 | spin_lock_irqsave(&ch->lock, *irq_flags); |
1603 | } | 1575 | } |
@@ -1632,7 +1604,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) | |||
1632 | enum xpc_retval ret; | 1604 | enum xpc_retval ret; |
1633 | 1605 | ||
1634 | if (ch->flags & XPC_C_DISCONNECTING) { | 1606 | if (ch->flags & XPC_C_DISCONNECTING) { |
1635 | DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? | 1607 | DBUG_ON(ch->reason == xpcInterrupted); |
1636 | return ch->reason; | 1608 | return ch->reason; |
1637 | } | 1609 | } |
1638 | 1610 | ||
@@ -1642,7 +1614,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) | |||
1642 | 1614 | ||
1643 | if (ch->flags & XPC_C_DISCONNECTING) { | 1615 | if (ch->flags & XPC_C_DISCONNECTING) { |
1644 | ret = ch->reason; | 1616 | ret = ch->reason; |
1645 | DBUG_ON(ch->reason == xpcInterrupted); // >>> Is this true? | 1617 | DBUG_ON(ch->reason == xpcInterrupted); |
1646 | } else if (ret == 0) { | 1618 | } else if (ret == 0) { |
1647 | ret = xpcTimeout; | 1619 | ret = xpcTimeout; |
1648 | } else { | 1620 | } else { |
@@ -1685,9 +1657,9 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | |||
1685 | 1657 | ||
1686 | while (1) { | 1658 | while (1) { |
1687 | 1659 | ||
1688 | put = (volatile s64)ch->w_local_GP.put; | 1660 | put = ch->w_local_GP.put; |
1689 | if (put - (volatile s64)ch->w_remote_GP.get < | 1661 | rmb(); /* guarantee that .put loads before .get */ |
1690 | ch->local_nentries) { | 1662 | if (put - ch->w_remote_GP.get < ch->local_nentries) { |
1691 | 1663 | ||
1692 | /* There are available message entries. We need to try | 1664 | /* There are available message entries. We need to try |
1693 | * to secure one for ourselves. We'll do this by trying | 1665 | * to secure one for ourselves. We'll do this by trying |
@@ -1711,9 +1683,8 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | |||
1711 | * that will cause the IPI handler to fetch the latest | 1683 | * that will cause the IPI handler to fetch the latest |
1712 | * GP values as if an IPI was sent by the other side. | 1684 | * GP values as if an IPI was sent by the other side. |
1713 | */ | 1685 | */ |
1714 | if (ret == xpcTimeout) { | 1686 | if (ret == xpcTimeout) |
1715 | xpc_IPI_send_local_msgrequest(ch); | 1687 | xpc_IPI_send_local_msgrequest(ch); |
1716 | } | ||
1717 | 1688 | ||
1718 | if (flags & XPC_NOWAIT) { | 1689 | if (flags & XPC_NOWAIT) { |
1719 | xpc_msgqueue_deref(ch); | 1690 | xpc_msgqueue_deref(ch); |
@@ -1772,9 +1743,8 @@ xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) | |||
1772 | ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); | 1743 | ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); |
1773 | xpc_part_deref(part); | 1744 | xpc_part_deref(part); |
1774 | 1745 | ||
1775 | if (msg != NULL) { | 1746 | if (msg != NULL) |
1776 | *payload = &msg->payload; | 1747 | *payload = &msg->payload; |
1777 | } | ||
1778 | } | 1748 | } |
1779 | 1749 | ||
1780 | return ret; | 1750 | return ret; |
@@ -1795,17 +1765,15 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) | |||
1795 | while (1) { | 1765 | while (1) { |
1796 | 1766 | ||
1797 | while (1) { | 1767 | while (1) { |
1798 | if (put == (volatile s64)ch->w_local_GP.put) { | 1768 | if (put == ch->w_local_GP.put) |
1799 | break; | 1769 | break; |
1800 | } | ||
1801 | 1770 | ||
1802 | msg = (struct xpc_msg *)((u64)ch->local_msgqueue + | 1771 | msg = (struct xpc_msg *)((u64)ch->local_msgqueue + |
1803 | (put % ch->local_nentries) * | 1772 | (put % ch->local_nentries) * |
1804 | ch->msg_size); | 1773 | ch->msg_size); |
1805 | 1774 | ||
1806 | if (!(msg->flags & XPC_M_READY)) { | 1775 | if (!(msg->flags & XPC_M_READY)) |
1807 | break; | 1776 | break; |
1808 | } | ||
1809 | 1777 | ||
1810 | put++; | 1778 | put++; |
1811 | } | 1779 | } |
@@ -1818,7 +1786,7 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) | |||
1818 | if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != | 1786 | if (cmpxchg_rel(&ch->local_GP->put, initial_put, put) != |
1819 | initial_put) { | 1787 | initial_put) { |
1820 | /* someone else beat us to it */ | 1788 | /* someone else beat us to it */ |
1821 | DBUG_ON((volatile s64)ch->local_GP->put < initial_put); | 1789 | DBUG_ON(ch->local_GP->put < initial_put); |
1822 | break; | 1790 | break; |
1823 | } | 1791 | } |
1824 | 1792 | ||
@@ -1837,9 +1805,8 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) | |||
1837 | initial_put = put; | 1805 | initial_put = put; |
1838 | } | 1806 | } |
1839 | 1807 | ||
1840 | if (send_IPI) { | 1808 | if (send_IPI) |
1841 | xpc_IPI_send_msgrequest(ch); | 1809 | xpc_IPI_send_msgrequest(ch); |
1842 | } | ||
1843 | } | 1810 | } |
1844 | 1811 | ||
1845 | /* | 1812 | /* |
@@ -1880,7 +1847,7 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, | |||
1880 | notify->key = key; | 1847 | notify->key = key; |
1881 | notify->type = notify_type; | 1848 | notify->type = notify_type; |
1882 | 1849 | ||
1883 | // >>> is a mb() needed here? | 1850 | /* >>> is a mb() needed here? */ |
1884 | 1851 | ||
1885 | if (ch->flags & XPC_C_DISCONNECTING) { | 1852 | if (ch->flags & XPC_C_DISCONNECTING) { |
1886 | /* | 1853 | /* |
@@ -1913,9 +1880,8 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, | |||
1913 | /* see if the message is next in line to be sent, if so send it */ | 1880 | /* see if the message is next in line to be sent, if so send it */ |
1914 | 1881 | ||
1915 | put = ch->local_GP->put; | 1882 | put = ch->local_GP->put; |
1916 | if (put == msg_number) { | 1883 | if (put == msg_number) |
1917 | xpc_send_msgs(ch, put); | 1884 | xpc_send_msgs(ch, put); |
1918 | } | ||
1919 | 1885 | ||
1920 | /* drop the reference grabbed in xpc_allocate_msg() */ | 1886 | /* drop the reference grabbed in xpc_allocate_msg() */ |
1921 | xpc_msgqueue_deref(ch); | 1887 | xpc_msgqueue_deref(ch); |
@@ -2032,10 +1998,8 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | |||
2032 | 1998 | ||
2033 | msg_index = ch->next_msg_to_pull % ch->remote_nentries; | 1999 | msg_index = ch->next_msg_to_pull % ch->remote_nentries; |
2034 | 2000 | ||
2035 | DBUG_ON(ch->next_msg_to_pull >= | 2001 | DBUG_ON(ch->next_msg_to_pull >= ch->w_remote_GP.put); |
2036 | (volatile s64)ch->w_remote_GP.put); | 2002 | nmsgs = ch->w_remote_GP.put - ch->next_msg_to_pull; |
2037 | nmsgs = (volatile s64)ch->w_remote_GP.put - | ||
2038 | ch->next_msg_to_pull; | ||
2039 | if (msg_index + nmsgs > ch->remote_nentries) { | 2003 | if (msg_index + nmsgs > ch->remote_nentries) { |
2040 | /* ignore the ones that wrap the msg queue for now */ | 2004 | /* ignore the ones that wrap the msg queue for now */ |
2041 | nmsgs = ch->remote_nentries - msg_index; | 2005 | nmsgs = ch->remote_nentries - msg_index; |
@@ -2046,9 +2010,9 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | |||
2046 | remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + | 2010 | remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + |
2047 | msg_offset); | 2011 | msg_offset); |
2048 | 2012 | ||
2049 | if ((ret = xpc_pull_remote_cachelines(part, msg, remote_msg, | 2013 | ret = xpc_pull_remote_cachelines(part, msg, remote_msg, |
2050 | nmsgs * ch->msg_size)) != | 2014 | nmsgs * ch->msg_size); |
2051 | xpcSuccess) { | 2015 | if (ret != xpcSuccess) { |
2052 | 2016 | ||
2053 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" | 2017 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" |
2054 | " msg %ld from partition %d, channel=%d, " | 2018 | " msg %ld from partition %d, channel=%d, " |
@@ -2061,8 +2025,6 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | |||
2061 | return NULL; | 2025 | return NULL; |
2062 | } | 2026 | } |
2063 | 2027 | ||
2064 | mb(); /* >>> this may not be needed, we're not sure */ | ||
2065 | |||
2066 | ch->next_msg_to_pull += nmsgs; | 2028 | ch->next_msg_to_pull += nmsgs; |
2067 | } | 2029 | } |
2068 | 2030 | ||
@@ -2085,14 +2047,13 @@ xpc_get_deliverable_msg(struct xpc_channel *ch) | |||
2085 | s64 get; | 2047 | s64 get; |
2086 | 2048 | ||
2087 | do { | 2049 | do { |
2088 | if ((volatile u32)ch->flags & XPC_C_DISCONNECTING) { | 2050 | if (ch->flags & XPC_C_DISCONNECTING) |
2089 | break; | 2051 | break; |
2090 | } | ||
2091 | 2052 | ||
2092 | get = (volatile s64)ch->w_local_GP.get; | 2053 | get = ch->w_local_GP.get; |
2093 | if (get == (volatile s64)ch->w_remote_GP.put) { | 2054 | rmb(); /* guarantee that .get loads before .put */ |
2055 | if (get == ch->w_remote_GP.put) | ||
2094 | break; | 2056 | break; |
2095 | } | ||
2096 | 2057 | ||
2097 | /* There are messages waiting to be pulled and delivered. | 2058 | /* There are messages waiting to be pulled and delivered. |
2098 | * We need to try to secure one for ourselves. We'll do this | 2059 | * We need to try to secure one for ourselves. We'll do this |
@@ -2132,7 +2093,8 @@ xpc_deliver_msg(struct xpc_channel *ch) | |||
2132 | { | 2093 | { |
2133 | struct xpc_msg *msg; | 2094 | struct xpc_msg *msg; |
2134 | 2095 | ||
2135 | if ((msg = xpc_get_deliverable_msg(ch)) != NULL) { | 2096 | msg = xpc_get_deliverable_msg(ch); |
2097 | if (msg != NULL) { | ||
2136 | 2098 | ||
2137 | /* | 2099 | /* |
2138 | * This ref is taken to protect the payload itself from being | 2100 | * This ref is taken to protect the payload itself from being |
@@ -2178,17 +2140,15 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) | |||
2178 | while (1) { | 2140 | while (1) { |
2179 | 2141 | ||
2180 | while (1) { | 2142 | while (1) { |
2181 | if (get == (volatile s64)ch->w_local_GP.get) { | 2143 | if (get == ch->w_local_GP.get) |
2182 | break; | 2144 | break; |
2183 | } | ||
2184 | 2145 | ||
2185 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + | 2146 | msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + |
2186 | (get % ch->remote_nentries) * | 2147 | (get % ch->remote_nentries) * |
2187 | ch->msg_size); | 2148 | ch->msg_size); |
2188 | 2149 | ||
2189 | if (!(msg->flags & XPC_M_DONE)) { | 2150 | if (!(msg->flags & XPC_M_DONE)) |
2190 | break; | 2151 | break; |
2191 | } | ||
2192 | 2152 | ||
2193 | msg_flags |= msg->flags; | 2153 | msg_flags |= msg->flags; |
2194 | get++; | 2154 | get++; |
@@ -2202,7 +2162,7 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) | |||
2202 | if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != | 2162 | if (cmpxchg_rel(&ch->local_GP->get, initial_get, get) != |
2203 | initial_get) { | 2163 | initial_get) { |
2204 | /* someone else beat us to it */ | 2164 | /* someone else beat us to it */ |
2205 | DBUG_ON((volatile s64)ch->local_GP->get <= initial_get); | 2165 | DBUG_ON(ch->local_GP->get <= initial_get); |
2206 | break; | 2166 | break; |
2207 | } | 2167 | } |
2208 | 2168 | ||
@@ -2221,9 +2181,8 @@ xpc_acknowledge_msgs(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) | |||
2221 | initial_get = get; | 2181 | initial_get = get; |
2222 | } | 2182 | } |
2223 | 2183 | ||
2224 | if (send_IPI) { | 2184 | if (send_IPI) |
2225 | xpc_IPI_send_msgrequest(ch); | 2185 | xpc_IPI_send_msgrequest(ch); |
2226 | } | ||
2227 | } | 2186 | } |
2228 | 2187 | ||
2229 | /* | 2188 | /* |
@@ -2276,9 +2235,8 @@ xpc_initiate_received(partid_t partid, int ch_number, void *payload) | |||
2276 | * been delivered. | 2235 | * been delivered. |
2277 | */ | 2236 | */ |
2278 | get = ch->local_GP->get; | 2237 | get = ch->local_GP->get; |
2279 | if (get == msg_number) { | 2238 | if (get == msg_number) |
2280 | xpc_acknowledge_msgs(ch, get, msg->flags); | 2239 | xpc_acknowledge_msgs(ch, get, msg->flags); |
2281 | } | ||
2282 | 2240 | ||
2283 | /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ | 2241 | /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ |
2284 | xpc_msgqueue_deref(ch); | 2242 | xpc_msgqueue_deref(ch); |