author     Cliff Wickman <cpw@sgi.com>  2010-06-02 17:22:02 -0400
committer  Ingo Molnar <mingo@elte.hu>  2010-06-08 15:13:48 -0400
commit     f6d8a56693426b1f29ff5cafda8be0d65e4e1870
tree       3fedbb46a459c147fd8d470c7f5091235677cc03 /arch/x86
parent     450a007eebaf430426ea8f89bbc3f287949905b2
x86, UV: Modularize BAU send and wait
Streamline the large uv_flush_send_and_wait() function by moving its
two retry-handling paths into helper functions, and remove some
excess comments.
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004ay-IH@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/uv/uv_bau.h |  1 -
 arch/x86/kernel/tlb_uv.c         | 82 ++++++++++++++++++++-----------------
 2 files changed, 44 insertions(+), 39 deletions(-)
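
The shape of the refactor, as a minimal self-contained C sketch (a userspace model only: the types, thresholds, and the reset_with_ipi() stand-in below are hypothetical simplifications, not the kernel's struct bau_control machinery with its spinlocks and hub quiescing). It shows the pattern the patch applies: two near-duplicate retry bodies become helpers with identical structure, and the send-and-wait loop shrinks to one call per retry reason.

/*
 * Minimal userspace model of this commit's refactoring pattern, not
 * kernel code: the stand-in types and thresholds are hypothetical.
 */
#include <stdio.h>

enum flush_status { FLUSH_COMPLETE, FLUSH_RETRY_PLUGGED, FLUSH_RETRY_TIMEOUT };

struct ctl {
	int plugged_tries;	/* consecutive "plugged" retries */
	int plugsb4reset;	/* threshold before an IPI-driven reset */
	int timeout_tries;
	int timeoutsb4reset;
	int resets;		/* how many resets were triggered */
};

/* Stand-in for the shared quiesce/lock/uv_reset_with_ipi/unlock sequence. */
static void reset_with_ipi(struct ctl *c)
{
	c->resets++;
}

/* Shape of destination_plugged(): count retries, reset past a threshold. */
static void destination_plugged(struct ctl *c)
{
	c->plugged_tries++;
	if (c->plugged_tries >= c->plugsb4reset) {
		c->plugged_tries = 0;
		reset_with_ipi(c);
	}
}

/* Shape of destination_timeout(): same structure, separate counters. */
static void destination_timeout(struct ctl *c)
{
	c->timeout_tries++;
	if (c->timeout_tries >= c->timeoutsb4reset) {
		c->timeout_tries = 0;
		reset_with_ipi(c);
	}
}

/* After the refactor, the retry loop reduces to one call per retry reason. */
static enum flush_status send_and_wait(struct ctl *c,
				       const enum flush_status *seq, int n)
{
	enum flush_status status = FLUSH_COMPLETE;
	int i;

	for (i = 0; i < n; i++) {
		status = seq[i];
		if (status == FLUSH_RETRY_PLUGGED)
			destination_plugged(c);
		else if (status == FLUSH_RETRY_TIMEOUT)
			destination_timeout(c);
		else
			break;
	}
	return status;
}

int main(void)
{
	/* A made-up completion sequence: two plugs, two timeouts, then done. */
	enum flush_status seq[] = {
		FLUSH_RETRY_PLUGGED, FLUSH_RETRY_PLUGGED,
		FLUSH_RETRY_TIMEOUT, FLUSH_RETRY_TIMEOUT,
		FLUSH_COMPLETE,
	};
	struct ctl c = { .plugsb4reset = 2, .timeoutsb4reset = 2 };
	enum flush_status status;

	status = send_and_wait(&c, seq, (int)(sizeof(seq) / sizeof(seq[0])));
	printf("status=%d resets=%d\n", status, c.resets);
	return 0;
}

Compiled and run, the sketch prints status=0 resets=2: each counter independently reaches its threshold once, mirroring how plugged_tries and timeout_tries trigger separate resets in the real loop.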
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 7f6ea611cb71..42d412fd8b02 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -75,7 +75,6 @@
 #define DESC_STATUS_DESTINATION_TIMEOUT	2
 #define DESC_STATUS_SOURCE_TIMEOUT	3
 
-#define TIMEOUT_DELAY			10
 /*
  * delay for 'plugged' timeout retries, in microseconds
  */
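
The dropped TIMEOUT_DELAY pairs with the tlb_uv.c hunk below: the udelay(TIMEOUT_DELAY) call in the FLUSH_RETRY_TIMEOUT branch is removed rather than carried into the new destination_timeout() helper, leaving the constant without users.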
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index a1615058fad3..abf3c31f14cf 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -485,6 +485,47 @@ static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
 }
 
 /*
+ * Our retries are blocked by all destination swack resources being
+ * in use, and a timeout is pending. In that case hardware immediately
+ * returns the ERROR that looks like a destination timeout.
+ */
+static void
+destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
+			struct bau_control *hmaster, struct ptc_stats *stat)
+{
+	udelay(bcp->plugged_delay);
+	bcp->plugged_tries++;
+	if (bcp->plugged_tries >= bcp->plugsb4reset) {
+		bcp->plugged_tries = 0;
+		quiesce_local_uvhub(hmaster);
+		spin_lock(&hmaster->queue_lock);
+		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+		spin_unlock(&hmaster->queue_lock);
+		end_uvhub_quiesce(hmaster);
+		bcp->ipi_attempts++;
+		stat->s_resets_plug++;
+	}
+}
+
+static void
+destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
+			struct bau_control *hmaster, struct ptc_stats *stat)
+{
+	hmaster->max_bau_concurrent = 1;
+	bcp->timeout_tries++;
+	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
+		bcp->timeout_tries = 0;
+		quiesce_local_uvhub(hmaster);
+		spin_lock(&hmaster->queue_lock);
+		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
+		spin_unlock(&hmaster->queue_lock);
+		end_uvhub_quiesce(hmaster);
+		bcp->ipi_attempts++;
+		stat->s_resets_timeout++;
+	}
+}
+
+/*
  * Completions are taking a very long time due to a congested numalink
  * network.
  */
@@ -518,7 +559,7 @@ disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
  *
  * Send a broadcast and wait for it to complete.
  *
- * The flush_mask contains the cpus the broadcast is to be sent to, plus
+ * The flush_mask contains the cpus the broadcast is to be sent to including
  * cpus that are on the local uvhub.
  *
  * Returns 0 if all flushing represented in the mask was done.
@@ -553,7 +594,6 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 			&hmaster->active_descriptor_count,
 			hmaster->max_bau_concurrent));
 	}
-
 	while (hmaster->uvhub_quiesce)
 		cpu_relax();
 
@@ -584,40 +624,9 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 			right_shift, this_cpu, bcp, smaster, try);
 
 		if (completion_status == FLUSH_RETRY_PLUGGED) {
-			/*
-			 * Our retries may be blocked by all destination swack
-			 * resources being consumed, and a timeout pending. In
-			 * that case hardware immediately returns the ERROR
-			 * that looks like a destination timeout.
-			 */
-			udelay(bcp->plugged_delay);
-			bcp->plugged_tries++;
-			if (bcp->plugged_tries >= bcp->plugsb4reset) {
-				bcp->plugged_tries = 0;
-				quiesce_local_uvhub(hmaster);
-				spin_lock(&hmaster->queue_lock);
-				uv_reset_with_ipi(&bau_desc->distribution,
-							this_cpu);
-				spin_unlock(&hmaster->queue_lock);
-				end_uvhub_quiesce(hmaster);
-				bcp->ipi_attempts++;
-				stat->s_resets_plug++;
-			}
+			destination_plugged(bau_desc, bcp, hmaster, stat);
 		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
-			hmaster->max_bau_concurrent = 1;
-			bcp->timeout_tries++;
-			udelay(TIMEOUT_DELAY);
-			if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
-				bcp->timeout_tries = 0;
-				quiesce_local_uvhub(hmaster);
-				spin_lock(&hmaster->queue_lock);
-				uv_reset_with_ipi(&bau_desc->distribution,
-							this_cpu);
-				spin_unlock(&hmaster->queue_lock);
-				end_uvhub_quiesce(hmaster);
-				bcp->ipi_attempts++;
-				stat->s_resets_timeout++;
-			}
+			destination_timeout(bau_desc, bcp, hmaster, stat);
 		}
 		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
 			bcp->ipi_attempts = 0;
@@ -628,10 +637,8 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
 		 (completion_status == FLUSH_RETRY_TIMEOUT));
 	time2 = get_cycles();
-
 	bcp->plugged_tries = 0;
 	bcp->timeout_tries = 0;
-
 	if ((completion_status == FLUSH_COMPLETE) &&
 	    (bcp->conseccompletes > bcp->complete_threshold) &&
 	    (hmaster->max_bau_concurrent <
@@ -740,7 +747,6 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	bau_desc = bcp->descriptor_base;
 	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
-
 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
 	/* cpu statistics */
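
Two details of how the extraction landed, as read from the diff rather than stated in the commit message: both helpers share the same quiesce/lock/uv_reset_with_ipi()/unlock/unquiesce reset sequence, which is what made the two branches near-duplicates worth hoisting; and the helpers pass bcp->cpu to uv_reset_with_ipi() where the open-coded versions passed this_cpu (presumably the same value, since bcp is the calling cpu's bau_control).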