diff options
| author | Cliff Wickman <cpw@sgi.com> | 2010-06-02 17:22:02 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-06-08 15:13:45 -0400 |
| commit | 712157aa703a01f58c7c17452096ab00b774d0a9 (patch) | |
| tree | 959fe57db03ced19a7e913933a4d96f836fb8014 | |
| parent | 50fb55acc5bbe5ee29d0a65262f4ec286b14d156 (diff) | |
x86, UV: Shorten access to BAU statistics structure
Use a pointer from the per-cpu BAU control structure to the
per-cpu BAU statistics structure.
We nearly always know the first before needing the second.
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004aB-2k@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| -rw-r--r-- | arch/x86/include/asm/uv/uv_bau.h | 1 | ||||
| -rw-r--r-- | arch/x86/kernel/tlb_uv.c | 16 |
2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 9b3e750ef2d8..6a42d42eb8f9 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
| @@ -332,6 +332,7 @@ struct bau_control { | |||
| 332 | struct bau_payload_queue_entry *bau_msg_head; | 332 | struct bau_payload_queue_entry *bau_msg_head; |
| 333 | struct bau_control *uvhub_master; | 333 | struct bau_control *uvhub_master; |
| 334 | struct bau_control *socket_master; | 334 | struct bau_control *socket_master; |
| 335 | struct ptc_stats *statp; | ||
| 335 | unsigned long timeout_interval; | 336 | unsigned long timeout_interval; |
| 336 | unsigned long set_bau_on_time; | 337 | unsigned long set_bau_on_time; |
| 337 | atomic_t active_descriptor_count; | 338 | atomic_t active_descriptor_count; |
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index dc6a68312758..261b9653cde5 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
| @@ -153,7 +153,7 @@ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp, | |||
| 153 | struct ptc_stats *stat; | 153 | struct ptc_stats *stat; |
| 154 | 154 | ||
| 155 | msg = mdp->msg; | 155 | msg = mdp->msg; |
| 156 | stat = &per_cpu(ptcstats, bcp->cpu); | 156 | stat = bcp->statp; |
| 157 | stat->d_retries++; | 157 | stat->d_retries++; |
| 158 | /* | 158 | /* |
| 159 | * cancel any message from msg+1 to the retry itself | 159 | * cancel any message from msg+1 to the retry itself |
| @@ -217,7 +217,7 @@ static void uv_bau_process_message(struct msg_desc *mdp, | |||
| 217 | * This must be a normal message, or retry of a normal message | 217 | * This must be a normal message, or retry of a normal message |
| 218 | */ | 218 | */ |
| 219 | msg = mdp->msg; | 219 | msg = mdp->msg; |
| 220 | stat = &per_cpu(ptcstats, bcp->cpu); | 220 | stat = bcp->statp; |
| 221 | if (msg->address == TLB_FLUSH_ALL) { | 221 | if (msg->address == TLB_FLUSH_ALL) { |
| 222 | local_flush_tlb(); | 222 | local_flush_tlb(); |
| 223 | stat->d_alltlb++; | 223 | stat->d_alltlb++; |
| @@ -301,7 +301,7 @@ uv_do_reset(void *ptr) | |||
| 301 | 301 | ||
| 302 | bcp = &per_cpu(bau_control, smp_processor_id()); | 302 | bcp = &per_cpu(bau_control, smp_processor_id()); |
| 303 | rap = (struct reset_args *)ptr; | 303 | rap = (struct reset_args *)ptr; |
| 304 | stat = &per_cpu(ptcstats, bcp->cpu); | 304 | stat = bcp->statp; |
| 305 | stat->d_resets++; | 305 | stat->d_resets++; |
| 306 | 306 | ||
| 307 | /* | 307 | /* |
| @@ -419,7 +419,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc, | |||
| 419 | unsigned long mask; | 419 | unsigned long mask; |
| 420 | cycles_t ttime; | 420 | cycles_t ttime; |
| 421 | cycles_t timeout_time; | 421 | cycles_t timeout_time; |
| 422 | struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu); | 422 | struct ptc_stats *stat = bcp->statp; |
| 423 | struct bau_control *hmaster; | 423 | struct bau_control *hmaster; |
| 424 | 424 | ||
| 425 | hmaster = bcp->uvhub_master; | 425 | hmaster = bcp->uvhub_master; |
| @@ -583,7 +583,7 @@ const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc, | |||
| 583 | cycles_t time1; | 583 | cycles_t time1; |
| 584 | cycles_t time2; | 584 | cycles_t time2; |
| 585 | cycles_t elapsed; | 585 | cycles_t elapsed; |
| 586 | struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu); | 586 | struct ptc_stats *stat = bcp->statp; |
| 587 | struct bau_control *smaster = bcp->socket_master; | 587 | struct bau_control *smaster = bcp->socket_master; |
| 588 | struct bau_control *hmaster = bcp->uvhub_master; | 588 | struct bau_control *hmaster = bcp->uvhub_master; |
| 589 | 589 | ||
| @@ -794,7 +794,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, | |||
| 794 | return cpumask; | 794 | return cpumask; |
| 795 | 795 | ||
| 796 | bcp = &per_cpu(bau_control, cpu); | 796 | bcp = &per_cpu(bau_control, cpu); |
| 797 | stat = &per_cpu(ptcstats, cpu); | 797 | stat = bcp->statp; |
| 798 | 798 | ||
| 799 | /* bau was disabled due to slow response */ | 799 | /* bau was disabled due to slow response */ |
| 800 | if (bcp->baudisabled) { | 800 | if (bcp->baudisabled) { |
| @@ -903,7 +903,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs) | |||
| 903 | 903 | ||
| 904 | time_start = get_cycles(); | 904 | time_start = get_cycles(); |
| 905 | bcp = &per_cpu(bau_control, smp_processor_id()); | 905 | bcp = &per_cpu(bau_control, smp_processor_id()); |
| 906 | stat = &per_cpu(ptcstats, smp_processor_id()); | 906 | stat = bcp->statp; |
| 907 | msgdesc.va_queue_first = bcp->va_queue_first; | 907 | msgdesc.va_queue_first = bcp->va_queue_first; |
| 908 | msgdesc.va_queue_last = bcp->va_queue_last; | 908 | msgdesc.va_queue_last = bcp->va_queue_last; |
| 909 | msg = bcp->bau_msg_head; | 909 | msg = bcp->bau_msg_head; |
| @@ -1636,6 +1636,7 @@ static void uv_init_per_cpu(int nuvhubs) | |||
| 1636 | for_each_present_cpu(cpu) { | 1636 | for_each_present_cpu(cpu) { |
| 1637 | bcp = &per_cpu(bau_control, cpu); | 1637 | bcp = &per_cpu(bau_control, cpu); |
| 1638 | bcp->baudisabled = 0; | 1638 | bcp->baudisabled = 0; |
| 1639 | bcp->statp = &per_cpu(ptcstats, cpu); | ||
| 1639 | /* time interval to catch a hardware stay-busy bug */ | 1640 | /* time interval to catch a hardware stay-busy bug */ |
| 1640 | bcp->timeout_interval = microsec_2_cycles(2*timeout_us); | 1641 | bcp->timeout_interval = microsec_2_cycles(2*timeout_us); |
| 1641 | bcp->max_bau_concurrent = max_bau_concurrent; | 1642 | bcp->max_bau_concurrent = max_bau_concurrent; |
| @@ -1673,7 +1674,6 @@ static int __init uv_bau_init(void) | |||
| 1673 | zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), | 1674 | zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu), |
| 1674 | GFP_KERNEL, cpu_to_node(cur_cpu)); | 1675 | GFP_KERNEL, cpu_to_node(cur_cpu)); |
| 1675 | 1676 | ||
| 1676 | max_bau_concurrent = MAX_BAU_CONCURRENT; | ||
| 1677 | uv_nshift = uv_hub_info->m_val; | 1677 | uv_nshift = uv_hub_info->m_val; |
| 1678 | uv_mmask = (1UL << uv_hub_info->m_val) - 1; | 1678 | uv_mmask = (1UL << uv_hub_info->m_val) - 1; |
| 1679 | nuvhubs = uv_num_possible_blades(); | 1679 | nuvhubs = uv_num_possible_blades(); |
