author		Cliff Wickman <cpw@sgi.com>	2010-06-02 17:22:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-06-08 15:13:47 -0400
commit		90cc7d944981a6d06b49bb26fde1b490e28c90e5 (patch)
tree		21e2d202c168e8b0ff17907954a7106f018a5f26 /arch/x86/kernel
parent		a8328ee58c15c9d763a67607a35bb987b38950fa (diff)
x86, UV: Remove BAU check for stay-busy
Remove a faulty assumption that a long-running BAU request has encountered a hardware problem and will never finish.

Numalink congestion can make a request appear to have encountered such a problem, but it is not safe to cancel the request. If such a cancel is done but a reply is later received, we can miss a TLB shootdown.

We depend upon the max_bau_concurrent 'throttle' to prevent the stay-busy case from happening.

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004ad-BV@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
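For illustration only -- a minimal user-space sketch of the kind of concurrency throttle the changelog refers to. All names here (bau_throttle_acquire, in_flight, MAX_BAU_CONCURRENT) are hypothetical; the real mechanism is the driver's max_bau_concurrent tunable, not this code:

#include <stdatomic.h>
#include <sched.h>

/* Hypothetical cap on in-flight broadcasts (the kernel's limit is the
 * tunable max_bau_concurrent, not a compile-time constant). */
#define MAX_BAU_CONCURRENT 3

static atomic_int in_flight;	/* broadcasts currently outstanding */

/* Spin until this CPU may issue another broadcast request. */
static void bau_throttle_acquire(void)
{
	for (;;) {
		int cur = atomic_load(&in_flight);

		if (cur < MAX_BAU_CONCURRENT &&
		    atomic_compare_exchange_weak(&in_flight, &cur, cur + 1))
			return;
		sched_yield();	/* back off; never cancel the request */
	}
}

/* Called once the reply for a broadcast has arrived. */
static void bau_throttle_release(void)
{
	atomic_fetch_sub(&in_flight, 1);
}

With an acquire/release pair like this bounding the number of outstanding requests, a descriptor that reads BUSY is still making forward progress, so the wait loop in uv_wait_completion() can spin without needing the give-up path this patch removes.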
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/tlb_uv.c | 23 -----------------------
1 file changed, 0 insertions, 23 deletions
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index ab929e976502..dc962b5ac870 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -405,12 +405,10 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 	unsigned long mmr;
 	unsigned long mask;
 	cycles_t ttime;
-	cycles_t timeout_time;
 	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster;
 
 	hmaster = bcp->uvhub_master;
-	timeout_time = get_cycles() + bcp->timeout_interval;
 
 	/* spin on the status MMR, waiting for it to go idle */
 	while ((descriptor_status = (((unsigned long)
@@ -450,26 +448,6 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 			 * descriptor_status is still BUSY
 			 */
 			cpu_relax();
-			relaxes++;
-			if (relaxes >= 10000) {
-				relaxes = 0;
-				if (get_cycles() > timeout_time) {
-					quiesce_local_uvhub(hmaster);
-
-					/* single-thread the register change */
-					spin_lock(&hmaster->masks_lock);
-					mmr = uv_read_local_mmr(mmr_offset);
-					mask = 0UL;
-					mask |= (3UL < right_shift);
-					mask = ~mask;
-					mmr &= mask;
-					uv_write_local_mmr(mmr_offset, mmr);
-					spin_unlock(&hmaster->masks_lock);
-					end_uvhub_quiesce(hmaster);
-					stat->s_busy++;
-					return FLUSH_GIVEUP;
-				}
-			}
 		}
 	}
 	bcp->conseccompletes++;
@@ -1580,7 +1558,6 @@ static void uv_init_per_cpu(int nuvhubs)
 	for_each_present_cpu(cpu) {
 		bcp = &per_cpu(bau_control, cpu);
 		memset(bcp, 0, sizeof(struct bau_control));
-		spin_lock_init(&bcp->masks_lock);
 		pnode = uv_cpu_hub_info(cpu)->pnode;
 		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
 		uvhub_mask |= (1 << uvhub);