author    Jack Steiner <steiner@sgi.com>    2009-12-15 19:48:06 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-12-16 10:20:15 -0500
commit    99f7c229b32bdf7424fbeb1d0d1b3883e14e97d0 (patch)
tree      bc5cbd2cd6c8452faacfb2e27dc1fe9dc3e1000a /drivers/misc/sgi-gru/grumain.c
parent    55484c45dbeca2eec7642932ec3f60f8a2d4bdbf (diff)
gru: allow users to specify gru chiplet 3
This patch builds on the infrastructure introduced in the patches that allow user specification of GRU blades & chiplets for context allocation. This patch simplifies the algorithms for migrating GRU contexts between blades. No new functionality is introduced.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
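The mechanical effect of the change is easiest to see from the resulting function. Below is a sketch of gru_update_cch() as it reads after this patch, reconstructed from the hunks that follow; the CCH lookup, locking, the surrounding CCHSTATE_ACTIVE check, and the exit path are elided (marked with "..." comments), so this is an illustration of the simplified control flow, not a verbatim copy of the file:

/*
 * Sketch of gru_update_cch() after this patch, reconstructed from the
 * hunks below.  The force_unload parameter and its branch (which
 * cleared cch->asid[] to provoke a delayed context unload on the next
 * TLB miss) are gone; the function now only refreshes the sizeavail
 * mask and retargets the TLB interrupt to the local blade.
 */
int gru_update_cch(struct gru_thread_state *gts)
{
	struct gru_context_configuration_handle *cch;
	struct gru_state *gru = gts->ts_gru;
	int i, ret = 0;

	/* ... obtain cch, lock it, verify the context is active ... */
	if (cch_interrupt(cch))
		BUG();
	for (i = 0; i < 8; i++)
		cch->sizeavail[i] = gts->ts_sizeavail;
	gts->ts_tlb_int_select = gru_cpu_fault_map_id();
	cch->tlb_int_select = gru_cpu_fault_map_id();
	cch->tfm_fault_bit_enable =
	    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
	     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
	if (cch_start(cch))
		BUG();
	ret = 1;
	/* ... unlock cch and fall through to exit ... */
	return ret;
}

Callers change accordingly: gru_retarget_intr() drops the second argument, calling gru_update_cch(gts) instead of gru_update_cch(gts, 0), as the final hunk shows.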
Diffstat (limited to 'drivers/misc/sgi-gru/grumain.c')
-rw-r--r--    drivers/misc/sgi-gru/grumain.c    30
1 file changed, 9 insertions, 21 deletions
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 54ad7544a9a0..9ec54bde4472 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -551,7 +551,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 
 	if (cch_deallocate(cch))
 		BUG();
-	gts->ts_force_unload = 0;	/* ts_force_unload locked by CCH lock */
 	unlock_cch_handle(cch);
 
 	gru_free_gru_context(gts);
@@ -624,11 +623,8 @@ void gru_load_context(struct gru_thread_state *gts)
  * Update fields in an active CCH:
  *	- retarget interrupts on local blade
  *	- update sizeavail mask
- *	- force a delayed context unload by clearing the CCH asids. This
- *	  forces TLB misses for new GRU instructions. The context is unloaded
- *	  when the next TLB miss occurs.
  */
-int gru_update_cch(struct gru_thread_state *gts, int force_unload)
+int gru_update_cch(struct gru_thread_state *gts)
 {
 	struct gru_context_configuration_handle *cch;
 	struct gru_state *gru = gts->ts_gru;
@@ -642,21 +638,13 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
 			goto exit;
 		if (cch_interrupt(cch))
 			BUG();
-		if (!force_unload) {
-			for (i = 0; i < 8; i++)
-				cch->sizeavail[i] = gts->ts_sizeavail;
-			gts->ts_tlb_int_select = gru_cpu_fault_map_id();
-			cch->tlb_int_select = gru_cpu_fault_map_id();
-			cch->tfm_fault_bit_enable =
-			    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
-			     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
-		} else {
-			for (i = 0; i < 8; i++)
-				cch->asid[i] = 0;
-			cch->tfm_fault_bit_enable = 0;
-			cch->tlb_int_enable = 0;
-			gts->ts_force_unload = 1;
-		}
+		for (i = 0; i < 8; i++)
+			cch->sizeavail[i] = gts->ts_sizeavail;
+		gts->ts_tlb_int_select = gru_cpu_fault_map_id();
+		cch->tlb_int_select = gru_cpu_fault_map_id();
+		cch->tfm_fault_bit_enable =
+		    (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
+		     || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
 		if (cch_start(cch))
 			BUG();
 		ret = 1;
@@ -681,7 +669,7 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 
 	gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select,
 		gru_cpu_fault_map_id());
-	return gru_update_cch(gts, 0);
+	return gru_update_cch(gts);
 }
 
 /*