author	Jack Steiner <steiner@sgi.com>	2009-06-17 19:28:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-18 16:04:00 -0400
commit	836ce679c0b5b5040164171afc33753396864b30 (patch)
tree	786be786c29fa6821d8ee95668393cd43193a278 /drivers/misc/sgi-gru/grumain.c
parent	6e9100741ca430eeef8022794f8b62a23a5916af (diff)
gru: change resource assignment for kernel threads
Change the way GRU resources are assigned for kernel threads. GRU contexts
for kernel threads are now allocated on demand and can be stolen by user
processes when idle. This allows MPI jobs to use ALL of the GRU resources
when the kernel is not using them.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
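The whole patch pivots on is_kernel_context(), which is defined in grutables.h rather than in this file. As a minimal sketch of the idea only, not the verbatim kernel helper: a context owned by a kernel thread has no user address space attached, so it can be recognized roughly like this (the NULL-ts_mm test is an assumption; the real check may differ):

/*
 * Hedged sketch -- the real helper lives in grutables.h and its exact
 * test may differ. Assumption: a kernel-thread context carries no user
 * mm, so a NULL ts_mm marks it as kernel-owned.
 */
static inline int is_kernel_context(struct gru_thread_state *gts)
{
	return !gts->ts_mm;	/* no user address space => kernel context */
}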
Diffstat (limited to 'drivers/misc/sgi-gru/grumain.c')
-rw-r--r--	drivers/misc/sgi-gru/grumain.c	55
1 file changed, 40 insertions(+), 15 deletions(-)
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 0c7bd384f0cf..3398e54a762b 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -96,7 +96,7 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid)
 	gid = gru->gs_gid;
 again:
 	for (i = 0; i < GRU_NUM_CCH; i++) {
-		if (!gru->gs_gts[i])
+		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
 			continue;
 		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
 		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
@@ -506,7 +506,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 	struct gru_context_configuration_handle *cch;
 	int ctxnum = gts->ts_ctxnum;
 
-	zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
+	if (!is_kernel_context(gts))
+		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
 	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
 
 	gru_dbg(grudev, "gts %p\n", gts);
@@ -514,7 +515,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 	if (cch_interrupt_sync(cch))
 		BUG();
 
-	gru_unload_mm_tracker(gru, gts);
+	if (!is_kernel_context(gts))
+		gru_unload_mm_tracker(gru, gts);
 	if (savestate)
 		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
 					ctxnum, gts->ts_cbr_map,
@@ -526,7 +528,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 	unlock_cch_handle(cch);
 
 	gru_free_gru_context(gts);
-	STAT(unload_context);
 }
 
 /*
@@ -554,11 +555,16 @@ void gru_load_context(struct gru_thread_state *gts)
 	cch->tfm_done_bit_enable = 0;
 	cch->dsr_allocation_map = gts->ts_dsr_map;
 	cch->cbr_allocation_map = gts->ts_cbr_map;
-	asid = gru_load_mm_tracker(gru, gts);
-	cch->unmap_enable = 0;
-	for (i = 0; i < 8; i++) {
-		cch->asid[i] = asid + i;
-		cch->sizeavail[i] = gts->ts_sizeavail;
+
+	if (is_kernel_context(gts)) {
+		cch->unmap_enable = 1;
+	} else {
+		cch->unmap_enable = 0;
+		asid = gru_load_mm_tracker(gru, gts);
+		for (i = 0; i < 8; i++) {
+			cch->asid[i] = asid + i;
+			cch->sizeavail[i] = gts->ts_sizeavail;
+		}
 	}
 
 	err = cch_allocate(cch);
@@ -575,8 +581,6 @@ void gru_load_context(struct gru_thread_state *gts)
 	if (cch_start(cch))
 		BUG();
 	unlock_cch_handle(cch);
-
-	STAT(load_context);
 }
 
 /*
@@ -652,6 +656,27 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 #define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \
 				 ((g)+1) : &(b)->bs_grus[0])
 
+static int is_gts_stealable(struct gru_thread_state *gts,
+		struct gru_blade_state *bs)
+{
+	if (is_kernel_context(gts))
+		return down_write_trylock(&bs->bs_kgts_sema);
+	else
+		return mutex_trylock(&gts->ts_ctxlock);
+}
+
+static void gts_stolen(struct gru_thread_state *gts,
+		struct gru_blade_state *bs)
+{
+	if (is_kernel_context(gts)) {
+		up_write(&bs->bs_kgts_sema);
+		STAT(steal_kernel_context);
+	} else {
+		mutex_unlock(&gts->ts_ctxlock);
+		STAT(steal_user_context);
+	}
+}
+
 void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 {
 	struct gru_blade_state *blade;
@@ -685,7 +710,7 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 	 * success are high. If trylock fails, try to steal a
 	 * different GSEG.
 	 */
-	if (ngts && mutex_trylock(&ngts->ts_ctxlock))
+	if (ngts && is_gts_stealable(ngts, blade))
 		break;
 	ngts = NULL;
 	flag = 1;
@@ -701,10 +726,9 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 	spin_unlock(&blade->bs_lock);
 
 	if (ngts) {
-		STAT(steal_context);
 		ngts->ts_steal_jiffies = jiffies;
-		gru_unload_context(ngts, 1);
-		mutex_unlock(&ngts->ts_ctxlock);
+		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
+		gts_stolen(ngts, blade);
 	} else {
 		STAT(steal_context_failed);
 	}
@@ -810,6 +834,7 @@ again:
 	}
 
 	if (!gts->ts_gru) {
+		STAT(load_user_context);
 		if (!gru_assign_gru_context(gts, blade_id)) {
 			preempt_enable();
 			mutex_unlock(&gts->ts_ctxlock);
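Two details in the stealing path are worth spelling out. First, user contexts stay guarded by a per-context mutex (ts_ctxlock), while the blade's kernel context is guarded by an rw-semaphore (bs_kgts_sema): kernel users can share it for read, and the down_write_trylock() in is_gts_stealable() succeeds only when no kernel user is active, i.e., the context is idle. Second, gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1) skips the state save for a stolen kernel context: it is allocated on demand, so the next kernel user simply rebuilds one. A hedged sketch of the kernel-side locking this implies; the real code would live in grukservices.c (outside this diffstat) and these function names are hypothetical:

/*
 * Hedged sketch only -- not part of this patch. Assumption: kernel users
 * hold bs_kgts_sema for read while their context is in use, so
 * gru_steal_context() can claim it via down_write_trylock() only when
 * the kernel side is idle.
 */
static void gru_lock_kernel_context(struct gru_blade_state *bs)
{
	down_read(&bs->bs_kgts_sema);	/* shared: many kernel users at once */
	/*
	 * If the context was stolen while idle, it would be reassigned and
	 * reloaded here on demand (gru_assign_gru_context() followed by
	 * gru_load_context(), per the commit message).
	 */
}

static void gru_unlock_kernel_context(struct gru_blade_state *bs)
{
	up_read(&bs->bs_kgts_sema);	/* kernel context is stealable again */
}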