author		Jack Steiner <steiner@sgi.com>	2009-06-17 19:28:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-18 16:04:00 -0400
commit		836ce679c0b5b5040164171afc33753396864b30 (patch)
tree		786be786c29fa6821d8ee95668393cd43193a278 /drivers
parent		6e9100741ca430eeef8022794f8b62a23a5916af (diff)
gru: change resource assignment for kernel threads
Change the way GRU resources are assigned for kernel threads. GRU contexts
for kernel threads are now allocated on demand and can be stolen by user
processes when idle. This allows MPI jobs to use ALL of the GRU resources
when the kernel is not using them.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
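For illustration only (not part of the patch), the sketch below mirrors what the reworked quicktest() does and shows the caller-visible contract of the new API: gru_get_cpu_resources() now locks the blade's kernel context, loading it on demand (stealing a user context if necessary) and returning with preemption disabled, while gru_free_cpu_resources() drops the lock so the idle context can be stolen again. The wrapper gru_kernel_read_example() and its error handling are hypothetical.

	/* Hypothetical caller of the on-demand kernel context API (illustration only). */
	static int gru_kernel_read_example(unsigned long *src, unsigned long *dst)
	{
		void *cb, *dsr;
		int ret = 0;

		/* Locks (and, if needed, loads or steals) the blade's kernel context;
		   returns with preemption disabled. */
		if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
			return MQE_BUG_NO_RESOURCES;

		/* Use the per-cpu CBR/DSR exactly as the new quicktest() does. */
		gru_vload(cb, uv_gpa(src), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			ret = -EIO;
		else
			*dst = *(unsigned long *)dsr;

		/* Unlocks the kernel context; it may now be stolen while idle. */
		gru_free_cpu_resources(cb, dsr);
		return ret;
	}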
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/misc/sgi-gru/gruhandles.c	5
-rw-r--r--	drivers/misc/sgi-gru/grukdump.c		2
-rw-r--r--	drivers/misc/sgi-gru/grukservices.c	201
-rw-r--r--	drivers/misc/sgi-gru/grumain.c		55
-rw-r--r--	drivers/misc/sgi-gru/gruprocfs.c	9
-rw-r--r--	drivers/misc/sgi-gru/grutables.h	17
6 files changed, 184 insertions, 105 deletions
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index a3a870ad9153..37e7cfc53b9c 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -57,7 +57,7 @@ static void start_instruction(void *h)
 static int wait_instruction_complete(void *h, enum mcs_op opc)
 {
 	int status;
-	cycles_t start_time = get_cycles();
+	unsigned long start_time = get_cycles();
 
 	while (1) {
 		cpu_relax();
@@ -65,7 +65,8 @@ static int wait_instruction_complete(void *h, enum mcs_op opc)
 		if (status != CCHSTATUS_ACTIVE)
 			break;
 		if (GRU_OPERATION_TIMEOUT < (get_cycles() - start_time))
-			panic("GRU %p is malfunctioning\n", h);
+			panic("GRU %p is malfunctioning: start %ld, end %ld\n",
+			      h, start_time, (unsigned long)get_cycles());
 	}
 	if (gru_options & OPT_STATS)
 		update_mcs_stats(opc, get_cycles() - start_time);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 27e00931a7b8..7b1bdf3906ba 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -131,7 +131,7 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
 
 	if (cch_locked || !lock_cch) {
 		gts = gru->gs_gts[ctxnum];
-		if (gts) {
+		if (gts && gts->ts_vma) {
 			hdr.pid = gts->ts_tgid_owner;
 			hdr.vaddr = gts->ts_vma->vm_start;
 		}
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 900f7aad2286..50b4dd8b0c9f 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
 #include <linux/proc_fs.h>
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
+#include <linux/delay.h>
 #include "gru.h"
 #include "grulib.h"
 #include "grutables.h"
@@ -45,18 +46,17 @@
  * resources. This will likely be replaced when we better understand the
  * kernel/user requirements.
  *
- * At boot time, the kernel permanently reserves a fixed number of
- * CBRs/DSRs for each cpu to use. The resources are all taken from
- * the GRU chiplet 1 on the blade. This leaves the full set of resources
- * of chiplet 0 available to be allocated to a single user.
+ * Blade percpu resources reserved for kernel use. These resources are
+ * reserved whenever the the kernel context for the blade is loaded. Note
+ * that the kernel context is not guaranteed to be always available. It is
+ * loaded on demand & can be stolen by a user if the user demand exceeds the
+ * kernel demand. The kernel can always reload the kernel context but
+ * a SLEEP may be required!!!.
  */
-
-/* Blade percpu resources PERMANENTLY reserved for kernel use */
 #define GRU_NUM_KERNEL_CBR	1
 #define GRU_NUM_KERNEL_DSR_BYTES 256
 #define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
 					GRU_CACHE_LINE_BYTES)
-#define KERNEL_CTXNUM		15
 
 /* GRU instruction attributes for all instructions */
 #define IMA			IMA_CB_DELAY
@@ -98,6 +98,88 @@ struct message_header {
 
 #define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
 
+/*
+ * Allocate a kernel context (GTS) for the specified blade.
+ *	- protected by writelock on bs_kgts_sema.
+ */
+static void gru_alloc_kernel_context(struct gru_blade_state *bs, int blade_id)
+{
+	int cbr_au_count, dsr_au_count, ncpus;
+
+	ncpus = uv_blade_nr_possible_cpus(blade_id);
+	cbr_au_count = GRU_CB_COUNT_TO_AU(GRU_NUM_KERNEL_CBR * ncpus);
+	dsr_au_count = GRU_DS_BYTES_TO_AU(GRU_NUM_KERNEL_DSR_BYTES * ncpus);
+	bs->bs_kgts = gru_alloc_gts(NULL, cbr_au_count, dsr_au_count, 0, 0);
+}
+
+/*
+ * Reload the blade's kernel context into a GRU chiplet. Called holding
+ * the bs_kgts_sema for READ. Will steal user contexts if necessary.
+ */
+static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
+{
+	struct gru_state *gru;
+	struct gru_thread_state *kgts;
+	void *vaddr;
+	int ctxnum;
+
+	up_read(&bs->bs_kgts_sema);
+	down_write(&bs->bs_kgts_sema);
+
+	if (!bs->bs_kgts)
+		gru_alloc_kernel_context(bs, blade_id);
+	kgts = bs->bs_kgts;
+
+	if (!kgts->ts_gru) {
+		STAT(load_kernel_context);
+		while (!gru_assign_gru_context(kgts, blade_id)) {
+			msleep(1);
+			gru_steal_context(kgts, blade_id);
+		}
+		gru_load_context(kgts);
+		gru = bs->bs_kgts->ts_gru;
+		vaddr = gru->gs_gru_base_vaddr;
+		ctxnum = kgts->ts_ctxnum;
+		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
+		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
+	}
+	downgrade_write(&bs->bs_kgts_sema);
+}
+
+/*
+ * Lock & load the kernel context for the specified blade.
+ */
+static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
+{
+	struct gru_blade_state *bs;
+
+	STAT(lock_kernel_context);
+	bs = gru_base[blade_id];
+
+	down_read(&bs->bs_kgts_sema);
+	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
+		gru_load_kernel_context(bs, blade_id);
+	return bs;
+
+}
+
+/*
+ * Unlock the kernel context for the specified blade. Context is not
+ * unloaded but may be stolen before next use.
+ */
+static void gru_unlock_kernel_context(int blade_id)
+{
+	struct gru_blade_state *bs;
+
+	bs = gru_base[blade_id];
+	up_read(&bs->bs_kgts_sema);
+	STAT(unlock_kernel_context);
+}
+
+/*
+ * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
+ *	- returns with preemption disabled
+ */
 static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
 {
 	struct gru_blade_state *bs;
@@ -105,18 +187,23 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
 
 	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
 	preempt_disable();
-	bs = gru_base[uv_numa_blade_id()];
+	bs = gru_lock_kernel_context(uv_numa_blade_id());
 	lcpu = uv_blade_processor_id();
 	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
 	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
 	return 0;
 }
 
+/*
+ * Free the current cpus reserved DSR/CBR resources.
+ */
 static void gru_free_cpu_resources(void *cb, void *dsr)
 {
+	gru_unlock_kernel_context(uv_numa_blade_id());
 	preempt_enable();
 }
 
+/*----------------------------------------------------------------------*/
 int gru_get_cb_exception_detail(void *cb,
 			struct control_block_extended_exc_detail *excdet)
 {
@@ -597,34 +684,36 @@ EXPORT_SYMBOL_GPL(gru_copy_gpa);
 
 /* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
 /* Temp - will delete after we gain confidence in the GRU */
-static __cacheline_aligned unsigned long word0;
-static __cacheline_aligned unsigned long word1;
 
-static int quicktest(struct gru_state *gru)
+int quicktest(void)
 {
+	unsigned long word0;
+	unsigned long word1;
 	void *cb;
-	void *ds;
+	void *dsr;
 	unsigned long *p;
 
-	cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
-	ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
-	p = ds;
+	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
+		return MQE_BUG_NO_RESOURCES;
+	p = dsr;
 	word0 = MAGIC;
+	word1 = 0;
 
-	gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
+	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
 	if (gru_wait(cb) != CBS_IDLE)
 		BUG();
 
-	if (*(unsigned long *)ds != MAGIC)
+	if (*p != MAGIC)
 		BUG();
-	gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
+	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
 	if (gru_wait(cb) != CBS_IDLE)
 		BUG();
+	gru_free_cpu_resources(cb, dsr);
 
-	if (word0 != word1 || word0 != MAGIC) {
+	if (word0 != word1 || word1 != MAGIC) {
 		printk
-		    ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n",
-		     gru->gs_gid, word1, MAGIC);
+		    ("GRU quicktest err: found 0x%lx, expected 0x%lx\n",
+		     word1, MAGIC);
 		BUG();		/* ZZZ should not be fatal */
 	}
 
@@ -635,80 +724,30 @@ static int quicktest(struct gru_state *gru)
 int gru_kservices_init(struct gru_state *gru)
 {
 	struct gru_blade_state *bs;
-	struct gru_context_configuration_handle *cch;
-	unsigned long cbr_map, dsr_map;
-	int err, num, cpus_possible;
-
-	/*
-	 * Currently, resources are reserved ONLY on the second chiplet
-	 * on each blade. This leaves ALL resources on chiplet 0 available
-	 * for user code.
-	 */
+
 	bs = gru->gs_blade;
-	if (gru != &bs->bs_grus[1])
+	if (gru != &bs->bs_grus[0])
 		return 0;
 
-	cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);
-
-	num = GRU_NUM_KERNEL_CBR * cpus_possible;
-	cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
-	gru->gs_reserved_cbrs += num;
-
-	num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
-	dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
-	gru->gs_reserved_dsr_bytes += num;
-
-	gru->gs_active_contexts++;
-	__set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
-	cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
-
-	bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
-					KERNEL_CTXNUM, 0);
-	bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
-					KERNEL_CTXNUM, 0);
-
-	lock_cch_handle(cch);
-	cch->tfm_fault_bit_enable = 0;
-	cch->tlb_int_enable = 0;
-	cch->tfm_done_bit_enable = 0;
-	cch->unmap_enable = 1;
-	cch->dsr_allocation_map = dsr_map;
-	cch->cbr_allocation_map = cbr_map;
-
-	err = cch_allocate(cch);
-	if (err) {
-		gru_dbg(grudev,
-			"Unable to allocate kernel CCH: gid %d, err %d\n",
-			gru->gs_gid, err);
-		BUG();
-	}
-	if (cch_start(cch)) {
-		gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n",
-			gru->gs_gid, err);
-		BUG();
-	}
-	unlock_cch_handle(cch);
+	init_rwsem(&bs->bs_kgts_sema);
 
 	if (gru_options & GRU_QUICKLOOK)
-		quicktest(gru);
+		quicktest();
 	return 0;
 }
 
 void gru_kservices_exit(struct gru_state *gru)
 {
-	struct gru_context_configuration_handle *cch;
 	struct gru_blade_state *bs;
+	struct gru_thread_state *kgts;
 
 	bs = gru->gs_blade;
-	if (gru != &bs->bs_grus[1])
+	if (gru != &bs->bs_grus[0])
 		return;
 
-	cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
-	lock_cch_handle(cch);
-	if (cch_interrupt_sync(cch))
-		BUG();
-	if (cch_deallocate(cch))
-		BUG();
-	unlock_cch_handle(cch);
+	kgts = bs->bs_kgts;
+	if (kgts && kgts->ts_gru)
+		gru_unload_context(kgts, 0);
+	kfree(kgts);
 }
 
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 0c7bd384f0cf..3398e54a762b 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -96,7 +96,7 @@ static int gru_reset_asid_limit(struct gru_state *gru, int asid)
 	gid = gru->gs_gid;
 again:
 	for (i = 0; i < GRU_NUM_CCH; i++) {
-		if (!gru->gs_gts[i])
+		if (!gru->gs_gts[i] || is_kernel_context(gru->gs_gts[i]))
 			continue;
 		inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid;
 		gru_dbg(grudev, "gid %d, gts %p, gms %p, inuse 0x%x, cxt %d\n",
@@ -506,7 +506,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 	struct gru_context_configuration_handle *cch;
 	int ctxnum = gts->ts_ctxnum;
 
-	zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
+	if (!is_kernel_context(gts))
+		zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE);
 	cch = get_cch(gru->gs_gru_base_vaddr, ctxnum);
 
 	gru_dbg(grudev, "gts %p\n", gts);
@@ -514,7 +515,8 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 	if (cch_interrupt_sync(cch))
 		BUG();
 
-	gru_unload_mm_tracker(gru, gts);
+	if (!is_kernel_context(gts))
+		gru_unload_mm_tracker(gru, gts);
 	if (savestate)
 		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
 					ctxnum, gts->ts_cbr_map,
@@ -526,7 +528,6 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 	unlock_cch_handle(cch);
 
 	gru_free_gru_context(gts);
-	STAT(unload_context);
 }
 
 /*
@@ -554,11 +555,16 @@ void gru_load_context(struct gru_thread_state *gts)
 	cch->tfm_done_bit_enable = 0;
 	cch->dsr_allocation_map = gts->ts_dsr_map;
 	cch->cbr_allocation_map = gts->ts_cbr_map;
-	asid = gru_load_mm_tracker(gru, gts);
-	cch->unmap_enable = 0;
-	for (i = 0; i < 8; i++) {
-		cch->asid[i] = asid + i;
-		cch->sizeavail[i] = gts->ts_sizeavail;
+
+	if (is_kernel_context(gts)) {
+		cch->unmap_enable = 1;
+	} else {
+		cch->unmap_enable = 0;
+		asid = gru_load_mm_tracker(gru, gts);
+		for (i = 0; i < 8; i++) {
+			cch->asid[i] = asid + i;
+			cch->sizeavail[i] = gts->ts_sizeavail;
+		}
 	}
 
 	err = cch_allocate(cch);
@@ -575,8 +581,6 @@ void gru_load_context(struct gru_thread_state *gts)
 	if (cch_start(cch))
 		BUG();
 	unlock_cch_handle(cch);
-
-	STAT(load_context);
 }
 
 /*
@@ -652,6 +656,27 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 #define next_gru(b, g)	(((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ?  \
 				 ((g)+1) : &(b)->bs_grus[0])
 
+static int is_gts_stealable(struct gru_thread_state *gts,
+		struct gru_blade_state *bs)
+{
+	if (is_kernel_context(gts))
+		return down_write_trylock(&bs->bs_kgts_sema);
+	else
+		return mutex_trylock(&gts->ts_ctxlock);
+}
+
+static void gts_stolen(struct gru_thread_state *gts,
+		struct gru_blade_state *bs)
+{
+	if (is_kernel_context(gts)) {
+		up_write(&bs->bs_kgts_sema);
+		STAT(steal_kernel_context);
+	} else {
+		mutex_unlock(&gts->ts_ctxlock);
+		STAT(steal_user_context);
+	}
+}
+
 void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 {
 	struct gru_blade_state *blade;
@@ -685,7 +710,7 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 		 * success are high. If trylock fails, try to steal a
 		 * different GSEG.
 		 */
-		if (ngts && mutex_trylock(&ngts->ts_ctxlock))
+		if (ngts && is_gts_stealable(ngts, blade))
 			break;
 		ngts = NULL;
 		flag = 1;
@@ -701,10 +726,9 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 	spin_unlock(&blade->bs_lock);
 
 	if (ngts) {
-		STAT(steal_context);
 		ngts->ts_steal_jiffies = jiffies;
-		gru_unload_context(ngts, 1);
-		mutex_unlock(&ngts->ts_ctxlock);
+		gru_unload_context(ngts, is_kernel_context(ngts) ? 0 : 1);
+		gts_stolen(ngts, blade);
 	} else {
 		STAT(steal_context_failed);
 	}
@@ -810,6 +834,7 @@ again:
 	}
 
 	if (!gts->ts_gru) {
+		STAT(load_user_context);
 		if (!gru_assign_gru_context(gts, blade_id)) {
 			preempt_enable();
 			mutex_unlock(&gts->ts_ctxlock);
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index c46c1c5f0c73..6ef4cb4b84c8 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -51,9 +51,12 @@ static int statistics_show(struct seq_file *s, void *p)
 	printstat(s, assign_context);
 	printstat(s, assign_context_failed);
 	printstat(s, free_context);
-	printstat(s, load_context);
-	printstat(s, unload_context);
-	printstat(s, steal_context);
+	printstat(s, load_user_context);
+	printstat(s, load_kernel_context);
+	printstat(s, lock_kernel_context);
+	printstat(s, unlock_kernel_context);
+	printstat(s, steal_user_context);
+	printstat(s, steal_kernel_context);
 	printstat(s, steal_context_failed);
 	printstat(s, nopfn);
 	printstat(s, break_cow);
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 4ddb5b92acbb..1c85fdcf5d37 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -174,9 +174,12 @@ struct gru_stats_s {
 	atomic_long_t assign_context;
 	atomic_long_t assign_context_failed;
 	atomic_long_t free_context;
-	atomic_long_t load_context;
-	atomic_long_t unload_context;
-	atomic_long_t steal_context;
+	atomic_long_t load_user_context;
+	atomic_long_t load_kernel_context;
+	atomic_long_t lock_kernel_context;
+	atomic_long_t unlock_kernel_context;
+	atomic_long_t steal_user_context;
+	atomic_long_t steal_kernel_context;
 	atomic_long_t steal_context_failed;
 	atomic_long_t nopfn;
 	atomic_long_t break_cow;
@@ -454,6 +457,9 @@ struct gru_blade_state {
 						reserved cb */
 	void		*kernel_dsr;		/* First kernel
 						reserved DSR */
+	struct rw_semaphore	bs_kgts_sema;	/* lock for kgts */
+	struct gru_thread_state	*bs_kgts;	/* GTS for kernel use */
+
 	/* ---- the following are protected by the bs_lock spinlock ---- */
 	spinlock_t	bs_lock;		/* lock used for
 						stealing contexts */
@@ -597,6 +603,11 @@ static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh)
 	__unlock_handle(tgh);
 }
 
+static inline int is_kernel_context(struct gru_thread_state *gts)
+{
+	return !gts->ts_mm;
+}
+
 /*-----------------------------------------------------------------------------
  * Function prototypes & externs
  */
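
The bs_kgts_sema discipline this patch introduces (readers hold the semaphore while the kernel context is resident, loading happens under the write lock, and a thief must win a write trylock before unloading it) can be pictured with a small user-space analogue. This is illustration only, not driver code: POSIX rwlocks cannot be downgraded the way downgrade_write() is used above, so the sketch drops the write lock, re-takes the read lock, and re-checks; all names here are hypothetical.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_rwlock_t kgts_sema = PTHREAD_RWLOCK_INITIALIZER;
	static bool kctx_loaded;		/* stands in for bs_kgts->ts_gru */

	static void lock_kernel_context(void)
	{
		pthread_rwlock_rdlock(&kgts_sema);
		while (!kctx_loaded) {
			/* "Upgrade": drop the read lock, load under the write lock,
			   then re-take the read lock and re-check. */
			pthread_rwlock_unlock(&kgts_sema);
			pthread_rwlock_wrlock(&kgts_sema);
			kctx_loaded = true;	/* load the context on demand */
			pthread_rwlock_unlock(&kgts_sema);
			pthread_rwlock_rdlock(&kgts_sema);
		}
	}

	static void unlock_kernel_context(void)
	{
		pthread_rwlock_unlock(&kgts_sema);	/* idle: may now be stolen */
	}

	static bool try_steal_kernel_context(void)
	{
		if (pthread_rwlock_trywrlock(&kgts_sema) != 0)
			return false;		/* context busy, pick another victim */
		kctx_loaded = false;		/* "unload" it */
		pthread_rwlock_unlock(&kgts_sema);
		return true;
	}

	int main(void)
	{
		lock_kernel_context();		/* loaded on first use */
		printf("steal while locked: %d\n", try_steal_kernel_context());
		unlock_kernel_context();
		printf("steal while idle:   %d\n", try_steal_kernel_context());
		return 0;
	}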