aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/misc
diff options
context:
space:
mode:
authorJack Steiner <steiner@sgi.com>2009-06-17 19:28:25 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-18 16:04:00 -0400
commit4a7a17c1188a878e9f00e4ca8dc724c7cff17606 (patch)
treee7c728b3031534620fbb01ab0ae059ae28fd5f89 /drivers/misc
parent940229b9c0dcd9b6e1d64d0d26eba00238ddae98 (diff)
gru: support instruction completion interrupts
Add support for interrupts generated by GRU instruction completion. Previously, the only interrupts were for TLB misses. The hardware also supports interrupts on instruction completion. This will be supported for instructions issued by the kernel. Signed-off-by: Jack Steiner <steiner@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc')
-rw-r--r--drivers/misc/sgi-gru/grufault.c27
-rw-r--r--drivers/misc/sgi-gru/grumain.c4
-rw-r--r--drivers/misc/sgi-gru/grutables.h5
3 files changed, 29 insertions, 7 deletions
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index f15152165a9..3220e95be6b 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -166,7 +166,8 @@ static inline struct gru_state *irq_to_gru(int irq)
166 * the GRU, atomic operations must be used to clear bits. 166 * the GRU, atomic operations must be used to clear bits.
167 */ 167 */
168static void get_clear_fault_map(struct gru_state *gru, 168static void get_clear_fault_map(struct gru_state *gru,
169 struct gru_tlb_fault_map *map) 169 struct gru_tlb_fault_map *imap,
170 struct gru_tlb_fault_map *dmap)
170{ 171{
171 unsigned long i, k; 172 unsigned long i, k;
172 struct gru_tlb_fault_map *tfm; 173 struct gru_tlb_fault_map *tfm;
@@ -177,7 +178,11 @@ static void get_clear_fault_map(struct gru_state *gru,
177 k = tfm->fault_bits[i]; 178 k = tfm->fault_bits[i];
178 if (k) 179 if (k)
179 k = xchg(&tfm->fault_bits[i], 0UL); 180 k = xchg(&tfm->fault_bits[i], 0UL);
180 map->fault_bits[i] = k; 181 imap->fault_bits[i] = k;
182 k = tfm->done_bits[i];
183 if (k)
184 k = xchg(&tfm->done_bits[i], 0UL);
185 dmap->fault_bits[i] = k;
181 } 186 }
182 187
183 /* 188 /*
@@ -449,7 +454,7 @@ failactive:
449irqreturn_t gru_intr(int irq, void *dev_id) 454irqreturn_t gru_intr(int irq, void *dev_id)
450{ 455{
451 struct gru_state *gru; 456 struct gru_state *gru;
452 struct gru_tlb_fault_map map; 457 struct gru_tlb_fault_map imap, dmap;
453 struct gru_thread_state *gts; 458 struct gru_thread_state *gts;
454 struct gru_tlb_fault_handle *tfh = NULL; 459 struct gru_tlb_fault_handle *tfh = NULL;
455 int cbrnum, ctxnum; 460 int cbrnum, ctxnum;
@@ -462,11 +467,19 @@ irqreturn_t gru_intr(int irq, void *dev_id)
462 raw_smp_processor_id(), irq); 467 raw_smp_processor_id(), irq);
463 return IRQ_NONE; 468 return IRQ_NONE;
464 } 469 }
465 get_clear_fault_map(gru, &map); 470 get_clear_fault_map(gru, &imap, &dmap);
466 gru_dbg(grudev, "irq %d, gru %x, map 0x%lx\n", irq, gru->gs_gid, 471 gru_dbg(grudev,
467 map.fault_bits[0]); 472 "irq %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
473 irq, gru->gs_gid, imap.fault_bits[0], imap.fault_bits[1],
474 dmap.fault_bits[0], dmap.fault_bits[1]);
475
476 for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
477 complete(gru->gs_blade->bs_async_wq);
478 gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
479 gru->gs_gid, cbrnum, gru->gs_blade->bs_async_wq->done);
480 }
468 481
469 for_each_cbr_in_tfm(cbrnum, map.fault_bits) { 482 for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
470 tfh = get_tfh_by_index(gru, cbrnum); 483 tfh = get_tfh_by_index(gru, cbrnum);
471 prefetchw(tfh); /* Helps on hdw, required for emulator */ 484 prefetchw(tfh); /* Helps on hdw, required for emulator */
472 485
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index afc4c473c79..e38a0f1775f 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -572,8 +572,12 @@ void gru_load_context(struct gru_thread_state *gts)
572 572
573 if (is_kernel_context(gts)) { 573 if (is_kernel_context(gts)) {
574 cch->unmap_enable = 1; 574 cch->unmap_enable = 1;
575 cch->tfm_done_bit_enable = 1;
576 cch->cb_int_enable = 1;
575 } else { 577 } else {
576 cch->unmap_enable = 0; 578 cch->unmap_enable = 0;
579 cch->tfm_done_bit_enable = 0;
580 cch->cb_int_enable = 0;
577 asid = gru_load_mm_tracker(gru, gts); 581 asid = gru_load_mm_tracker(gru, gts);
578 for (i = 0; i < 8; i++) { 582 for (i = 0; i < 8; i++) {
579 cch->asid[i] = asid + i; 583 cch->asid[i] = asid + i;
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 5f8f3bda2fa..ca81800146f 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -462,6 +462,11 @@ struct gru_blade_state {
462 struct rw_semaphore bs_kgts_sema; /* lock for kgts */ 462 struct rw_semaphore bs_kgts_sema; /* lock for kgts */
463 struct gru_thread_state *bs_kgts; /* GTS for kernel use */ 463 struct gru_thread_state *bs_kgts; /* GTS for kernel use */
464 464
465 /* ---- the following are used for managing kernel async GRU CBRs --- */
466 int bs_async_dsr_bytes; /* DSRs for async */
467 int bs_async_cbrs; /* CBRs AU for async */
468 struct completion *bs_async_wq;
469
465 /* ---- the following are protected by the bs_lock spinlock ---- */ 470 /* ---- the following are protected by the bs_lock spinlock ---- */
466 spinlock_t bs_lock; /* lock used for 471 spinlock_t bs_lock; /* lock used for
467 stealing contexts */ 472 stealing contexts */