author     Jack Steiner <steiner@sgi.com>                    2009-12-15 19:48:11 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-12-16 10:20:16 -0500
commit     4107e1d38a23028c2a3bc23dd948265dbe6becba (patch)
tree       3c64087639dc327e42447f015f3c4b01c4a1de39 /drivers/misc/sgi-gru/grufault.c
parent     67bf04a5c2574e9495f660f418f6df776821d578 (diff)
gru: update irq infrastructure
Update the GRU irq allocate/free functions to use the latest upstream
infrastructure.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-gru/grufault.c')
-rw-r--r--   drivers/misc/sgi-gru/grufault.c   49
1 file changed, 32 insertions(+), 17 deletions(-)
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index d3cacd696b38..a78aa798d50b 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -134,19 +134,6 @@ static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
 }
 
 /*
- * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the
- * interrupt. Interrupts are always sent to a cpu on the blade that contains the
- * GRU (except for headless blades which are not currently supported). A blade
- * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
- * number uniquely identifies the GRU chiplet on the local blade that caused the
- * interrupt. Always called in interrupt context.
- */
-static inline struct gru_state *irq_to_gru(int irq)
-{
-	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
-}
-
-/*
  * Read & clear a TFM
  *
  * The GRU has an array of fault maps. A map is private to a cpu
@@ -449,7 +436,7 @@ failactive:
  * Note that this is the interrupt handler that is registered with linux
  * interrupt handlers.
  */
-irqreturn_t gru_intr(int irq, void *dev_id)
+static irqreturn_t gru_intr(int chiplet, int blade)
 {
 	struct gru_state *gru;
 	struct gru_tlb_fault_map imap, dmap;
@@ -459,13 +446,18 @@ irqreturn_t gru_intr(int irq, void *dev_id)
 
 	STAT(intr);
 
-	gru = irq_to_gru(irq);
+	gru = &gru_base[blade]->bs_grus[chiplet];
 	if (!gru) {
-		dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
-			raw_smp_processor_id(), irq);
+		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
+			raw_smp_processor_id(), chiplet);
 		return IRQ_NONE;
 	}
 	get_clear_fault_map(gru, &imap, &dmap);
+	gru_dbg(grudev,
+		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+		smp_processor_id(), chiplet, gru->gs_gid,
+		imap.fault_bits[0], imap.fault_bits[1],
+		dmap.fault_bits[0], dmap.fault_bits[1]);
 
 	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
 		complete(gru->gs_blade->bs_async_wq);
@@ -503,6 +495,29 @@ irqreturn_t gru_intr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+irqreturn_t gru0_intr(int irq, void *dev_id)
+{
+	return gru_intr(0, uv_numa_blade_id());
+}
+
+irqreturn_t gru1_intr(int irq, void *dev_id)
+{
+	return gru_intr(1, uv_numa_blade_id());
+}
+
+irqreturn_t gru_intr_mblade(int irq, void *dev_id)
+{
+	int blade;
+
+	for_each_possible_blade(blade) {
+		if (uv_blade_nr_possible_cpus(blade))
+			continue;
+		gru_intr(0, blade);
+		gru_intr(1, blade);
+	}
+	return IRQ_HANDLED;
+}
+
 
 static int gru_user_dropin(struct gru_thread_state *gts,
 			   struct gru_tlb_fault_handle *tfh,
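
For orientation: the rework above drops the IRQ-number arithmetic of irq_to_gru()
(the removed comment described one block of consecutive IRQs per blade, with the IRQ
offset identifying the chiplet) and instead has gru_intr() take an explicit
(chiplet, blade) pair, with gru0_intr(), gru1_intr() and gru_intr_mblade() acting as
the irq_handler_t-compatible entry points. The registration side ("the latest
upstream infrastructure" from the commit message) is not in this file. The sketch
below is only an illustration of how handlers with that signature plug into the
generic request_irq()/free_irq() API; EXAMPLE_GRU_IRQ, the "example-gru" name and
the NULL dev_id are made-up placeholders, not the driver's actual values.

/*
 * Illustrative sketch only -- not taken from this patch.  Shows the generic
 * pattern for wiring an irqreturn_t (*)(int, void *) handler such as
 * gru0_intr()/gru1_intr()/gru_intr_mblade() into request_irq()/free_irq().
 */
#include <linux/interrupt.h>
#include <linux/printk.h>

#define EXAMPLE_GRU_IRQ	200	/* placeholder vector, not a real GRU IRQ */

static int example_gru_request_irq(irq_handler_t handler)
{
	int ret;

	/* dev_id is NULL because, as in the patch, the handlers ignore it */
	ret = request_irq(EXAMPLE_GRU_IRQ, handler, 0, "example-gru", NULL);
	if (ret)
		pr_err("example-gru: request_irq failed: %d\n", ret);
	return ret;
}

static void example_gru_free_irq(void)
{
	free_irq(EXAMPLE_GRU_IRQ, NULL);
}

In the real driver the vector numbers come from the platform irq setup code handled
elsewhere in the patch series, so the fixed define above exists purely for the sketch.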