author		Jack Steiner <steiner@sgi.com>	2009-12-15 19:48:11 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-16 10:20:16 -0500
commit		4107e1d38a23028c2a3bc23dd948265dbe6becba (patch)
tree		3c64087639dc327e42447f015f3c4b01c4a1de39 /drivers/misc
parent		67bf04a5c2574e9495f660f418f6df776821d578 (diff)
gru: update irq infrastructure
Update the GRU irq allocate/free functions to use the latest upstream
infrastructure.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc')
-rw-r--r--	drivers/misc/sgi-gru/grufault.c		49
-rw-r--r--	drivers/misc/sgi-gru/grufile.c		258
-rw-r--r--	drivers/misc/sgi-gru/grumain.c		9
-rw-r--r--	drivers/misc/sgi-gru/grutables.h	14
4 files changed, 260 insertions(+), 70 deletions(-)
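A note on the change below: before this patch a single handler, gru_intr(irq, dev_id), recovered the chiplet number arithmetically from the raw IRQ (irq - IRQ_GRU) and always assumed the local blade. After it, a common worker gru_intr(chiplet, blade) does the real work and thin per-chiplet wrappers are what get registered. A minimal standalone C sketch of that shape (hypothetical constants, with printf standing in for the kernel's IRQ plumbing — not kernel code):

#include <stdio.h>

#define IRQ_GRU 16	/* hypothetical base irq, stands in for the arch constant */

/* old scheme: one registered handler, chiplet decoded from the irq number */
static void old_gru_intr(int irq)
{
	printf("old: irq %d -> chiplet %d on the local blade\n",
	       irq, irq - IRQ_GRU);
}

/* new scheme: common worker takes (chiplet, blade) explicitly... */
static void new_gru_intr(int chiplet, int blade)
{
	printf("new: chiplet %d, blade %d\n", chiplet, blade);
}

/* ...and trivial per-chiplet wrappers are what get registered */
static void new_gru0_intr(int irq) { (void)irq; new_gru_intr(0, 0); }
static void new_gru1_intr(int irq) { (void)irq; new_gru_intr(1, 0); }

int main(void)
{
	old_gru_intr(IRQ_GRU + 1);	/* chiplet recovered arithmetically */
	new_gru0_intr(100);		/* irq value no longer matters */
	new_gru1_intr(101);
	return 0;
}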
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index d3cacd696b38..a78aa798d50b 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -134,19 +134,6 @@ static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
 }
 
 /*
- * Convert a interrupt IRQ to a pointer to the GRU GTS that caused the
- * interrupt. Interrupts are always sent to a cpu on the blade that contains the
- * GRU (except for headless blades which are not currently supported). A blade
- * has N grus; a block of N consecutive IRQs is assigned to the GRUs. The IRQ
- * number uniquely identifies the GRU chiplet on the local blade that caused the
- * interrupt. Always called in interrupt context.
- */
-static inline struct gru_state *irq_to_gru(int irq)
-{
-	return &gru_base[uv_numa_blade_id()]->bs_grus[irq - IRQ_GRU];
-}
-
-/*
  * Read & clear a TFM
  *
  * The GRU has an array of fault maps. A map is private to a cpu
@@ -449,7 +436,7 @@ failactive:
  * Note that this is the interrupt handler that is registered with linux
  * interrupt handlers.
  */
-irqreturn_t gru_intr(int irq, void *dev_id)
+static irqreturn_t gru_intr(int chiplet, int blade)
 {
 	struct gru_state *gru;
 	struct gru_tlb_fault_map imap, dmap;
@@ -459,13 +446,18 @@ irqreturn_t gru_intr(int irq, void *dev_id)
 
 	STAT(intr);
 
-	gru = irq_to_gru(irq);
+	gru = &gru_base[blade]->bs_grus[chiplet];
 	if (!gru) {
-		dev_err(grudev, "GRU: invalid interrupt: cpu %d, irq %d\n",
-			raw_smp_processor_id(), irq);
+		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
+			raw_smp_processor_id(), chiplet);
 		return IRQ_NONE;
 	}
 	get_clear_fault_map(gru, &imap, &dmap);
+	gru_dbg(grudev,
+		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
+		smp_processor_id(), chiplet, gru->gs_gid,
+		imap.fault_bits[0], imap.fault_bits[1],
+		dmap.fault_bits[0], dmap.fault_bits[1]);
 
 	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
 		complete(gru->gs_blade->bs_async_wq);
@@ -503,6 +495,29 @@ irqreturn_t gru_intr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+irqreturn_t gru0_intr(int irq, void *dev_id)
+{
+	return gru_intr(0, uv_numa_blade_id());
+}
+
+irqreturn_t gru1_intr(int irq, void *dev_id)
+{
+	return gru_intr(1, uv_numa_blade_id());
+}
+
+irqreturn_t gru_intr_mblade(int irq, void *dev_id)
+{
+	int blade;
+
+	for_each_possible_blade(blade) {
+		if (uv_blade_nr_possible_cpus(blade))
+			continue;
+		gru_intr(0, blade);
+		gru_intr(1, blade);
+	}
+	return IRQ_HANDLED;
+}
+
 
 static int gru_user_dropin(struct gru_thread_state *gts,
 			   struct gru_tlb_fault_handle *tfh,
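One subtlety in gru_intr_mblade() above: interrupts for a GRU are normally delivered to a cpu on the GRU's own blade, so uv_numa_blade_id() identifies the blade in gru0_intr()/gru1_intr(). Headless blades have no cpus to target, so a single handler walks all blades and services only those where uv_blade_nr_possible_cpus() is zero. A standalone sketch of that filter, with made-up cpu counts:

#include <stdio.h>

#define NR_BLADES 4

/* hypothetical per-blade cpu counts; blades 1 and 3 are headless */
static const int possible_cpus[NR_BLADES] = { 16, 0, 16, 0 };

int main(void)
{
	int blade;

	for (blade = 0; blade < NR_BLADES; blade++) {
		if (possible_cpus[blade])
			continue;	/* blades with cpus get local, per-cpu irqs */
		printf("polling headless blade %d, chiplets 0 and 1\n", blade);
	}
	return 0;
}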
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 0a6d2a5a01f3..22b8b2733a24 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -35,6 +35,9 @@
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
+#ifdef CONFIG_X86_64
+#include <asm/uv/uv_irq.h>
+#endif
 #include <asm/uv/uv.h>
 #include "gru.h"
 #include "grulib.h"
@@ -130,7 +133,6 @@ static int gru_create_new_context(unsigned long arg)
 	struct gru_vma_data *vdata;
 	int ret = -EINVAL;
 
-
 	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
 		return -EFAULT;
 
@@ -302,34 +304,210 @@ fail:
 	return -ENOMEM;
 }
 
-#ifdef CONFIG_IA64
+static void gru_free_tables(void)
+{
+	int bid;
+	int order = get_order(sizeof(struct gru_state) *
+			GRU_CHIPLETS_PER_BLADE);
 
-static int get_base_irq(void)
+	for (bid = 0; bid < GRU_MAX_BLADES; bid++)
+		free_pages((unsigned long)gru_base[bid], order);
+}
+
+static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
 {
-	return IRQ_GRU;
+	unsigned long mmr = 0;
+	int core;
+
+	/*
+	 * We target the cores of a blade and not the hyperthreads themselves.
+	 * There is a max of 8 cores per socket and 2 sockets per blade,
+	 * making for a max total of 16 cores (i.e., 16 CPUs without
+	 * hyperthreading and 32 CPUs with hyperthreading).
+	 */
+	core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+	if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
+		return 0;
+
+	if (chiplet == 0) {
+		mmr = UVH_GR0_TLB_INT0_CONFIG +
+			core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
+	} else if (chiplet == 1) {
+		mmr = UVH_GR1_TLB_INT0_CONFIG +
+			core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
+	} else {
+		BUG();
+	}
+
+	*corep = core;
+	return mmr;
 }
 
-#elif defined CONFIG_X86_64
+#ifdef CONFIG_IA64
+
+static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
 
-static void noop(unsigned int irq)
+static void gru_noop(unsigned int irq)
 {
 }
 
-static struct irq_chip gru_chip = {
-	.name = "gru",
-	.mask = noop,
-	.unmask = noop,
-	.ack = noop,
+static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
+	[0 ... GRU_CHIPLETS_PER_BLADE - 1] {
+		.mask = gru_noop,
+		.unmask = gru_noop,
+		.ack = gru_noop
+	}
 };
 
-static int get_base_irq(void)
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+			irq_handler_t irq_handler, int cpu, int blade)
+{
+	unsigned long mmr;
+	int irq = IRQ_GRU + chiplet;
+	int ret, core;
+
+	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+	if (mmr == 0)
+		return 0;
+
+	if (gru_irq_count[chiplet] == 0) {
+		gru_chip[chiplet].name = irq_name;
+		ret = set_irq_chip(irq, &gru_chip[chiplet]);
+		if (ret) {
+			printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
+			       GRU_DRIVER_ID_STR, -ret);
+			return ret;
+		}
+
+		ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+		if (ret) {
+			printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+			       GRU_DRIVER_ID_STR, -ret);
+			return ret;
+		}
+	}
+	gru_irq_count[chiplet]++;
+
+	return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
+{
+	unsigned long mmr;
+	int core, irq = IRQ_GRU + chiplet;
+
+	if (gru_irq_count[chiplet] == 0)
+		return;
+
+	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+	if (mmr == 0)
+		return;
+
+	if (--gru_irq_count[chiplet] == 0)
+		free_irq(irq, NULL);
+}
+
+#elif defined CONFIG_X86_64
+
+static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
+			irq_handler_t irq_handler, int cpu, int blade)
+{
+	unsigned long mmr;
+	int irq, core;
+	int ret;
+
+	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+	if (mmr == 0)
+		return 0;
+
+	irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
+	if (irq < 0) {
+		printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
+		       GRU_DRIVER_ID_STR, -irq);
+		return irq;
+	}
+
+	ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
+	if (ret) {
+		uv_teardown_irq(irq);
+		printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
+		       GRU_DRIVER_ID_STR, -ret);
+		return ret;
+	}
+	gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
+	return 0;
+}
+
+static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
 {
-	set_irq_chip(IRQ_GRU, &gru_chip);
-	set_irq_chip(IRQ_GRU + 1, &gru_chip);
-	return IRQ_GRU;
+	int irq, core;
+	unsigned long mmr;
+
+	mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
+	if (mmr) {
+		irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
+		if (irq) {
+			free_irq(irq, NULL);
+			uv_teardown_irq(irq);
+		}
+	}
 }
+
 #endif
 
+static void gru_teardown_tlb_irqs(void)
+{
+	int blade;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		blade = uv_cpu_to_blade_id(cpu);
+		gru_chiplet_teardown_tlb_irq(0, cpu, blade);
+		gru_chiplet_teardown_tlb_irq(1, cpu, blade);
+	}
+	for_each_possible_blade(blade) {
+		if (uv_blade_nr_possible_cpus(blade))
+			continue;
+		gru_chiplet_teardown_tlb_irq(0, 0, blade);
+		gru_chiplet_teardown_tlb_irq(1, 0, blade);
+	}
+}
+
+static int gru_setup_tlb_irqs(void)
+{
+	int blade;
+	int cpu;
+	int ret;
+
+	for_each_online_cpu(cpu) {
+		blade = uv_cpu_to_blade_id(cpu);
+		ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
+		if (ret != 0)
+			goto exit1;
+
+		ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
+		if (ret != 0)
+			goto exit1;
+	}
+	for_each_possible_blade(blade) {
+		if (uv_blade_nr_possible_cpus(blade))
+			continue;
+		ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
+		if (ret != 0)
+			goto exit1;
+
+		ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
+		if (ret != 0)
+			goto exit1;
+	}
+
+	return 0;
+
+exit1:
+	gru_teardown_tlb_irqs();
+	return ret;
+}
+
 /*
  * gru_init
  *
@@ -337,8 +515,7 @@ static int get_base_irq(void)
  */
 static int __init gru_init(void)
 {
-	int ret, irq, chip;
-	char id[10];
+	int ret;
 
 	if (!is_uv_system())
 		return 0;
@@ -353,41 +530,29 @@ static int __init gru_init(void)
 	gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
 	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
 	       gru_start_paddr, gru_end_paddr);
-	irq = get_base_irq();
-	for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
-		ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
-		/* TODO: fix irq handling on x86. For now ignore failure because
-		 * interrupts are not required & not yet fully supported */
-		if (ret) {
-			printk(KERN_WARNING
-			       "!!!WARNING: GRU ignoring request failure!!!\n");
-			ret = 0;
-		}
-		if (ret) {
-			printk(KERN_ERR "%s: request_irq failed\n",
-			       GRU_DRIVER_ID_STR);
-			goto exit1;
-		}
-	}
-
 	ret = misc_register(&gru_miscdev);
 	if (ret) {
 		printk(KERN_ERR "%s: misc_register failed\n",
 		       GRU_DRIVER_ID_STR);
-		goto exit1;
+		goto exit0;
 	}
 
 	ret = gru_proc_init();
 	if (ret) {
 		printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
-		goto exit2;
+		goto exit1;
 	}
 
 	ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
 	if (ret) {
 		printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
-		goto exit3;
+		goto exit2;
 	}
+
+	ret = gru_setup_tlb_irqs();
+	if (ret != 0)
+		goto exit3;
+
 	gru_kservices_init();
 
 	printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
@@ -395,31 +560,24 @@ static int __init gru_init(void)
 	return 0;
 
 exit3:
-	gru_proc_exit();
+	gru_free_tables();
 exit2:
-	misc_deregister(&gru_miscdev);
+	gru_proc_exit();
 exit1:
-	for (--chip; chip >= 0; chip--)
-		free_irq(irq + chip, NULL);
+	misc_deregister(&gru_miscdev);
+exit0:
 	return ret;
 
 }
 
 static void __exit gru_exit(void)
 {
-	int i, bid;
-	int order = get_order(sizeof(struct gru_state) *
-			GRU_CHIPLETS_PER_BLADE);
-
 	if (!is_uv_system())
 		return;
 
-	for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
-		free_irq(IRQ_GRU + i, NULL);
+	gru_teardown_tlb_irqs();
 	gru_kservices_exit();
-	for (bid = 0; bid < GRU_MAX_BLADES; bid++)
-		free_pages((unsigned long)gru_base[bid], order);
-
+	gru_free_tables();
 	misc_deregister(&gru_miscdev);
 	gru_proc_exit();
 }
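The IA64 path above keeps a per-chiplet count (gru_irq_count) because every online cpu calls gru_chiplet_setup_tlb_irq(), yet each fixed IRQ_GRU vector can only be requested once: the first caller per chiplet does the real set_irq_chip()/request_irq(), later callers only bump the count, and the last teardown frees the irq. A standalone sketch of that refcounting (printf standing in for the request/free calls):

#include <stdio.h>

#define GRU_CHIPLETS_PER_BLADE 2

static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];

static void setup(int chiplet)
{
	if (gru_irq_count[chiplet] == 0)
		printf("request_irq for chiplet %d\n", chiplet);	/* first user */
	gru_irq_count[chiplet]++;
}

static void teardown(int chiplet)
{
	if (gru_irq_count[chiplet] == 0)
		return;			/* never set up */
	if (--gru_irq_count[chiplet] == 0)
		printf("free_irq for chiplet %d\n", chiplet);	/* last user */
}

int main(void)
{
	setup(0);	/* cpu A: actually requests the irq */
	setup(0);	/* cpu B: shares it, count only */
	teardown(0);	/* cpu A gone: irq stays */
	teardown(0);	/* cpu B gone: irq freed */
	return 0;
}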
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index a383271d3912..120c70c5a28a 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -49,12 +49,16 @@ struct device *grudev = &gru_device;
 /*
  * Select a gru fault map to be used by the current cpu. Note that
  * multiple cpus may be using the same map.
- * ZZZ should "shift" be used?? Depends on HT cpu numbering
  * ZZZ should be inline but did not work on emulator
  */
 int gru_cpu_fault_map_id(void)
 {
-	return uv_blade_processor_id() % GRU_NUM_TFM;
+	int cpu = smp_processor_id();
+	int id, core;
+
+	core = uv_cpu_core_number(cpu);
+	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
+	return id;
 }
 
 /*--------- ASID Management -------------------------------------------
@@ -605,6 +609,7 @@ void gru_load_context(struct gru_thread_state *gts)
 		cch->unmap_enable = 1;
 		cch->tfm_done_bit_enable = 1;
 		cch->cb_int_enable = 1;
+		cch->tlb_int_select = 0;	/* For now, ints go to cpu 0 */
 	} else {
 		cch->unmap_enable = 0;
 		cch->tfm_done_bit_enable = 0;
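The rewritten gru_cpu_fault_map_id() above picks the fault-map slot as core + UV_MAX_INT_CORES * socket, the same slot gru_chiplet_cpu_to_mmr() in grufile.c uses to target the per-core interrupt MMR, so fault maps and interrupt routing agree. A quick standalone check of the arithmetic (UV_MAX_INT_CORES = 8, as defined in grutables.h below):

#include <stdio.h>

#define UV_MAX_INT_CORES 8

int main(void)
{
	int socket, core;

	/* e.g. core 3 on socket 1 -> map id 3 + 8 * 1 = 11 */
	for (socket = 0; socket < 2; socket++)
		for (core = 0; core < 4; core++)
			printf("socket %d core %d -> fault map id %d\n",
			       socket, core,
			       core + UV_MAX_INT_CORES * socket);
	return 0;
}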
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index f0c7308a36bb..d83e36715e6b 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -444,6 +444,7 @@ struct gru_state {
 						in use */
 	struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using
 						the context */
+	int			gs_irq[GRU_NUM_TFM];	/* Interrupt irqs */
 };
 
 /*
@@ -610,6 +611,15 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
 	return !gts->ts_mm;
 }
 
+/*
+ * The following are for Nehelem-EX. A more general scheme is needed for
+ * future processors.
+ */
+#define UV_MAX_INT_CORES		8
+#define uv_cpu_socket_number(p)		((cpu_physical_id(p) >> 5) & 1)
+#define uv_cpu_ht_number(p)		(cpu_physical_id(p) & 1)
+#define uv_cpu_core_number(p)		(((cpu_physical_id(p) >> 2) & 4) |	\
+					((cpu_physical_id(p) >> 1) & 3))
 /*-----------------------------------------------------------------------------
  * Function prototypes & externs
  */
@@ -633,9 +643,11 @@ extern void gts_drop(struct gru_thread_state *gts);
 extern void gru_tgh_flush_init(struct gru_state *gru);
 extern int gru_kservices_init(void);
 extern void gru_kservices_exit(void);
+extern irqreturn_t gru0_intr(int irq, void *dev_id);
+extern irqreturn_t gru1_intr(int irq, void *dev_id);
+extern irqreturn_t gru_intr_mblade(int irq, void *dev_id);
 extern int gru_dump_chiplet_request(unsigned long arg);
 extern long gru_get_gseg_statistics(unsigned long arg);
-extern irqreturn_t gru_intr(int irq, void *dev_id);
 extern int gru_handle_user_call_os(unsigned long address);
 extern int gru_user_flush_tlb(unsigned long arg);
 extern int gru_user_unload_context(unsigned long arg);
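As a rough standalone check of the Nehelem-EX decode added in grutables.h above: bit 0 of the physical id is the hyperthread, bits 1-2 plus bit 4 form the core number (bit 3 is skipped), and bit 5 is the socket. The macros can be exercised in plain C; cpu_physical_id() is mocked below so the raw id is passed directly, which is illustrative only:

#include <stdio.h>

#define cpu_physical_id(p)		(p)	/* mock: p is the raw apic id */
#define UV_MAX_INT_CORES		8
#define uv_cpu_socket_number(p)		((cpu_physical_id(p) >> 5) & 1)
#define uv_cpu_ht_number(p)		(cpu_physical_id(p) & 1)
#define uv_cpu_core_number(p)		(((cpu_physical_id(p) >> 2) & 4) | \
					 ((cpu_physical_id(p) >> 1) & 3))

int main(void)
{
	int id = 0x15;	/* 0b010101: ht = 1, core = 4 | 2 = 6, socket = 0 */

	printf("id 0x%x: socket %d, core %d, ht %d, fault map id %d\n", id,
	       uv_cpu_socket_number(id), uv_cpu_core_number(id),
	       uv_cpu_ht_number(id),
	       uv_cpu_core_number(id) + UV_MAX_INT_CORES * uv_cpu_socket_number(id));
	return 0;
}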