about summary refs log tree commit diff stats
path: root/drivers/misc/sgi-gru/grufile.c
diff options
context:
space:
mode:
authorJack Steiner <steiner@sgi.com>2009-12-15 19:48:11 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-12-16 10:20:16 -0500
commit4107e1d38a23028c2a3bc23dd948265dbe6becba (patch)
tree3c64087639dc327e42447f015f3c4b01c4a1de39 /drivers/misc/sgi-gru/grufile.c
parent67bf04a5c2574e9495f660f418f6df776821d578 (diff)
gru: update irq infrastructure
Update the GRU irq allocate/free functions to use the latest upstream infrastructure. Signed-off-by: Jack Steiner <steiner@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc/sgi-gru/grufile.c')
-rw-r--r--drivers/misc/sgi-gru/grufile.c258
1 files changed, 208 insertions, 50 deletions
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 0a6d2a5a01f3..22b8b2733a24 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -35,6 +35,9 @@
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/uaccess.h> 37#include <linux/uaccess.h>
38#ifdef CONFIG_X86_64
39#include <asm/uv/uv_irq.h>
40#endif
38#include <asm/uv/uv.h> 41#include <asm/uv/uv.h>
39#include "gru.h" 42#include "gru.h"
40#include "grulib.h" 43#include "grulib.h"
@@ -130,7 +133,6 @@ static int gru_create_new_context(unsigned long arg)
130 struct gru_vma_data *vdata; 133 struct gru_vma_data *vdata;
131 int ret = -EINVAL; 134 int ret = -EINVAL;
132 135
133
134 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) 136 if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
135 return -EFAULT; 137 return -EFAULT;
136 138
@@ -302,34 +304,210 @@ fail:
302 return -ENOMEM; 304 return -ENOMEM;
303} 305}
304 306
305#ifdef CONFIG_IA64 307static void gru_free_tables(void)
308{
309 int bid;
310 int order = get_order(sizeof(struct gru_state) *
311 GRU_CHIPLETS_PER_BLADE);
306 312
307static int get_base_irq(void) 313 for (bid = 0; bid < GRU_MAX_BLADES; bid++)
314 free_pages((unsigned long)gru_base[bid], order);
315}
316
317static unsigned long gru_chiplet_cpu_to_mmr(int chiplet, int cpu, int *corep)
308{ 318{
309 return IRQ_GRU; 319 unsigned long mmr = 0;
320 int core;
321
322 /*
323 * We target the cores of a blade and not the hyperthreads themselves.
324 * There is a max of 8 cores per socket and 2 sockets per blade,
325 * making for a max total of 16 cores (i.e., 16 CPUs without
326 * hyperthreading and 32 CPUs with hyperthreading).
327 */
328 core = uv_cpu_core_number(cpu) + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
329 if (core >= GRU_NUM_TFM || uv_cpu_ht_number(cpu))
330 return 0;
331
332 if (chiplet == 0) {
333 mmr = UVH_GR0_TLB_INT0_CONFIG +
334 core * (UVH_GR0_TLB_INT1_CONFIG - UVH_GR0_TLB_INT0_CONFIG);
335 } else if (chiplet == 1) {
336 mmr = UVH_GR1_TLB_INT0_CONFIG +
337 core * (UVH_GR1_TLB_INT1_CONFIG - UVH_GR1_TLB_INT0_CONFIG);
338 } else {
339 BUG();
340 }
341
342 *corep = core;
343 return mmr;
310} 344}
311 345
312#elif defined CONFIG_X86_64 346#ifdef CONFIG_IA64
347
348static int gru_irq_count[GRU_CHIPLETS_PER_BLADE];
313 349
314static void noop(unsigned int irq) 350static void gru_noop(unsigned int irq)
315{ 351{
316} 352}
317 353
318static struct irq_chip gru_chip = { 354static struct irq_chip gru_chip[GRU_CHIPLETS_PER_BLADE] = {
319 .name = "gru", 355 [0 ... GRU_CHIPLETS_PER_BLADE - 1] {
320 .mask = noop, 356 .mask = gru_noop,
321 .unmask = noop, 357 .unmask = gru_noop,
322 .ack = noop, 358 .ack = gru_noop
359 }
323}; 360};
324 361
325static int get_base_irq(void) 362static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
363 irq_handler_t irq_handler, int cpu, int blade)
364{
365 unsigned long mmr;
366 int irq = IRQ_GRU + chiplet;
367 int ret, core;
368
369 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
370 if (mmr == 0)
371 return 0;
372
373 if (gru_irq_count[chiplet] == 0) {
374 gru_chip[chiplet].name = irq_name;
375 ret = set_irq_chip(irq, &gru_chip[chiplet]);
376 if (ret) {
377 printk(KERN_ERR "%s: set_irq_chip failed, errno=%d\n",
378 GRU_DRIVER_ID_STR, -ret);
379 return ret;
380 }
381
382 ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
383 if (ret) {
384 printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
385 GRU_DRIVER_ID_STR, -ret);
386 return ret;
387 }
388 }
389 gru_irq_count[chiplet]++;
390
391 return 0;
392}
393
394static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
395{
396 unsigned long mmr;
397 int core, irq = IRQ_GRU + chiplet;
398
399 if (gru_irq_count[chiplet] == 0)
400 return;
401
402 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
403 if (mmr == 0)
404 return;
405
406 if (--gru_irq_count[chiplet] == 0)
407 free_irq(irq, NULL);
408}
409
410#elif defined CONFIG_X86_64
411
412static int gru_chiplet_setup_tlb_irq(int chiplet, char *irq_name,
413 irq_handler_t irq_handler, int cpu, int blade)
414{
415 unsigned long mmr;
416 int irq, core;
417 int ret;
418
419 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
420 if (mmr == 0)
421 return 0;
422
423 irq = uv_setup_irq(irq_name, cpu, blade, mmr, UV_AFFINITY_CPU);
424 if (irq < 0) {
425 printk(KERN_ERR "%s: uv_setup_irq failed, errno=%d\n",
426 GRU_DRIVER_ID_STR, -irq);
427 return irq;
428 }
429
430 ret = request_irq(irq, irq_handler, 0, irq_name, NULL);
431 if (ret) {
432 uv_teardown_irq(irq);
433 printk(KERN_ERR "%s: request_irq failed, errno=%d\n",
434 GRU_DRIVER_ID_STR, -ret);
435 return ret;
436 }
437 gru_base[blade]->bs_grus[chiplet].gs_irq[core] = irq;
438 return 0;
439}
440
441static void gru_chiplet_teardown_tlb_irq(int chiplet, int cpu, int blade)
326{ 442{
327 set_irq_chip(IRQ_GRU, &gru_chip); 443 int irq, core;
328 set_irq_chip(IRQ_GRU + 1, &gru_chip); 444 unsigned long mmr;
329 return IRQ_GRU; 445
446 mmr = gru_chiplet_cpu_to_mmr(chiplet, cpu, &core);
447 if (mmr) {
448 irq = gru_base[blade]->bs_grus[chiplet].gs_irq[core];
449 if (irq) {
450 free_irq(irq, NULL);
451 uv_teardown_irq(irq);
452 }
453 }
330} 454}
455
331#endif 456#endif
332 457
458static void gru_teardown_tlb_irqs(void)
459{
460 int blade;
461 int cpu;
462
463 for_each_online_cpu(cpu) {
464 blade = uv_cpu_to_blade_id(cpu);
465 gru_chiplet_teardown_tlb_irq(0, cpu, blade);
466 gru_chiplet_teardown_tlb_irq(1, cpu, blade);
467 }
468 for_each_possible_blade(blade) {
469 if (uv_blade_nr_possible_cpus(blade))
470 continue;
471 gru_chiplet_teardown_tlb_irq(0, 0, blade);
472 gru_chiplet_teardown_tlb_irq(1, 0, blade);
473 }
474}
475
476static int gru_setup_tlb_irqs(void)
477{
478 int blade;
479 int cpu;
480 int ret;
481
482 for_each_online_cpu(cpu) {
483 blade = uv_cpu_to_blade_id(cpu);
484 ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru0_intr, cpu, blade);
485 if (ret != 0)
486 goto exit1;
487
488 ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru1_intr, cpu, blade);
489 if (ret != 0)
490 goto exit1;
491 }
492 for_each_possible_blade(blade) {
493 if (uv_blade_nr_possible_cpus(blade))
494 continue;
495 ret = gru_chiplet_setup_tlb_irq(0, "GRU0_TLB", gru_intr_mblade, 0, blade);
496 if (ret != 0)
497 goto exit1;
498
499 ret = gru_chiplet_setup_tlb_irq(1, "GRU1_TLB", gru_intr_mblade, 0, blade);
500 if (ret != 0)
501 goto exit1;
502 }
503
504 return 0;
505
506exit1:
507 gru_teardown_tlb_irqs();
508 return ret;
509}
510
333/* 511/*
334 * gru_init 512 * gru_init
335 * 513 *
@@ -337,8 +515,7 @@ static int get_base_irq(void)
337 */ 515 */
338static int __init gru_init(void) 516static int __init gru_init(void)
339{ 517{
340 int ret, irq, chip; 518 int ret;
341 char id[10];
342 519
343 if (!is_uv_system()) 520 if (!is_uv_system())
344 return 0; 521 return 0;
@@ -353,41 +530,29 @@ static int __init gru_init(void)
353 gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; 530 gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
354 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", 531 printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
355 gru_start_paddr, gru_end_paddr); 532 gru_start_paddr, gru_end_paddr);
356 irq = get_base_irq();
357 for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
358 ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
359 /* TODO: fix irq handling on x86. For now ignore failure because
360 * interrupts are not required & not yet fully supported */
361 if (ret) {
362 printk(KERN_WARNING
363 "!!!WARNING: GRU ignoring request failure!!!\n");
364 ret = 0;
365 }
366 if (ret) {
367 printk(KERN_ERR "%s: request_irq failed\n",
368 GRU_DRIVER_ID_STR);
369 goto exit1;
370 }
371 }
372
373 ret = misc_register(&gru_miscdev); 533 ret = misc_register(&gru_miscdev);
374 if (ret) { 534 if (ret) {
375 printk(KERN_ERR "%s: misc_register failed\n", 535 printk(KERN_ERR "%s: misc_register failed\n",
376 GRU_DRIVER_ID_STR); 536 GRU_DRIVER_ID_STR);
377 goto exit1; 537 goto exit0;
378 } 538 }
379 539
380 ret = gru_proc_init(); 540 ret = gru_proc_init();
381 if (ret) { 541 if (ret) {
382 printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR); 542 printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
383 goto exit2; 543 goto exit1;
384 } 544 }
385 545
386 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr); 546 ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
387 if (ret) { 547 if (ret) {
388 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR); 548 printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
389 goto exit3; 549 goto exit2;
390 } 550 }
551
552 ret = gru_setup_tlb_irqs();
553 if (ret != 0)
554 goto exit3;
555
391 gru_kservices_init(); 556 gru_kservices_init();
392 557
393 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR, 558 printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
@@ -395,31 +560,24 @@ static int __init gru_init(void)
395 return 0; 560 return 0;
396 561
397exit3: 562exit3:
398 gru_proc_exit(); 563 gru_free_tables();
399exit2: 564exit2:
400 misc_deregister(&gru_miscdev); 565 gru_proc_exit();
401exit1: 566exit1:
402 for (--chip; chip >= 0; chip--) 567 misc_deregister(&gru_miscdev);
403 free_irq(irq + chip, NULL); 568exit0:
404 return ret; 569 return ret;
405 570
406} 571}
407 572
408static void __exit gru_exit(void) 573static void __exit gru_exit(void)
409{ 574{
410 int i, bid;
411 int order = get_order(sizeof(struct gru_state) *
412 GRU_CHIPLETS_PER_BLADE);
413
414 if (!is_uv_system()) 575 if (!is_uv_system())
415 return; 576 return;
416 577
417 for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) 578 gru_teardown_tlb_irqs();
418 free_irq(IRQ_GRU + i, NULL);
419 gru_kservices_exit(); 579 gru_kservices_exit();
420 for (bid = 0; bid < GRU_MAX_BLADES; bid++) 580 gru_free_tables();
421 free_pages((unsigned long)gru_base[bid], order);
422
423 misc_deregister(&gru_miscdev); 581 misc_deregister(&gru_miscdev);
424 gru_proc_exit(); 582 gru_proc_exit();
425} 583}