diff options
Diffstat (limited to 'drivers/misc/sgi-gru/grufile.c')
-rw-r--r-- | drivers/misc/sgi-gru/grufile.c | 36 |
1 files changed, 24 insertions, 12 deletions
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index c67e4e8bd62c..3e6e42d2f01b 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -45,7 +45,9 @@
45 | #include <asm/uv/uv_mmrs.h> | 45 | #include <asm/uv/uv_mmrs.h> |
46 | 46 | ||
47 | struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; | 47 | struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; |
48 | unsigned long gru_start_paddr, gru_end_paddr __read_mostly; | 48 | unsigned long gru_start_paddr __read_mostly; |
49 | unsigned long gru_end_paddr __read_mostly; | ||
50 | unsigned int gru_max_gids __read_mostly; | ||
49 | struct gru_stats_s gru_stats; | 51 | struct gru_stats_s gru_stats; |
50 | 52 | ||
51 | /* Guaranteed user available resources on each node */ | 53 | /* Guaranteed user available resources on each node */ |
@@ -101,7 +103,7 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
101 | return -EPERM; | 103 | return -EPERM; |
102 | 104 | ||
103 | if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || | 105 | if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || |
104 | vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) | 106 | vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) |
105 | return -EINVAL; | 107 | return -EINVAL; |
106 | 108 | ||
107 | vma->vm_flags |= | 109 | vma->vm_flags |= |
@@ -273,8 +275,11 @@ static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
273 | gru->gs_blade_id = bid; | 275 | gru->gs_blade_id = bid; |
274 | gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; | 276 | gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1; |
275 | gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; | 277 | gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1; |
278 | gru->gs_asid_limit = MAX_ASID; | ||
276 | gru_tgh_flush_init(gru); | 279 | gru_tgh_flush_init(gru); |
277 | gru_dbg(grudev, "bid %d, nid %d, gru %x, vaddr %p (0x%lx)\n", | 280 | if (gru->gs_gid >= gru_max_gids) |
281 | gru_max_gids = gru->gs_gid + 1; | ||
282 | gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n", | ||
278 | bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, | 283 | bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr, |
279 | gru->gs_gru_base_paddr); | 284 | gru->gs_gru_base_paddr); |
280 | gru_kservices_init(gru); | 285 | gru_kservices_init(gru); |
@@ -295,7 +300,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
295 | for_each_online_node(nid) { | 300 | for_each_online_node(nid) { |
296 | bid = uv_node_to_blade_id(nid); | 301 | bid = uv_node_to_blade_id(nid); |
297 | pnode = uv_node_to_pnode(nid); | 302 | pnode = uv_node_to_pnode(nid); |
298 | if (gru_base[bid]) | 303 | if (bid < 0 || gru_base[bid]) |
299 | continue; | 304 | continue; |
300 | page = alloc_pages_node(nid, GFP_KERNEL, order); | 305 | page = alloc_pages_node(nid, GFP_KERNEL, order); |
301 | if (!page) | 306 | if (!page) |
@@ -308,11 +313,11 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
308 | dsrbytes = 0; | 313 | dsrbytes = 0; |
309 | cbrs = 0; | 314 | cbrs = 0; |
310 | for (gru = gru_base[bid]->bs_grus, chip = 0; | 315 | for (gru = gru_base[bid]->bs_grus, chip = 0; |
311 | chip < GRU_CHIPLETS_PER_BLADE; | 316 | chip < GRU_CHIPLETS_PER_BLADE; |
312 | chip++, gru++) { | 317 | chip++, gru++) { |
313 | paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); | 318 | paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); |
314 | vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); | 319 | vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); |
315 | gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip); | 320 | gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip); |
316 | n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; | 321 | n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; |
317 | cbrs = max(cbrs, n); | 322 | cbrs = max(cbrs, n); |
318 | n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; | 323 | n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; |
@@ -370,26 +375,26 @@ static int __init gru_init(void)
370 | void *gru_start_vaddr; | 375 | void *gru_start_vaddr; |
371 | 376 | ||
372 | if (!is_uv_system()) | 377 | if (!is_uv_system()) |
373 | return 0; | 378 | return -ENODEV; |
374 | 379 | ||
375 | #if defined CONFIG_IA64 | 380 | #if defined CONFIG_IA64 |
376 | gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ | 381 | gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ |
377 | #else | 382 | #else |
378 | gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & | 383 | gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & |
379 | 0x7fffffffffffUL; | 384 | 0x7fffffffffffUL; |
380 | |||
381 | #endif | 385 | #endif |
382 | gru_start_vaddr = __va(gru_start_paddr); | 386 | gru_start_vaddr = __va(gru_start_paddr); |
383 | gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE; | 387 | gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; |
384 | printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", | 388 | printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", |
385 | gru_start_paddr, gru_end_paddr); | 389 | gru_start_paddr, gru_end_paddr); |
386 | irq = get_base_irq(); | 390 | irq = get_base_irq(); |
387 | for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { | 391 | for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { |
388 | ret = request_irq(irq + chip, gru_intr, 0, id, NULL); | 392 | ret = request_irq(irq + chip, gru_intr, 0, id, NULL); |
389 | /* TODO: fix irq handling on x86. For now ignore failures because | 393 | /* TODO: fix irq handling on x86. For now ignore failure because |
390 | * interrupts are not required & not yet fully supported */ | 394 | * interrupts are not required & not yet fully supported */ |
391 | if (ret) { | 395 | if (ret) { |
392 | printk("!!!WARNING: GRU ignoring request failure!!!\n"); | 396 | printk(KERN_WARNING |
397 | "!!!WARNING: GRU ignoring request failure!!!\n"); | ||
393 | ret = 0; | 398 | ret = 0; |
394 | } | 399 | } |
395 | if (ret) { | 400 | if (ret) { |
@@ -435,7 +440,7 @@ exit1:
435 | 440 | ||
436 | static void __exit gru_exit(void) | 441 | static void __exit gru_exit(void) |
437 | { | 442 | { |
438 | int i, bid; | 443 | int i, bid, gid; |
439 | int order = get_order(sizeof(struct gru_state) * | 444 | int order = get_order(sizeof(struct gru_state) * |
440 | GRU_CHIPLETS_PER_BLADE); | 445 | GRU_CHIPLETS_PER_BLADE); |
441 | 446 | ||
@@ -445,6 +450,9 @@ static void __exit gru_exit(void)
445 | for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) | 450 | for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++) |
446 | free_irq(IRQ_GRU + i, NULL); | 451 | free_irq(IRQ_GRU + i, NULL); |
447 | 452 | ||
453 | foreach_gid(gid) | ||
454 | gru_kservices_exit(GID_TO_GRU(gid)); | ||
455 | |||
448 | for (bid = 0; bid < GRU_MAX_BLADES; bid++) | 456 | for (bid = 0; bid < GRU_MAX_BLADES; bid++) |
449 | free_pages((unsigned long)gru_base[bid], order); | 457 | free_pages((unsigned long)gru_base[bid], order); |
450 | 458 | ||
@@ -469,7 +477,11 @@ struct vm_operations_struct gru_vm_ops = {
469 | .fault = gru_fault, | 477 | .fault = gru_fault, |
470 | }; | 478 | }; |
471 | 479 | ||
480 | #ifndef MODULE | ||
472 | fs_initcall(gru_init); | 481 | fs_initcall(gru_init); |
482 | #else | ||
483 | module_init(gru_init); | ||
484 | #endif | ||
473 | module_exit(gru_exit); | 485 | module_exit(gru_exit); |
474 | 486 | ||
475 | module_param(gru_options, ulong, 0644); | 487 | module_param(gru_options, ulong, 0644); |