diff options
author | Jack Steiner <steiner@sgi.com> | 2009-04-02 19:59:04 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-02 22:05:05 -0400 |
commit | fe5bb6b00c3a9374841d651e01694fe4190a677e (patch) | |
tree | 8580ba3dac70d236261557a458df535cdc0e2acd /drivers | |
parent | 66666e50fcd69d80117d7d243ce02e1f774cbaf5 (diff) |
sgi-gru: misc GRU cleanup
Misc trivial GRU driver fixes:
- fix long lines
- eliminate extra whitespace
- eliminate compiler warning
- better validation of invalidate user parameters
- bug fix for GRU TLB flush (not the cpu TLB flush)
These changes are all internal to the SGI GRU driver and have no effect
on the base kernel.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/misc/sgi-gru/gru_instructions.h | 22 | ||||
-rw-r--r-- | drivers/misc/sgi-gru/grufault.c | 19 | ||||
-rw-r--r-- | drivers/misc/sgi-gru/grufile.c | 25 | ||||
-rw-r--r-- | drivers/misc/sgi-gru/gruhandles.h | 2 | ||||
-rw-r--r-- | drivers/misc/sgi-gru/grukservices.c | 4 | ||||
-rw-r--r-- | drivers/misc/sgi-gru/grumain.c | 6 | ||||
-rw-r--r-- | drivers/misc/sgi-gru/grutables.h | 15 | ||||
-rw-r--r-- | drivers/misc/sgi-gru/grutlbpurge.c | 5 |
8 files changed, 55 insertions, 43 deletions
diff --git a/drivers/misc/sgi-gru/gru_instructions.h b/drivers/misc/sgi-gru/gru_instructions.h index 48762e7b98be..3fde33c1e8f3 100644 --- a/drivers/misc/sgi-gru/gru_instructions.h +++ b/drivers/misc/sgi-gru/gru_instructions.h | |||
@@ -19,8 +19,11 @@ | |||
19 | #ifndef __GRU_INSTRUCTIONS_H__ | 19 | #ifndef __GRU_INSTRUCTIONS_H__ |
20 | #define __GRU_INSTRUCTIONS_H__ | 20 | #define __GRU_INSTRUCTIONS_H__ |
21 | 21 | ||
22 | #define gru_flush_cache_hook(p) | 22 | extern int gru_check_status_proc(void *cb); |
23 | #define gru_emulator_wait_hook(p, w) | 23 | extern int gru_wait_proc(void *cb); |
24 | extern void gru_wait_abort_proc(void *cb); | ||
25 | |||
26 | |||
24 | 27 | ||
25 | /* | 28 | /* |
26 | * Architecture dependent functions | 29 | * Architecture dependent functions |
@@ -29,16 +32,16 @@ | |||
29 | #if defined(CONFIG_IA64) | 32 | #if defined(CONFIG_IA64) |
30 | #include <linux/compiler.h> | 33 | #include <linux/compiler.h> |
31 | #include <asm/intrinsics.h> | 34 | #include <asm/intrinsics.h> |
32 | #define __flush_cache(p) ia64_fc(p) | 35 | #define __flush_cache(p) ia64_fc((unsigned long)p) |
33 | /* Use volatile on IA64 to ensure ordering via st4.rel */ | 36 | /* Use volatile on IA64 to ensure ordering via st4.rel */ |
34 | #define gru_ordered_store_int(p,v) \ | 37 | #define gru_ordered_store_int(p, v) \ |
35 | do { \ | 38 | do { \ |
36 | barrier(); \ | 39 | barrier(); \ |
37 | *((volatile int *)(p)) = v; /* force st.rel */ \ | 40 | *((volatile int *)(p)) = v; /* force st.rel */ \ |
38 | } while (0) | 41 | } while (0) |
39 | #elif defined(CONFIG_X86_64) | 42 | #elif defined(CONFIG_X86_64) |
40 | #define __flush_cache(p) clflush(p) | 43 | #define __flush_cache(p) clflush(p) |
41 | #define gru_ordered_store_int(p,v) \ | 44 | #define gru_ordered_store_int(p, v) \ |
42 | do { \ | 45 | do { \ |
43 | barrier(); \ | 46 | barrier(); \ |
44 | *(int *)p = v; \ | 47 | *(int *)p = v; \ |
@@ -558,20 +561,19 @@ extern int gru_get_cb_exception_detail(void *cb, | |||
558 | 561 | ||
559 | #define GRU_EXC_STR_SIZE 256 | 562 | #define GRU_EXC_STR_SIZE 256 |
560 | 563 | ||
561 | extern int gru_check_status_proc(void *cb); | ||
562 | extern int gru_wait_proc(void *cb); | ||
563 | extern void gru_wait_abort_proc(void *cb); | ||
564 | 564 | ||
565 | /* | 565 | /* |
566 | * Control block definition for checking status | 566 | * Control block definition for checking status |
567 | */ | 567 | */ |
568 | struct gru_control_block_status { | 568 | struct gru_control_block_status { |
569 | unsigned int icmd :1; | 569 | unsigned int icmd :1; |
570 | unsigned int unused1 :31; | 570 | unsigned int ima :3; |
571 | unsigned int reserved0 :4; | ||
572 | unsigned int unused1 :24; | ||
571 | unsigned int unused2 :24; | 573 | unsigned int unused2 :24; |
572 | unsigned int istatus :2; | 574 | unsigned int istatus :2; |
573 | unsigned int isubstatus :4; | 575 | unsigned int isubstatus :4; |
574 | unsigned int inused3 :2; | 576 | unsigned int unused3 :2; |
575 | }; | 577 | }; |
576 | 578 | ||
577 | /* Get CB status */ | 579 | /* Get CB status */ |
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 3ee698ad8599..f85d27306789 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c | |||
@@ -368,6 +368,7 @@ failupm: | |||
368 | 368 | ||
369 | failfmm: | 369 | failfmm: |
370 | /* FMM state on UPM call */ | 370 | /* FMM state on UPM call */ |
371 | gru_flush_cache(tfh); | ||
371 | STAT(tlb_dropin_fail_fmm); | 372 | STAT(tlb_dropin_fail_fmm); |
372 | gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); | 373 | gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state); |
373 | return 0; | 374 | return 0; |
@@ -497,10 +498,8 @@ int gru_handle_user_call_os(unsigned long cb) | |||
497 | if (!gts) | 498 | if (!gts) |
498 | return -EINVAL; | 499 | return -EINVAL; |
499 | 500 | ||
500 | if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { | 501 | if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) |
501 | ret = -EINVAL; | ||
502 | goto exit; | 502 | goto exit; |
503 | } | ||
504 | 503 | ||
505 | /* | 504 | /* |
506 | * If force_unload is set, the UPM TLB fault is phony. The task | 505 | * If force_unload is set, the UPM TLB fault is phony. The task |
@@ -508,6 +507,10 @@ int gru_handle_user_call_os(unsigned long cb) | |||
508 | * unload the context. The task will page fault and assign a new | 507 | * unload the context. The task will page fault and assign a new |
509 | * context. | 508 | * context. |
510 | */ | 509 | */ |
510 | if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 && | ||
511 | gts->ts_blade != uv_numa_blade_id()) | ||
512 | gts->ts_force_unload = 1; | ||
513 | |||
511 | ret = -EAGAIN; | 514 | ret = -EAGAIN; |
512 | cbrnum = thread_cbr_number(gts, ucbnum); | 515 | cbrnum = thread_cbr_number(gts, ucbnum); |
513 | if (gts->ts_force_unload) { | 516 | if (gts->ts_force_unload) { |
@@ -541,11 +544,13 @@ int gru_get_exception_detail(unsigned long arg) | |||
541 | if (!gts) | 544 | if (!gts) |
542 | return -EINVAL; | 545 | return -EINVAL; |
543 | 546 | ||
544 | if (gts->ts_gru) { | 547 | ucbnum = get_cb_number((void *)excdet.cb); |
545 | ucbnum = get_cb_number((void *)excdet.cb); | 548 | if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) { |
549 | ret = -EINVAL; | ||
550 | } else if (gts->ts_gru) { | ||
546 | cbrnum = thread_cbr_number(gts, ucbnum); | 551 | cbrnum = thread_cbr_number(gts, ucbnum); |
547 | cbe = get_cbe_by_index(gts->ts_gru, cbrnum); | 552 | cbe = get_cbe_by_index(gts->ts_gru, cbrnum); |
548 | prefetchw(cbe); /* Harmless on hardware, required for emulator */ | 553 | prefetchw(cbe);/* Harmless on hardware, required for emulator */ |
549 | excdet.opc = cbe->opccpy; | 554 | excdet.opc = cbe->opccpy; |
550 | excdet.exopc = cbe->exopccpy; | 555 | excdet.exopc = cbe->exopccpy; |
551 | excdet.ecause = cbe->ecause; | 556 | excdet.ecause = cbe->ecause; |
@@ -609,7 +614,7 @@ int gru_user_flush_tlb(unsigned long arg) | |||
609 | if (!gts) | 614 | if (!gts) |
610 | return -EINVAL; | 615 | return -EINVAL; |
611 | 616 | ||
612 | gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.vaddr + req.len); | 617 | gru_flush_tlb_range(gts->ts_gms, req.vaddr, req.len); |
613 | gru_unlock_gts(gts); | 618 | gru_unlock_gts(gts); |
614 | 619 | ||
615 | return 0; | 620 | return 0; |
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index c67e4e8bd62c..15292e5f74a1 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c | |||
@@ -45,7 +45,8 @@ | |||
45 | #include <asm/uv/uv_mmrs.h> | 45 | #include <asm/uv/uv_mmrs.h> |
46 | 46 | ||
47 | struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; | 47 | struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly; |
48 | unsigned long gru_start_paddr, gru_end_paddr __read_mostly; | 48 | unsigned long gru_start_paddr __read_mostly; |
49 | unsigned long gru_end_paddr __read_mostly; | ||
49 | struct gru_stats_s gru_stats; | 50 | struct gru_stats_s gru_stats; |
50 | 51 | ||
51 | /* Guaranteed user available resources on each node */ | 52 | /* Guaranteed user available resources on each node */ |
@@ -101,7 +102,7 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma) | |||
101 | return -EPERM; | 102 | return -EPERM; |
102 | 103 | ||
103 | if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || | 104 | if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) || |
104 | vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) | 105 | vma->vm_end & (GRU_GSEG_PAGESIZE - 1)) |
105 | return -EINVAL; | 106 | return -EINVAL; |
106 | 107 | ||
107 | vma->vm_flags |= | 108 | vma->vm_flags |= |
@@ -295,7 +296,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) | |||
295 | for_each_online_node(nid) { | 296 | for_each_online_node(nid) { |
296 | bid = uv_node_to_blade_id(nid); | 297 | bid = uv_node_to_blade_id(nid); |
297 | pnode = uv_node_to_pnode(nid); | 298 | pnode = uv_node_to_pnode(nid); |
298 | if (gru_base[bid]) | 299 | if (bid < 0 || gru_base[bid]) |
299 | continue; | 300 | continue; |
300 | page = alloc_pages_node(nid, GFP_KERNEL, order); | 301 | page = alloc_pages_node(nid, GFP_KERNEL, order); |
301 | if (!page) | 302 | if (!page) |
@@ -308,11 +309,11 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr) | |||
308 | dsrbytes = 0; | 309 | dsrbytes = 0; |
309 | cbrs = 0; | 310 | cbrs = 0; |
310 | for (gru = gru_base[bid]->bs_grus, chip = 0; | 311 | for (gru = gru_base[bid]->bs_grus, chip = 0; |
311 | chip < GRU_CHIPLETS_PER_BLADE; | 312 | chip < GRU_CHIPLETS_PER_BLADE; |
312 | chip++, gru++) { | 313 | chip++, gru++) { |
313 | paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); | 314 | paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip); |
314 | vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); | 315 | vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip); |
315 | gru_init_chiplet(gru, paddr, vaddr, bid, nid, chip); | 316 | gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip); |
316 | n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; | 317 | n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE; |
317 | cbrs = max(cbrs, n); | 318 | cbrs = max(cbrs, n); |
318 | n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; | 319 | n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES; |
@@ -370,26 +371,26 @@ static int __init gru_init(void) | |||
370 | void *gru_start_vaddr; | 371 | void *gru_start_vaddr; |
371 | 372 | ||
372 | if (!is_uv_system()) | 373 | if (!is_uv_system()) |
373 | return 0; | 374 | return -ENODEV; |
374 | 375 | ||
375 | #if defined CONFIG_IA64 | 376 | #if defined CONFIG_IA64 |
376 | gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ | 377 | gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */ |
377 | #else | 378 | #else |
378 | gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & | 379 | gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) & |
379 | 0x7fffffffffffUL; | 380 | 0x7fffffffffffUL; |
380 | |||
381 | #endif | 381 | #endif |
382 | gru_start_vaddr = __va(gru_start_paddr); | 382 | gru_start_vaddr = __va(gru_start_paddr); |
383 | gru_end_paddr = gru_start_paddr + MAX_NUMNODES * GRU_SIZE; | 383 | gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE; |
384 | printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", | 384 | printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n", |
385 | gru_start_paddr, gru_end_paddr); | 385 | gru_start_paddr, gru_end_paddr); |
386 | irq = get_base_irq(); | 386 | irq = get_base_irq(); |
387 | for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { | 387 | for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) { |
388 | ret = request_irq(irq + chip, gru_intr, 0, id, NULL); | 388 | ret = request_irq(irq + chip, gru_intr, 0, id, NULL); |
389 | /* TODO: fix irq handling on x86. For now ignore failures because | 389 | /* TODO: fix irq handling on x86. For now ignore failure because |
390 | * interrupts are not required & not yet fully supported */ | 390 | * interrupts are not required & not yet fully supported */ |
391 | if (ret) { | 391 | if (ret) { |
392 | printk("!!!WARNING: GRU ignoring request failure!!!\n"); | 392 | printk(KERN_WARNING |
393 | "!!!WARNING: GRU ignoring request failure!!!\n"); | ||
393 | ret = 0; | 394 | ret = 0; |
394 | } | 395 | } |
395 | if (ret) { | 396 | if (ret) { |
@@ -469,7 +470,11 @@ struct vm_operations_struct gru_vm_ops = { | |||
469 | .fault = gru_fault, | 470 | .fault = gru_fault, |
470 | }; | 471 | }; |
471 | 472 | ||
473 | #ifndef MODULE | ||
472 | fs_initcall(gru_init); | 474 | fs_initcall(gru_init); |
475 | #else | ||
476 | module_init(gru_init); | ||
477 | #endif | ||
473 | module_exit(gru_exit); | 478 | module_exit(gru_exit); |
474 | 479 | ||
475 | module_param(gru_options, ulong, 0644); | 480 | module_param(gru_options, ulong, 0644); |
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h index b63018d60fe1..fb72a52a34a8 100644 --- a/drivers/misc/sgi-gru/gruhandles.h +++ b/drivers/misc/sgi-gru/gruhandles.h | |||
@@ -489,7 +489,7 @@ enum gru_cbr_state { | |||
489 | * 64m 26 8 | 489 | * 64m 26 8 |
490 | * ... | 490 | * ... |
491 | */ | 491 | */ |
492 | #define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2: (sh)) >> 1) - 6) | 492 | #define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6) |
493 | #define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh)) | 493 | #define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh)) |
494 | 494 | ||
495 | /* minimum TLB purge count to ensure a full purge */ | 495 | /* minimum TLB purge count to ensure a full purge */ |
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c index 880c55dfb662..3e36b7b6e1c8 100644 --- a/drivers/misc/sgi-gru/grukservices.c +++ b/drivers/misc/sgi-gru/grukservices.c | |||
@@ -122,7 +122,7 @@ int gru_get_cb_exception_detail(void *cb, | |||
122 | struct gru_control_block_extended *cbe; | 122 | struct gru_control_block_extended *cbe; |
123 | 123 | ||
124 | cbe = get_cbe(GRUBASE(cb), get_cb_number(cb)); | 124 | cbe = get_cbe(GRUBASE(cb), get_cb_number(cb)); |
125 | prefetchw(cbe); /* Harmless on hardware, required for emulator */ | 125 | prefetchw(cbe); /* Harmless on hardware, required for emulator */ |
126 | excdet->opc = cbe->opccpy; | 126 | excdet->opc = cbe->opccpy; |
127 | excdet->exopc = cbe->exopccpy; | 127 | excdet->exopc = cbe->exopccpy; |
128 | excdet->ecause = cbe->ecause; | 128 | excdet->ecause = cbe->ecause; |
@@ -437,7 +437,7 @@ static int send_message_failure(void *cb, | |||
437 | break; | 437 | break; |
438 | case CBSS_PUT_NACKED: | 438 | case CBSS_PUT_NACKED: |
439 | STAT(mesq_send_put_nacked); | 439 | STAT(mesq_send_put_nacked); |
440 | m =mq + (gru_get_amo_value_head(cb) << 6); | 440 | m = mq + (gru_get_amo_value_head(cb) << 6); |
441 | gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); | 441 | gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA); |
442 | if (gru_wait(cb) == CBS_IDLE) | 442 | if (gru_wait(cb) == CBS_IDLE) |
443 | ret = MQE_OK; | 443 | ret = MQE_OK; |
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 3d2fc216bae5..1ce32bcd0259 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c | |||
@@ -432,8 +432,8 @@ static inline long gru_copy_handle(void *d, void *s) | |||
432 | return GRU_HANDLE_BYTES; | 432 | return GRU_HANDLE_BYTES; |
433 | } | 433 | } |
434 | 434 | ||
435 | static void gru_prefetch_context(void *gseg, void *cb, void *cbe, unsigned long cbrmap, | 435 | static void gru_prefetch_context(void *gseg, void *cb, void *cbe, |
436 | unsigned long length) | 436 | unsigned long cbrmap, unsigned long length) |
437 | { | 437 | { |
438 | int i, scr; | 438 | int i, scr; |
439 | 439 | ||
@@ -773,8 +773,8 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
773 | return VM_FAULT_SIGBUS; | 773 | return VM_FAULT_SIGBUS; |
774 | 774 | ||
775 | again: | 775 | again: |
776 | preempt_disable(); | ||
777 | mutex_lock(>s->ts_ctxlock); | 776 | mutex_lock(>s->ts_ctxlock); |
777 | preempt_disable(); | ||
778 | if (gts->ts_gru) { | 778 | if (gts->ts_gru) { |
779 | if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { | 779 | if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { |
780 | STAT(migrated_nopfn_unload); | 780 | STAT(migrated_nopfn_unload); |
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index a78f70deeb59..db3fe08bf79e 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h | |||
@@ -278,13 +278,12 @@ struct gru_stats_s { | |||
278 | /* Generate a GRU asid value from a GRU base asid & a virtual address. */ | 278 | /* Generate a GRU asid value from a GRU base asid & a virtual address. */ |
279 | #if defined CONFIG_IA64 | 279 | #if defined CONFIG_IA64 |
280 | #define VADDR_HI_BIT 64 | 280 | #define VADDR_HI_BIT 64 |
281 | #define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) | ||
282 | #elif defined CONFIG_X86_64 | 281 | #elif defined CONFIG_X86_64 |
283 | #define VADDR_HI_BIT 48 | 282 | #define VADDR_HI_BIT 48 |
284 | #define GRUREGION(addr) (0) /* ZZZ could do better */ | ||
285 | #else | 283 | #else |
286 | #error "Unsupported architecture" | 284 | #error "Unsupported architecture" |
287 | #endif | 285 | #endif |
286 | #define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) | ||
288 | #define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) | 287 | #define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) |
289 | 288 | ||
290 | /*------------------------------------------------------------------------------ | 289 | /*------------------------------------------------------------------------------ |
@@ -297,12 +296,12 @@ struct gru_state; | |||
297 | * This structure is pointed to from the mmstruct via the notifier pointer. | 296 | * This structure is pointed to from the mmstruct via the notifier pointer. |
298 | * There is one of these per address space. | 297 | * There is one of these per address space. |
299 | */ | 298 | */ |
300 | struct gru_mm_tracker { | 299 | struct gru_mm_tracker { /* pack to reduce size */ |
301 | unsigned int mt_asid_gen; /* ASID wrap count */ | 300 | unsigned int mt_asid_gen:24; /* ASID wrap count */ |
302 | int mt_asid; /* current base ASID for gru */ | 301 | unsigned int mt_asid:24; /* current base ASID for gru */ |
303 | unsigned short mt_ctxbitmap; /* bitmap of contexts using | 302 | unsigned short mt_ctxbitmap:16;/* bitmap of contexts using |
304 | asid */ | 303 | asid */ |
305 | }; | 304 | } __attribute__ ((packed)); |
306 | 305 | ||
307 | struct gru_mm_struct { | 306 | struct gru_mm_struct { |
308 | struct mmu_notifier ms_notifier; | 307 | struct mmu_notifier ms_notifier; |
@@ -359,6 +358,8 @@ struct gru_thread_state { | |||
359 | required for contest */ | 358 | required for contest */ |
360 | unsigned char ts_cbr_au_count;/* Number of CBR resources | 359 | unsigned char ts_cbr_au_count;/* Number of CBR resources |
361 | required for contest */ | 360 | required for contest */ |
361 | char ts_blade; /* If >= 0, migrate context if | ||
362 | ref from diferent blade */ | ||
362 | char ts_force_unload;/* force context to be unloaded | 363 | char ts_force_unload;/* force context to be unloaded |
363 | after migration */ | 364 | after migration */ |
364 | char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each | 365 | char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each |
diff --git a/drivers/misc/sgi-gru/grutlbpurge.c b/drivers/misc/sgi-gru/grutlbpurge.c index c84496a77691..4e438e4dd2a8 100644 --- a/drivers/misc/sgi-gru/grutlbpurge.c +++ b/drivers/misc/sgi-gru/grutlbpurge.c | |||
@@ -187,7 +187,7 @@ void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, | |||
187 | " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", | 187 | " FLUSH gruid %d, asid 0x%x, num %ld, cbmap 0x%x\n", |
188 | gid, asid, num, asids->mt_ctxbitmap); | 188 | gid, asid, num, asids->mt_ctxbitmap); |
189 | tgh = get_lock_tgh_handle(gru); | 189 | tgh = get_lock_tgh_handle(gru); |
190 | tgh_invalidate(tgh, start, 0, asid, grupagesize, 0, | 190 | tgh_invalidate(tgh, start, ~0, asid, grupagesize, 0, |
191 | num - 1, asids->mt_ctxbitmap); | 191 | num - 1, asids->mt_ctxbitmap); |
192 | get_unlock_tgh_handle(tgh); | 192 | get_unlock_tgh_handle(tgh); |
193 | } else { | 193 | } else { |
@@ -212,9 +212,8 @@ void gru_flush_all_tlb(struct gru_state *gru) | |||
212 | 212 | ||
213 | gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid); | 213 | gru_dbg(grudev, "gru %p, gid %d\n", gru, gru->gs_gid); |
214 | tgh = get_lock_tgh_handle(gru); | 214 | tgh = get_lock_tgh_handle(gru); |
215 | tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0); | 215 | tgh_invalidate(tgh, 0, ~0, 0, 1, 1, GRUMAXINVAL - 1, 0xffff); |
216 | get_unlock_tgh_handle(tgh); | 216 | get_unlock_tgh_handle(tgh); |
217 | preempt_enable(); | ||
218 | } | 217 | } |
219 | 218 | ||
220 | /* | 219 | /* |