Diffstat (limited to 'arch/sh/mm/tlb-flush.c')
-rw-r--r--	arch/sh/mm/tlb-flush.c	18
1 file changed, 10 insertions, 8 deletions
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index fd7e42bcaa40..73ec7f6084fa 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -14,12 +14,12 @@
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
+	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
@@ -39,20 +39,21 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 {
 	struct mm_struct *mm = vma->vm_mm;
 
-	if (mm->context != NO_CONTEXT) {
+	if (mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context = NO_CONTEXT;
+			mm->context.id = NO_CONTEXT;
 			if (mm == current->mm)
 				activate_context(mm);
 		} else {
-			unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK;
+			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
+			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -81,9 +82,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
 		flush_tlb_all();
 	} else {
-		unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK;
+		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
+		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
@@ -101,11 +103,11 @@ void flush_tlb_mm(struct mm_struct *mm)
 {
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context != NO_CONTEXT) {
+	if (mm->context.id != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context = NO_CONTEXT;
+		mm->context.id = NO_CONTEXT;
 		if (mm == current->mm)
 			activate_context(mm);
 		local_irq_restore(flags);