Diffstat (limited to 'arch/sh/mm/tlb-flush.c')
-rw-r--r--	arch/sh/mm/tlb-flush.c	101
1 file changed, 80 insertions, 21 deletions
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6084fa..d2f7b4a2eb05 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,24 +2,28 @@
  * TLB flushing operations for SH with an MMU.
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
 #include <linux/mm.h>
+#include <linux/io.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
+	unsigned int cpu = smp_processor_id();
+
+	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
 		unsigned long flags;
 		unsigned long asid;
 		unsigned long saved_asid = MMU_NO_ASID;
 
-		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, vma->vm_mm);
 		page &= PAGE_MASK;
 
 		local_irq_save(flags);
@@ -27,33 +31,34 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			saved_asid = get_asid();
 			set_asid(asid);
 		}
-		__flush_tlb_page(asid, page);
+		local_flush_tlb_one(asid, page);
 		if (saved_asid != MMU_NO_ASID)
 			set_asid(saved_asid);
 		local_irq_restore(flags);
 	}
 }
 
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-		     unsigned long end)
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			   unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	unsigned int cpu = smp_processor_id();
 
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 		int size;
 
 		local_irq_save(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-			mm->context.id = NO_CONTEXT;
+			cpu_context(cpu, mm) = NO_CONTEXT;
 			if (mm == current->mm)
-				activate_context(mm);
+				activate_context(mm, cpu);
 		} else {
 			unsigned long asid;
 			unsigned long saved_asid = MMU_NO_ASID;
 
-			asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
+			asid = cpu_asid(cpu, mm);
 			start &= PAGE_MASK;
 			end += (PAGE_SIZE - 1);
 			end &= PAGE_MASK;
@@ -62,7 +67,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 				set_asid(asid);
 			}
 			while (start < end) {
-				__flush_tlb_page(asid, start);
+				local_flush_tlb_one(asid, start);
 				start += PAGE_SIZE;
 			}
 			if (saved_asid != MMU_NO_ASID)
@@ -72,26 +77,27 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 	int size;
 
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
-		flush_tlb_all();
+		local_flush_tlb_all();
 	} else {
 		unsigned long asid;
 		unsigned long saved_asid = get_asid();
 
-		asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
+		asid = cpu_asid(cpu, &init_mm);
 		start &= PAGE_MASK;
 		end += (PAGE_SIZE - 1);
 		end &= PAGE_MASK;
 		set_asid(asid);
 		while (start < end) {
-			__flush_tlb_page(asid, start);
+			local_flush_tlb_one(asid, start);
 			start += PAGE_SIZE;
 		}
 		set_asid(saved_asid);
@@ -99,22 +105,24 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	local_irq_restore(flags);
 }
 
-void flush_tlb_mm(struct mm_struct *mm)
+void local_flush_tlb_mm(struct mm_struct *mm)
 {
+	unsigned int cpu = smp_processor_id();
+
 	/* Invalidate all TLB of this process. */
 	/* Instead of invalidating each TLB, we get new MMU context. */
-	if (mm->context.id != NO_CONTEXT) {
+	if (cpu_context(cpu, mm) != NO_CONTEXT) {
 		unsigned long flags;
 
 		local_irq_save(flags);
-		mm->context.id = NO_CONTEXT;
+		cpu_context(cpu, mm) = NO_CONTEXT;
 		if (mm == current->mm)
-			activate_context(mm);
+			activate_context(mm, cpu);
 		local_irq_restore(flags);
 	}
 }
 
-void flush_tlb_all(void)
+void local_flush_tlb_all(void)
 {
 	unsigned long flags, status;
 
@@ -132,3 +140,54 @@ void flush_tlb_all(void)
 	ctrl_barrier();
 	local_irq_restore(flags);
 }
+
+void update_mmu_cache(struct vm_area_struct *vma,
+		      unsigned long address, pte_t pte)
+{
+	unsigned long flags;
+	unsigned long pteval;
+	unsigned long vpn;
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+	struct address_space *mapping;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	mapping = page_mapping(page);
+	if (mapping) {
+		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+		if (dirty)
+			__flush_wback_region((void *)P1SEGADDR(phys),
+					     PAGE_SIZE);
+	}
+
+	local_irq_save(flags);
+
+	/* Set PTEH register */
+	vpn = (address & MMU_VPN_MASK) | get_asid();
+	ctrl_outl(vpn, MMU_PTEH);
+
+	pteval = pte_val(pte);
+
+#ifdef CONFIG_CPU_HAS_PTEA
+	/* Set PTEA register */
+	/* TODO: make this look less hacky */
+	ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+	/* Set PTEL register */
+	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#if defined(CONFIG_SH_WRITETHROUGH) && defined(CONFIG_CPU_SH4)
+	pteval |= _PAGE_WT;
+#endif
+	/* conveniently, we want all the software flags to be 0 anyway */
+	ctrl_outl(pteval, MMU_PTEL);
+
+	/* Load the TLB */
+	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+	local_irq_restore(flags);
+}
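
A note on the helpers this diff relies on but does not define: the flush_tlb_*() entry points are renamed to local_flush_tlb_*() so that, on SMP, generic wrappers can broadcast flushes to other CPUs while UP builds simply map the old names onto the local variants. The cpu_context()/cpu_asid() accessors come from the companion <asm/mmu_context.h> change, which is outside this file's diff. As a rough, non-authoritative sketch of what that companion change presumably provides (the per-CPU array in mm_context_t and the exact macro bodies here are assumptions; MMU_CONTEXT_ASID_MASK and NR_CPUS are existing kernel symbols):

/*
 * Sketch only -- the real definitions live in the companion
 * include/asm-sh/mmu_context.h change, not shown in this diff.
 */
typedef struct {
	unsigned long id[NR_CPUS];	/* context version + ASID, per CPU */
} mm_context_t;

/* Per-CPU context word of an mm, and the hardware ASID held in it. */
#define cpu_context(cpu, mm)	((mm)->context.id[cpu])
#define cpu_asid(cpu, mm)	\
	(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)

Because cpu_context() expands to an lvalue, call sites above can both read it (the NO_CONTEXT checks) and assign to it (cpu_context(cpu, mm) = NO_CONTEXT), which is why the diff can replace mm->context.id uses one-for-one.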