Diffstat (limited to 'arch/powerpc/mm/tlb_hash64.c')
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 211
1 file changed, 211 insertions, 0 deletions
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
new file mode 100644
index 000000000000..c931bc7d1079
--- /dev/null
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -0,0 +1,211 @@
/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *   Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *   and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *     Copyright (C) 1996 Paul Mackerras
 *
 *   Derived from "arch/i386/mm/init.c"
 *     Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *   Dave Engebretsen <engebret@us.ibm.com>
 *     Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/* This is declared as we are using the more or less generic
 * arch/powerpc/include/asm/tlb.h file -- tgall
 */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* We mask the address for the base page size. Huge pages will
	 * have applied their own masking already
	 */
	addr &= PAGE_MASK;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in 4K environment like
	 * for SPEs, we obtain the page size from the slice, which thus
	 * must still exist (and thus the VMA not reused) at the time
	 * of this call
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shut up gcc */
#endif
	} else
		psize = pte_pagesize_index(mm, addr, pte);

	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	vaddr = hpt_va(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);

	/*
	 * Check if we have an active batch on this CPU. If not, just
	 * flush now and return. For now, we do global invalidates
	 * in that case; it might be worth testing the mm cpu mask
	 * and deciding to use local invalidates instead...
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
		return;
	}

	/*
	 * This can happen when we are in the middle of a TLB batch and
	 * we encounter memory pressure (eg copy_page_range when it tries
	 * to allocate a new pte). If we have to reclaim memory and end
	 * up scanning and resetting referenced bits then our batch context
	 * will change mid stream.
	 *
	 * We also need to ensure only one page size is present in a given
	 * batch
	 */
	if (i != 0 && (mm != batch->mm || batch->psize != psize ||
		       batch->ssize != ssize)) {
		__flush_tlb_pending(batch);
		i = 0;
	}
	if (i == 0) {
		batch->mm = mm;
		batch->psize = psize;
		batch->ssize = ssize;
	}
	batch->pte[i] = rpte;
	batch->vaddr[i] = vaddr;
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
}
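
/*
 * Editor's note (illustrative, not part of the original file): the usual
 * caller of hpte_need_flush() is the Linux PTE update path, which passes
 * in the *old* PTE value whenever that PTE had been hashed. A minimal
 * sketch of that pattern follows; sketch_pte_update is a hypothetical
 * helper, and the real code performs the PTE read-modify-write atomically,
 * which is elided here.
 */
static inline void sketch_pte_update(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, unsigned long clr, int huge)
{
	unsigned long old = pte_val(*ptep);

	/* Simplified, non-atomic stand-in for the real PTE update */
	*ptep = __pte(old & ~clr);

	/* Only a PTE that was hashed has a hash table entry to invalidate */
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
}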

/*
 * This function is called when terminating an mmu batch or when a batch
 * is full. It will perform the flush of all the entries currently stored
 * in a batch.
 *
 * Must be called from within some kind of spinlock/non-preempt region...
 */
void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
{
	cpumask_t tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vaddr[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
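
/*
 * Editor's note (illustrative, not part of the original file): batch->active
 * is driven by the lazy MMU mode hooks. Roughly, arch_enter_lazy_mmu_mode()
 * marks the per-cpu batch active and arch_leave_lazy_mmu_mode() flushes
 * whatever has accumulated before deactivating it, along these lines
 * (a sketch under that assumption, not the exact hook implementation):
 */
static inline void sketch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)		/* anything queued by hpte_need_flush()? */
		__flush_tlb_pending(batch);
	batch->active = 0;		/* later flushes take the immediate path */
}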

/**
 * __flush_hash_table_range - Flush all HPTEs for a given address range
 *                            from the hash table (and the TLB). But keeps
 *                            the linux PTEs intact.
 *
 * @mm    : mm_struct of the target address space (generally init_mm)
 * @start : starting address
 * @end   : ending address (not included in the flush)
 *
 * This function is mostly to be used by some IO hotplug code in order
 * to remove all hash entries from a given address range used to map IO
 * space on a removed PCI-PCI bridge without tearing down the full mapping
 * since 64K pages may overlap with other bridges when using 64K pages
 * with 4K HW pages on IO space.
 *
 * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
 * and is implemented for small size rather than speed.
 */
#ifdef CONFIG_HOTPLUG

void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
			      unsigned long end)
{
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte(mm->pgd, start);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (!(pte & _PAGE_HASHPTE))
			continue;
		hpte_need_flush(mm, start, ptep, pte, 0);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
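
/*
 * Editor's note (illustrative, not part of the original file): a typical
 * IO-hotplug caller would flush the hash entries covering the removed
 * bridge's IO window before that virtual range is reused, along the lines
 * of (res being a hypothetical struct resource describing that window):
 *
 *	__flush_hash_table_range(&init_mm, res->start, res->end + 1);
 *
 * The end address is exclusive, hence the +1 on the inclusive resource end.
 */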

#endif /* CONFIG_HOTPLUG */