author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2008-12-14 14:44:39 -0500
committer Paul Mackerras <paulus@samba.org>  2008-12-15 23:53:30 -0500
commit    e41e811a79a4e328005be2744c3076ebde455088
tree      9f5bea9ce50284d05a3abbe6573b74185c14c65e  /arch/powerpc/mm/tlb_32.c
parent    1a37a3fd7f12d8f9f720cceec84e23152e116668
powerpc/mm: Rename tlb_32.c and tlb_64.c to tlb_hash32.c and tlb_hash64.c
This renames the files to make it clear that they are used by the hash-based family of CPUs (the 603 is an exception in that family but is still handled by this code). This paves the way for the new tlb_nohash.c coming via a subsequent commit.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/tlb_32.c')
-rw-r--r--  arch/powerpc/mm/tlb_32.c  |  190
1 file changed, 0 insertions(+), 190 deletions(-)
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
deleted file mode 100644
index f9a47fee3927..000000000000
--- a/arch/powerpc/mm/tlb_32.c
+++ /dev/null
@@ -1,190 +0,0 @@
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU uses a hash table to store virtual to
 * physical translations, these routines flush entries from the
 * hash table also.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

/*
 * Called when unmapping pages to flush entries from the TLB/hash table.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
        unsigned long ptephys;

        if (Hash != 0) {
                ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(mm->context.id, addr, ptephys, 1);
        }
}
EXPORT_SYMBOL(flush_hash_entry);

/*
 * Called by ptep_set_access_flags, must flush on CPUs for which the
 * DSI handler can't just "fixup" the TLB on a write fault
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
        if (Hash != 0)
                return;
        _tlbie(addr);
}

/*
 * Called at the end of a mmu_gather operation to make sure the
 * TLB flush is completely done.
 */
void tlb_flush(struct mmu_gather *tlb)
{
        if (Hash == 0) {
                /*
                 * 603 needs to flush the whole TLB here since
                 * it doesn't use a hash table.
                 */
                _tlbia();
        }
}

/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 * since the hardware hash table functions as an extension of the
 * tlb as far as the linux tables are concerned, flush it too.
 *    -- Cort
 */

/*
 * 750 SMP is a Bad Idea because the 750 doesn't broadcast all
 * the cache operations on the bus.  Hence we need to use an IPI
 * to get the other CPU(s) to invalidate their TLBs.
 */
#ifdef CONFIG_SMP_750
#define FINISH_FLUSH  smp_send_tlb_invalidate(0)
#else
#define FINISH_FLUSH  do { } while (0)
#endif

static void flush_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end)
{
        pmd_t *pmd;
        unsigned long pmd_end;
        int count;
        unsigned int ctx = mm->context.id;

        if (Hash == 0) {
                _tlbia();
                return;
        }
        start &= PAGE_MASK;
        if (start >= end)
                return;
        end = (end - 1) | ~PAGE_MASK;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
        for (;;) {
                pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
                if (pmd_end > end)
                        pmd_end = end;
                if (!pmd_none(*pmd)) {
                        count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
                        flush_hash_pages(ctx, start, pmd_val(*pmd), count);
                }
                if (pmd_end == end)
                        break;
                start = pmd_end + 1;
                ++pmd;
        }
}

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_range(&init_mm, start, end);
        FINISH_FLUSH;
}

/*
 * Flush all the (user) entries for the address space described by mm.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        struct vm_area_struct *mp;

        if (Hash == 0) {
                _tlbia();
                return;
        }

        /*
         * It is safe to go down the mm's list of vmas when called
         * from dup_mmap, holding mmap_sem.  It would also be safe from
         * unmap_region or exit_mmap, but not from vmtruncate on SMP -
         * but it seems dup_mmap is the only SMP case which gets here.
         */
        for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
                flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
        FINISH_FLUSH;
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        struct mm_struct *mm;
        pmd_t *pmd;

        if (Hash == 0) {
                _tlbie(vmaddr);
                return;
        }
        mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
        pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
        if (!pmd_none(*pmd))
                flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
        FINISH_FLUSH;
}

/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        flush_range(vma->vm_mm, start, end);
        FINISH_FLUSH;
}