author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/sparc64/mm/tlb.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/sparc64/mm/tlb.c')
-rw-r--r--	arch/sparc64/mm/tlb.c	151
1 file changed, 151 insertions, 0 deletions
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
new file mode 100644
index 000000000000..90ca99d0b89c
--- /dev/null
+++ b/arch/sparc64/mm/tlb.c
@@ -0,0 +1,151 @@
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
	{ NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };

void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}
}

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	if (pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	if (mp->tlb_frozen)
		return;

	nr = mp->tlb_nr;

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}

void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr = mp->tlb_nr;
	long s = start, e = end, vpte_base;

	if (mp->tlb_frozen)
		return;

	/* If start is greater than end, that is a real problem. */
	BUG_ON(start > end);

	/* However, straddling the VA space hole is quite normal. */
	s &= PMD_MASK;
	e = (e + PMD_SIZE - 1) & PMD_MASK;

	vpte_base = (tlb_type == spitfire ?
		     VPTE_BASE_SPITFIRE :
		     VPTE_BASE_CHEETAH);

	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	start = vpte_base + (s >> (PAGE_SHIFT - 3));
	end = vpte_base + (e >> (PAGE_SHIFT - 3));

	/* If the request straddles the VA space hole, we
	 * need to swap start and end.  The reason this
	 * occurs is that "vpte_base" is the center of
	 * the linear page table mapping area.  Thus,
	 * high addresses with the sign bit set map to
	 * addresses below vpte_base and non-sign bit
	 * addresses map to addresses above vpte_base.
	 */
	if (end < start) {
		unsigned long tmp = start;

		start = end;
		end = tmp;
	}

	while (start < end) {
		mp->vaddrs[nr] = start;
		mp->tlb_nr = ++nr;
		if (nr >= TLB_BATCH_NR) {
			flush_tlb_pending();
			nr = 0;
		}
		start += PAGE_SIZE;
	}
	if (nr)
		flush_tlb_pending();
}
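
The start/end swap in flush_tlb_pgtables() follows from signed arithmetic: sparc64 user addresses are sign-extended, so an address in the upper half of the VA space is negative when held in a long, the arithmetic right shift preserves that sign, and the computed VPTE entry lands below vpte_base, while lower-half addresses land above it. The standalone program below is a minimal sketch of just that index calculation, not kernel code; the VPTE_BASE value, the vpte_entry() helper, and the two example addresses are illustrative choices, and it assumes the usual arithmetic right shift for negative signed values, as the kernel code itself does.

#include <stdio.h>

/* Minimal sketch, not kernel code: with 8 KB pages (PAGE_SHIFT = 13) and
 * 8-byte PTEs, a virtual address selects the VPTE entry at
 *     vpte_base + ((long)vaddr >> (PAGE_SHIFT - 3))
 * The cast to long is the point: sign-extended (upper-half) addresses
 * shift to a negative offset and land below vpte_base.
 */
#define PAGE_SHIFT	13
#define VPTE_BASE	0xfffffffe00000000UL	/* illustrative placement only */

static unsigned long vpte_entry(unsigned long vaddr)
{
	/* Negative right shift is arithmetic on common compilers. */
	return VPTE_BASE + ((long)vaddr >> (PAGE_SHIFT - 3));
}

int main(void)
{
	unsigned long low  = 0x0000000000400000UL;	/* lower half of the VA space  */
	unsigned long high = 0xfffff80000400000UL;	/* upper half, sign bit set    */

	printf("low  %016lx -> vpte %016lx\n", low, vpte_entry(low));
	printf("high %016lx -> vpte %016lx\n", high, vpte_entry(high));

	/* A range running from "low" to "high" thus gives
	 * vpte_entry(end) < vpte_entry(start), which is exactly the
	 * end < start case the swap above handles.
	 */
	return 0;
}

With these inputs the high address prints a VPTE entry below the low one, matching the source comment about sign-bit addresses mapping below vpte_base.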