author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/arm/mm/fault-armv.c
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--  arch/arm/mm/fault-armv.c  223
1 file changed, 223 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
new file mode 100644
index 000000000000..01967ddeef53
--- /dev/null
+++ b/arch/arm/mm/fault-armv.c
@@ -0,0 +1,223 @@
/*
 * linux/arch/arm/mm/fault-armv.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

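/*
 * The PTE bits that adjust_pte() clears to make an aliased user
 * mapping safe: L_PTE_CACHEABLE always, plus L_PTE_BUFFERABLE when
 * check_writebuffer_bugs() below finds that the write buffer also
 * aliases by virtual address.
 */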
static unsigned long shared_pte_mask = L_PTE_CACHEABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 */
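/*
 * Background: a virtually indexed, virtually tagged (VIVT) data cache
 * can hold two dirty copies of one physical page under two different
 * virtual addresses.  Clearing L_PTE_CACHEABLE on all the shared
 * mappings removes the cache alias entirely; writes still go through
 * the write buffer, which is safe as long as the buffer merges stores
 * by physical address (see check_writebuffer_bugs() below).
 */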
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte, entry;
        int ret = 0;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*pgd))
                goto no_pgd;
        if (pgd_bad(*pgd))
                goto bad_pgd;

        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd))
                goto no_pmd;
        if (pmd_bad(*pmd))
                goto bad_pmd;

        pte = pte_offset_map(pmd, address);
        entry = *pte;

        /*
         * If this page isn't present, or is already set up to
         * fault (ie, is old), we can safely ignore any issues.
         */
        if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
                flush_cache_page(vma, address, pte_pfn(entry));
                pte_val(entry) &= ~shared_pte_mask;
                set_pte(pte, entry);
                flush_tlb_page(vma, address);
                ret = 1;
        }
        pte_unmap(pte);
        return ret;

bad_pgd:
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
no_pgd:
        return 0;

bad_pmd:
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
no_pmd:
        return 0;
}

static void
make_coherent(struct vm_area_struct *vma, unsigned long addr, struct page *page, int dirty)
{
        struct address_space *mapping = page_mapping(page);
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        struct prio_tree_iter iter;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

        if (!mapping)
                return;

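        /*
         * Worked example of the offset arithmetic below: with 4K pages,
         * a fault at vma->vm_start + 0x5000 in a mapping with
         * vm_pgoff = 2 gives pgoff = 2 + 5 = 7.  A second mapping of
         * the same file with vm_pgoff = 4 then sees the same page at
         * mpnt->vm_start + ((7 - 4) << PAGE_SHIFT).
         */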
        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        flush_dcache_mmap_lock(mapping);
        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
        }
        flush_dcache_mmap_unlock(mapping);
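        /*
         * If another in-mm mapping of the page was made uncacheable
         * above, the faulting mapping must be made uncacheable too;
         * otherwise no alias exists and writing back the cache lines
         * for this one mapping is enough.
         */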
        if (aliases)
                adjust_pte(vma, addr);
        else
                flush_cache_page(vma, addr, page_to_pfn(page));
}

/*
 * Take care of architecture-specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the page_table_lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (!pfn_valid(pfn))
                return;
        page = pfn_to_page(pfn);
        if (page_mapping(page)) {
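                /*
                 * PG_dcache_dirty is set by flush_dcache_page() when
                 * the kernel dirties a page cache page that has no
                 * user mapping yet; the actual flush is deferred
                 * until here, when a user mapping appears.
                 */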
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

                if (dirty) {
                        /*
                         * This is our first userspace mapping of this page.
                         * Ensure that the physical page is coherent with
                         * the kernel mapping.
                         *
                         * FIXME: only need to do this on VIVT and aliasing
                         * VIPT cache architectures.  We can do that
                         * by choosing whether to set this bit...
                         */
                        __cpuc_flush_dcache_page(page_address(page));
                }

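                /*
                 * Only a VIVT cache can hold virtual aliases of this
                 * page, so the shared-mapping scan is needed (and
                 * worth its cost) only there.
                 */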
                if (cache_is_vivt())
                        make_coherent(vma, addr, page, dirty);
        }
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
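/*
 * The test below maps one physical page at two virtual addresses and
 * stores 1 then 0 to the same physical word through the two aliases.
 * If the final read through the first alias still returns 1, the
 * write buffer tracked the stores by virtual address and let them
 * bypass each other: the bug we are probing for.
 */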
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();
        return val != zero;
}

void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        printk(KERN_INFO "CPU: Testing write buffer coherency: ");

        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
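                /*
                 * Deliberately bufferable but not cacheable: accesses
                 * through these two aliases bypass the cache and
                 * exercise only the write buffer behaviour under test.
                 */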
                pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
                                         L_PTE_DIRTY|L_PTE_WRITE|
                                         L_PTE_BUFFERABLE);

                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page";
        }

        if (v) {
                printk("failed, %s\n", reason);
                shared_pte_mask |= L_PTE_BUFFERABLE;
        } else {
                printk("ok\n");
        }
}