Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--	arch/arm/mm/fault-armv.c	100
1 file changed, 58 insertions(+), 42 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index d0d17b6a3703..0d414c28eb2c 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -16,6 +16,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/gfp.h>
 
 #include <asm/bugs.h>
 #include <asm/cacheflush.h>
@@ -23,6 +24,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include "mm.h"
+
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
@@ -34,28 +37,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
  * Therefore those configurations which might call adjust_pte (those
  * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn, pte_t *ptep)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte, entry;
+	pte_t entry = *ptep;
 	int ret;
 
-	pgd = pgd_offset(vma->vm_mm, address);
-	if (pgd_none(*pgd))
-		goto no_pgd;
-	if (pgd_bad(*pgd))
-		goto bad_pgd;
-
-	pmd = pmd_offset(pgd, address);
-	if (pmd_none(*pmd))
-		goto no_pmd;
-	if (pmd_bad(*pmd))
-		goto bad_pmd;
-
-	pte = pte_offset_map(pmd, address);
-	entry = *pte;
-
 	/*
 	 * If this page is present, it's actually being shared.
 	 */
@@ -66,33 +53,55 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		unsigned long pfn = pte_pfn(entry);
 		flush_cache_page(vma, address, pfn);
 		outer_flush_range((pfn << PAGE_SHIFT),
 				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
 		pte_val(entry) &= ~L_PTE_MT_MASK;
 		pte_val(entry) |= shared_pte_mask;
-		set_pte_at(vma->vm_mm, address, pte, entry);
+		set_pte_at(vma->vm_mm, address, ptep, entry);
 		flush_tlb_page(vma, address);
 	}
-	pte_unmap(pte);
+
 	return ret;
+}
+
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn)
+{
+	spinlock_t *ptl;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int ret;
 
-bad_pgd:
-	pgd_ERROR(*pgd);
-	pgd_clear(pgd);
-no_pgd:
-	return 0;
-
-bad_pmd:
-	pmd_ERROR(*pmd);
-	pmd_clear(pmd);
-no_pmd:
-	return 0;
+	pgd = pgd_offset(vma->vm_mm, address);
+	if (pgd_none_or_clear_bad(pgd))
+		return 0;
+
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 0;
+
+	/*
+	 * This is called while another page table is mapped, so we
+	 * must use the nested version.  This also means we need to
+	 * open-code the spin-locking.
+	 */
+	ptl = pte_lockptr(vma->vm_mm, pmd);
+	pte = pte_offset_map_nested(pmd, address);
+	spin_lock(ptl);
+
+	ret = do_adjust_pte(vma, address, pfn, pte);
+
+	spin_unlock(ptl);
+	pte_unmap_nested(pte);
+
+	return ret;
 }
 
 static void
-make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
+	unsigned long addr, pte_t *ptep, unsigned long pfn)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
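The goto-based error paths (bad_pgd/no_pgd, bad_pmd/no_pmd) are replaced by the generic pgd_none_or_clear_bad()/pmd_none_or_clear_bad() helpers, which fold the same none/bad checks and the pgd_ERROR()-plus-clear into one call. As a rough sketch of what the generic helper does (the real definition lives in include/linux/mm.h in kernels of this vintage):

	static inline int pgd_none_or_clear_bad(pgd_t *pgd)
	{
		if (pgd_none(*pgd))
			return 1;
		if (unlikely(pgd_bad(*pgd))) {
			pgd_clear_bad(pgd);	/* pgd_ERROR() + pgd_clear() */
			return 1;
		}
		return 0;
	}

The pte_offset_map_nested()/pte_unmap_nested() pair matters because adjust_pte() runs while the faulting PTE's own page table is already mapped; the nested variant uses a second kmap_atomic slot, so highmem page-table (highpte) configurations do not clobber the outer mapping.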
@@ -120,11 +129,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
 	}
 	flush_dcache_mmap_unlock(mapping);
 	if (aliases)
-		adjust_pte(vma, addr);
+		do_adjust_pte(vma, addr, pfn, ptep);
 	else
 		flush_cache_page(vma, addr, pfn);
 }
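The pgoff arithmetic above locates the same file page in each other shared mapping: pgoff is the faulting page's offset within the file, and mpnt->vm_pgoff is the file offset at which the candidate VMA begins. With made-up numbers, purely for illustration:

	/* Hypothetical values (PAGE_SHIFT == 12): */
	unsigned long pgoff    = 0x10;		/* faulting page: file page 16 */
	unsigned long vm_pgoff = 0x08;		/* mpnt maps the file from page 8 */
	unsigned long vm_start = 0x40000000;	/* mpnt->vm_start */

	unsigned long offset  = (pgoff - vm_pgoff) << PAGE_SHIFT;	/* 0x8000 */
	unsigned long address = vm_start + offset;			/* 0x40008000 */

Note also that the faulting VMA's own entry is now fixed up by calling do_adjust_pte() directly with the ptep the caller passed in, rather than re-walking the page tables and re-taking the pte lock that update_mmu_cache()'s caller already holds.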
@@ -142,16 +151,24 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
  *
  * Note that the pte lock will be held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+	pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(pte);
+	unsigned long pfn = pte_pfn(*ptep);
 	struct address_space *mapping;
 	struct page *page;
 
 	if (!pfn_valid(pfn))
 		return;
 
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
 	page = pfn_to_page(pfn);
+	if (page == ZERO_PAGE(0))
+		return;
+
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
 	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
@@ -159,7 +176,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 #endif
 	if (mapping) {
 		if (cache_is_vivt())
-			make_coherent(mapping, vma, addr, pfn);
+			make_coherent(mapping, vma, addr, ptep, pfn);
 		else if (vma->vm_flags & VM_EXEC)
 			__flush_icache_all();
 	}
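Passing ptep instead of a pte_t value is the point of the interface change: update_mmu_cache() can now hand make_coherent() a pointer to the faulting entry, so do_adjust_pte() can rewrite it in place under the pte lock the caller already holds. On the caller side this corresponds to a change of roughly this shape (a sketch, not the exact core-mm hunk):

	/* before: pte value only - the arch hook cannot modify the entry */
	update_mmu_cache(vma, address, *ptep);

	/* after: pte pointer - the hook may set_pte_at() on the faulting
	 * entry itself, under the pte lock already held by the caller */
	update_mmu_cache(vma, address, ptep);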
@@ -198,9 +215,8 @@ void __init check_writebuffer_bugs(void)
 	page = alloc_page(GFP_KERNEL);
 	if (page) {
 		unsigned long *p1, *p2;
-		pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
-					 L_PTE_DIRTY|L_PTE_WRITE|
-					 L_PTE_MT_BUFFERABLE);
+		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
+					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);
 
 		p1 = vmap(&page, 1, VM_IOREMAP, prot);
 		p2 = vmap(&page, 1, VM_IOREMAP, prot);
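__pgprot_modify() keeps everything in PAGE_KERNEL except the memory-type field, which it forces to bufferable, so the protection bits no longer need to be spelled out by hand and stay correct if PAGE_KERNEL grows new bits. On ARM the macro is approximately:

	/* Approximate definition, from arch/arm/include/asm/pgtable.h:
	 * clear the bits in 'mask', then OR in 'bits'. */
	#define __pgprot_modify(prot, mask, bits) \
		__pgprot((pgprot_val(prot) & ~(mask)) | (bits))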