author     Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8  /arch/arm/mm/fault-armv.c
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8
parent     6a00f206debf8a5c8899055726ad127dbeeed098
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--  arch/arm/mm/fault-armv.c  49
1 file changed, 39 insertions, 10 deletions
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca1..7cab79179421 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,8 +26,9 @@
 
 #include "mm.h"
 
-static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
+#if __LINUX_ARM_ARCH__ < 6
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable. However, we leave the write buffer on.
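For context on the hunk above: shared_pte_mask now uses pteval_t, the kernel's dedicated type for PTE values, so the mask's width tracks the PTE representation instead of being fixed at unsigned long. Below is a minimal standalone sketch of that "dedicated entry-value typedef" idea; entryval_t, WIDE_ENTRIES and ENTRY_BUFFERABLE are illustrative names, not kernel identifiers.

/*
 * Illustrative only: a dedicated typedef for table-entry values lets the
 * entry width be changed in one place without touching users of the type.
 */
#include <stdint.h>
#include <stdio.h>

#ifdef WIDE_ENTRIES
typedef uint64_t entryval_t;    /* e.g. a 64-bit descriptor format */
#else
typedef uint32_t entryval_t;    /* classic 32-bit descriptors */
#endif

#define ENTRY_BUFFERABLE ((entryval_t)1 << 2)   /* made-up bit position */

static entryval_t shared_mask = ENTRY_BUFFERABLE;

int main(void)
{
        printf("mask: %#llx (entry width: %zu bytes)\n",
               (unsigned long long)shared_mask, sizeof(entryval_t));
        return 0;
}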
@@ -65,11 +66,36 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
         return ret;
 }
 
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here. Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+        /*
+         * Use nested version here to indicate that we are already
+         * holding one similar spinlock.
+         */
+        spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+        spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
         unsigned long pfn)
 {
         spinlock_t *ptl;
         pgd_t *pgd;
+        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
         int ret;
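The do_pte_lock()/do_pte_unlock() helpers added above let the same call sites compile either to a real nested lock acquisition (when split PTE locks are in use) or to no-ops (when the shared mm->page_table_lock is already held). A rough userspace sketch of that compile-time selection, using pthreads in place of kernel spinlocks; USE_SPLIT_LOCKS, do_lock() and do_unlock() are illustrative names only.

/*
 * Userspace analogue of the do_pte_lock()/do_pte_unlock() pattern: the same
 * call sites compile to either a real lock acquisition or a no-op, chosen at
 * build time. Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

#define USE_SPLIT_LOCKS 1       /* stand-in for USE_SPLIT_PTLOCKS */

#if USE_SPLIT_LOCKS
/* Per-object lock: the caller does not hold it yet, so take it here. */
static inline void do_lock(pthread_mutex_t *m)   { pthread_mutex_lock(m); }
static inline void do_unlock(pthread_mutex_t *m) { pthread_mutex_unlock(m); }
#else
/* A single shared lock is assumed to be held already: nothing to do. */
static inline void do_lock(pthread_mutex_t *m)   { (void)m; }
static inline void do_unlock(pthread_mutex_t *m) { (void)m; }
#endif

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int obj_value;

int main(void)
{
        do_lock(&obj_lock);     /* no-op in the !USE_SPLIT_LOCKS build */
        obj_value++;
        do_unlock(&obj_lock);
        printf("value = %d\n", obj_value);
        return 0;
}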
@@ -78,7 +104,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
         if (pgd_none_or_clear_bad(pgd))
                 return 0;
 
-        pmd = pmd_offset(pgd, address);
+        pud = pud_offset(pgd, address);
+        if (pud_none_or_clear_bad(pud))
+                return 0;
+
+        pmd = pmd_offset(pud, address);
         if (pmd_none_or_clear_bad(pmd))
                 return 0;
 
@@ -88,13 +118,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
          * open-code the spin-locking.
          */
         ptl = pte_lockptr(vma->vm_mm, pmd);
-        pte = pte_offset_map_nested(pmd, address);
-        spin_lock(ptl);
+        pte = pte_offset_map(pmd, address);
+        do_pte_lock(ptl);
 
         ret = do_adjust_pte(vma, address, pfn, pte);
 
-        spin_unlock(ptl);
-        pte_unmap_nested(pte);
+        do_pte_unlock(ptl);
+        pte_unmap(pte);
 
         return ret;
 }
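Taken together, the adjust_pte() hunks add one more level to the table walk (pgd -> pud -> pmd -> pte) and route the PTE locking through the new helpers. The toy standalone sketch below mirrors that shape: descend the levels, return early when an intermediate level is absent, then lock, modify the leaf, unlock. struct top/mid/leaf and adjust_leaf() are made-up names, not the kernel's page-table API.

/*
 * Toy analogue of the adjust_pte() walk: descend fixed levels, bail out early
 * when an intermediate level is missing, then update the leaf under its lock.
 */
#include <pthread.h>
#include <stdio.h>

struct leaf { pthread_mutex_t lock; unsigned long val; };
struct mid  { struct leaf *leaf; };
struct top  { struct mid *mid; };

/* Returns 1 if the leaf existed and was updated, 0 otherwise. */
static int adjust_leaf(struct top *top, unsigned long newval)
{
        struct mid *mid;
        struct leaf *leaf;

        if (!top)
                return 0;
        mid = top->mid;         /* like pud_offset()/pmd_offset() */
        if (!mid)
                return 0;
        leaf = mid->leaf;       /* like pte_offset_map() */
        if (!leaf)
                return 0;

        pthread_mutex_lock(&leaf->lock);        /* like do_pte_lock(ptl) */
        leaf->val = newval;                     /* like do_adjust_pte() */
        pthread_mutex_unlock(&leaf->lock);      /* like do_pte_unlock(ptl) */
        return 1;
}

int main(void)
{
        struct leaf l = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct mid  m = { &l };
        struct top  t = { &m };

        printf("updated: %d, val: %lu\n", adjust_leaf(&t, 42UL), l.val);
        return 0;
}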
@@ -141,7 +171,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE. Basically, there are two
  * things that we need to take care of:
  *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -168,10 +198,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                 return;
 
         mapping = page_mapping(page);
-#ifndef CONFIG_SMP
-        if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                 __flush_dcache_page(mapping, page);
-#endif
         if (mapping) {
                 if (cache_is_vivt())
                         make_coherent(mapping, vma, addr, ptep, pfn);
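The update_mmu_cache() hunk above replaces the UP-only (#ifndef CONFIG_SMP) PG_dcache_dirty logic with an unconditional check: the D-cache is written back only when PG_dcache_clean was not already set, and the bit is set atomically in the same operation. Below is a hedged userspace sketch of that "do the expensive work only on the flag's 0 -> 1 transition" pattern, using C11 atomic_fetch_or() in place of the kernel's test_and_set_bit(); struct page_like, mark_clean() and CLEAN_BIT are made-up names.

/*
 * Userspace sketch of the PG_dcache_clean idea: an atomic flag records that
 * the expensive maintenance has been done; only the caller that observes the
 * flag's 0 -> 1 transition performs the work.
 */
#include <stdatomic.h>
#include <stdio.h>

#define CLEAN_BIT 0x1u

struct page_like {
        atomic_uint flags;      /* bit 0 plays the role of PG_dcache_clean */
};

static void expensive_flush(struct page_like *p)
{
        printf("flushing %p\n", (void *)p);
}

static void mark_clean(struct page_like *p)
{
        /* Atomic test-and-set: only one caller ever sees the bit clear. */
        unsigned int old = atomic_fetch_or(&p->flags, CLEAN_BIT);

        if (!(old & CLEAN_BIT))
                expensive_flush(p);
}

int main(void)
{
        struct page_like p = { .flags = 0 };

        mark_clean(&p);         /* flushes */
        mark_clean(&p);         /* bit already set: no flush */
        return 0;
}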
@@ -179,6 +207,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                         __flush_icache_all();
         }
 }
+#endif  /* __LINUX_ARM_ARCH__ < 6 */
 
 /*
  * Check whether the write buffer has physical address aliasing