-rw-r--r--	include/asm-generic/pgtable.h	214
-rw-r--r--	mm/Makefile	2
-rw-r--r--	mm/pgtable-generic.c	123
3 files changed, 278 insertions, 61 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 0ab2cd27c60f..f1eddf71dd0c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -5,67 +5,108 @@
 #ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this. We return whether the PTE actually changed, which
- * in turn instructs the caller to do things like update__mmu_cache.
- * This used to be done in the caller, but sparc needs minor faults to
- * force that call on sun4c so we changed this macro slightly
- */
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({ \
-	int __changed = !pte_same(*(__ptep), __entry); \
-	if (__changed) { \
-		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-		flush_tlb_page(__vma, __address); \
-	} \
-	__changed; \
-})
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __address, __ptep) \
-({ \
-	pte_t __pte = *(__ptep); \
-	int r = 1; \
-	if (!pte_young(__pte)) \
-		r = 0; \
-	else \
-		set_pte_at((__vma)->vm_mm, (__address), \
-			   (__ptep), pte_mkold(__pte)); \
-	r; \
-})
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	int r = 1;
+	if (!pte_young(pte))
+		r = 0;
+	else
+		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+	return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	int r = 1;
+	if (!pmd_young(pmd))
+		r = 0;
+	else
+		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+	return r;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep) \
-({ \
-	int __young; \
-	__young = ptep_test_and_clear_young(__vma, __address, __ptep); \
-	if (__young) \
-		flush_tlb_page(__vma, __address); \
-	__young; \
-})
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(__mm, __address, __ptep) \
-({ \
-	pte_t __pte = *(__ptep); \
-	pte_clear((__mm), (__address), (__ptep)); \
-	__pte; \
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	pte_clear(mm, address, ptep);
+	return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	pmd_clear(mm, address, pmdp);
+	return pmd;
 })
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	BUG();
+	return __pmd(0);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
-({ \
-	pte_t __pte; \
-	__pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
-	__pte; \
-})
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address, pte_t *ptep,
+					    int full)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear(mm, address, ptep);
+	return pte;
+}
 #endif
 
 /*
@@ -74,20 +115,25 @@
  * not present, or in the process of an address space destruction.
  */
 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
-do { \
-	pte_clear((__mm), (__address), (__ptep)); \
-} while (0)
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+					      unsigned long address,
+					      pte_t *ptep,
+					      int full)
+{
+	pte_clear(mm, address, ptep);
+}
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep) \
-({ \
-	pte_t __pte; \
-	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
-	flush_tlb_page(__vma, __address); \
-	__pte; \
-})
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -99,8 +145,49 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 }
 #endif
 
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pmd_t *pmdp);
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (pte_val(A) == pte_val(B))
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
@@ -357,6 +444,13 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 {
 	return 0;
 }
+#ifndef __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	BUG();
+	return 0;
+}
+#endif /* __HAVE_ARCH_PMD_WRITE */
 #endif
 
 #endif /* !__ASSEMBLY__ */
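
Each generic helper above sits behind an #ifndef __HAVE_ARCH_* guard, so the fallback is used only until an architecture supplies its own implementation. A minimal sketch of that opt-out (not part of this patch; my_arch_pmd_test_and_clear_accessed() is a hypothetical arch primitive): the architecture's <asm/pgtable.h> defines the guard and its own helper before pulling in <asm-generic/pgtable.h>.

	#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
	static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
						    unsigned long address,
						    pmd_t *pmdp)
	{
		/* atomically test and clear the accessed bit of *pmdp */
		return my_arch_pmd_test_and_clear_accessed(pmdp);
	}

	#include <asm-generic/pgtable.h>	/* generic fallback compiles out */

With the guard defined, the generic version above is never seen by the compiler; this is the same mechanism the existing ptep_* overrides rely on.
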
diff --git a/mm/Makefile b/mm/Makefile
index f73f75a29f82..380772a9ccb8 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,7 +5,7 @@
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= fremap.o highmem.o madvise.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
-			   vmalloc.o pagewalk.o
+			   vmalloc.o pagewalk.o pgtable-generic.o
 
 obj-y			:= bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
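
Because pgtable-generic.o is appended to mmu-$(CONFIG_MMU) rather than to obj-y directly, the new file is compiled only for CONFIG_MMU=y kernels; nommu builds keep using nommu.o and never reference these helpers (the remainder of mm/Makefile, not shown in this hunk, folds $(mmu-y) into obj-y).
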
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
new file mode 100644
index 000000000000..d030548047e2
--- /dev/null
+++ b/mm/pgtable-generic.c
@@ -0,0 +1,123 @@
+/*
+ * mm/pgtable-generic.c
+ *
+ * Generic pgtable methods declared in asm-generic/pgtable.h
+ *
+ * Copyright (C) 2010 Linus Torvalds
+ */
+
+#include <asm/tlb.h>
+#include <asm-generic/pgtable.h>
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Only sets the access flags (dirty, accessed, and
+ * writable). Furthermore, we know it always gets set to a "more
+ * permissive" setting, which allows most architectures to optimize
+ * this. We return whether the PTE actually changed, which in turn
+ * instructs the caller to do things like update__mmu_cache. This
+ * used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c so we changed this macro slightly
+ */
+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	int changed = !pte_same(*ptep, entry);
+	if (changed) {
+		set_pte_at(vma->vm_mm, address, ptep, entry);
+		flush_tlb_page(vma, address);
+	}
+	return changed;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+int pmdp_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pmd_t *pmdp,
+			  pmd_t entry, int dirty)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	int changed = !pmd_same(*pmdp, entry);
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	if (changed) {
+		set_pmd_at(vma->vm_mm, address, pmdp, entry);
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	}
+	return changed;
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+	BUG();
+	return 0;
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep)
+{
+	int young;
+	young = ptep_test_and_clear_young(vma, address, ptep);
+	if (young)
+		flush_tlb_page(vma, address);
+	return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp)
+{
+	int young;
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+	BUG();
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	young = pmdp_test_and_clear_young(vma, address, pmdp);
+	if (young)
+		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	return young;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
+		       pte_t *ptep)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
+	flush_tlb_page(vma, address);
+	return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
+		       pmd_t *pmdp)
+{
+	pmd_t pmd;
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+	BUG();
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+	return pmd;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			   pmd_t *pmdp)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	pmd_t pmd = pmd_mksplitting(*pmdp);
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+	/* tlb flush only to serialize against gup-fast */
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+	BUG();
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+}
+#endif
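
The out-of-line helpers above are typically called with the relevant page-table lock held; the flush variants bundle the clear with the TLB shootdown so callers cannot forget it. A minimal usage sketch (hypothetical helper, not from this patch; in this kernel the huge-PMD operations are serialized by mm->page_table_lock):

	static pmd_t clear_huge_pmd_example(struct vm_area_struct *vma,
					    unsigned long haddr, pmd_t *pmdp)
	{
		pmd_t entry;

		VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);	/* huge-page aligned */
		spin_lock(&vma->vm_mm->page_table_lock);
		/* clears *pmdp and flushes the whole huge-page range */
		entry = pmdp_clear_flush(vma, haddr, pmdp);
		spin_unlock(&vma->vm_mm->page_table_lock);
		return entry;
	}

The same pattern applies to pmdp_clear_flush_young() and pmdp_splitting_flush(): the VM_BUG_ON() in each generic PMD helper documents that callers must pass an HPAGE_PMD_SIZE-aligned address.
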