Diffstat (limited to 'include/asm-generic/pgtable.h')
-rw-r--r--	include/asm-generic/pgtable.h	219
1 file changed, 159 insertions, 60 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6f3c6ae4fe03..b4bfe338ea0e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,68 +4,103 @@
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_MMU
 
+#include <linux/mm_types.h>
+
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-/*
- * Largely same as above, but only sets the access flags (dirty,
- * accessed, and writable). Furthermore, we know it always gets set
- * to a "more permissive" setting, which allows most architectures
- * to optimize this. We return whether the PTE actually changed, which
- * in turn instructs the caller to do things like update__mmu_cache.
- * This used to be done in the caller, but sparc needs minor faults to
- * force that call on sun4c so we changed this macro slightly
- */
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({ \
-	int __changed = !pte_same(*(__ptep), __entry); \
-	if (__changed) { \
-		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
-		flush_tlb_page(__vma, __address); \
-	} \
-	__changed; \
-})
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pmd_t *pmdp,
+				 pmd_t entry, int dirty);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(__vma, __address, __ptep) \
-({ \
-	pte_t __pte = *(__ptep); \
-	int r = 1; \
-	if (!pte_young(__pte)) \
-		r = 0; \
-	else \
-		set_pte_at((__vma)->vm_mm, (__address), \
-			   (__ptep), pte_mkold(__pte)); \
-	r; \
-})
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	int r = 1;
+	if (!pte_young(pte))
+		r = 0;
+	else
+		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
+	return r;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	int r = 1;
+	if (!pmd_young(pmd))
+		r = 0;
+	else
+		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
+	return r;
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(__vma, __address, __ptep) \
-({ \
-	int __young; \
-	__young = ptep_test_and_clear_young(__vma, __address, __ptep); \
-	if (__young) \
-		flush_tlb_page(__vma, __address); \
-	__young; \
-})
+int ptep_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+int pmdp_clear_flush_young(struct vm_area_struct *vma,
+			   unsigned long address, pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define ptep_get_and_clear(__mm, __address, __ptep) \
-({ \
-	pte_t __pte = *(__ptep); \
-	pte_clear((__mm), (__address), (__ptep)); \
-	__pte; \
-})
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pte_t *ptep)
+{
+	pte_t pte = *ptep;
+	pte_clear(mm, address, ptep);
+	return pte;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address,
+				       pmd_t *pmdp)
+{
+	pmd_t pmd = *pmdp;
+	pmd_clear(mm, address, pmdp);
+	return pmd;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
-({ \
-	pte_t __pte; \
-	__pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
-	__pte; \
-})
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address, pte_t *ptep,
+					    int full)
+{
+	pte_t pte;
+	pte = ptep_get_and_clear(mm, address, ptep);
+	return pte;
+}
 #endif
 
 /*
@@ -74,20 +109,25 @@
  * not present, or in the process of an address space destruction.
  */
 #ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
-#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
-do { \
-	pte_clear((__mm), (__address), (__ptep)); \
-} while (0)
+static inline void pte_clear_not_present_full(struct mm_struct *mm,
+					      unsigned long address,
+					      pte_t *ptep,
+					      int full)
+{
+	pte_clear(mm, address, ptep);
+}
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
-#define ptep_clear_flush(__vma, __address, __ptep) \
-({ \
-	pte_t __pte; \
-	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
-	flush_tlb_page(__vma, __address); \
-	__pte; \
-})
+extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep);
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
+extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pmd_t *pmdp);
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
@@ -99,8 +139,49 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 }
 #endif
 
+#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	pmd_t old_pmd = *pmdp;
+	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	BUG();
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif
+
+#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma,
+				  unsigned long address,
+				  pmd_t *pmdp);
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) (pte_val(A) == pte_val(B))
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+	return pte_val(pte_a) == pte_val(pte_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PMD_SAME
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	return pmd_val(pmd_a) == pmd_val(pmd_b);
+}
+#else /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
@@ -348,6 +429,24 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 				unsigned long size);
 #endif
 
+#ifndef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+	return 0;
+}
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+	return 0;
+}
+#ifndef __HAVE_ARCH_PMD_WRITE
+static inline int pmd_write(pmd_t pmd)
+{
+	BUG();
+	return 0;
+}
+#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
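
Editor's note: the diff above replaces the statement-expression pte helper macros with static inline functions and adds pmd-level counterparts guarded by CONFIG_TRANSPARENT_HUGEPAGE. As a rough, self-contained user-space sketch of the test-and-clear-young idiom these generic helpers follow (the fake_pte_t type and fake_* helpers are hypothetical stand-ins for the kernel's pte_t accessors, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for pte_t: a bare word with a software "accessed" bit. */
typedef struct { uint64_t val; } fake_pte_t;
#define FAKE_PTE_ACCESSED (1ULL << 5)

static inline int fake_pte_young(fake_pte_t pte)
{
	return (pte.val & FAKE_PTE_ACCESSED) != 0;
}

static inline fake_pte_t fake_pte_mkold(fake_pte_t pte)
{
	pte.val &= ~FAKE_PTE_ACCESSED;
	return pte;
}

/*
 * Mirrors the shape of the generic ptep_test_and_clear_young() above:
 * report whether the accessed bit was set, and clear it if it was.
 */
static inline int fake_test_and_clear_young(fake_pte_t *ptep)
{
	fake_pte_t pte = *ptep;
	int r = 1;
	if (!fake_pte_young(pte))
		r = 0;
	else
		*ptep = fake_pte_mkold(pte);
	return r;
}

int main(void)
{
	fake_pte_t pte = { .val = FAKE_PTE_ACCESSED | 0x1000 };

	printf("first  check: young=%d\n", fake_test_and_clear_young(&pte));
	printf("second check: young=%d\n", fake_test_and_clear_young(&pte));
	return 0;
}

Built as plain C, the first call reports the accessed bit as set and clears it, so the second call reports 0; as a static inline rather than a macro, the helper also gets argument type checking and evaluates its argument only once.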