diff options
author | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-01-06 17:33:19 -0500 |
---|---|---|
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-01-06 17:33:19 -0500 |
commit | 28cdac6690cb113856293bf79b40de33dbd8f974 (patch) | |
tree | 64cd8ca8376ccf9a12faee3588c15a5839f9a28b /arch/arm/include | |
parent | 4073723acb9cdcdbe4df9c0e0c376c65d1697e43 (diff) | |
parent | 36bb94ba36f332de767cfaa3af6a5136435a3a9c (diff) |
Merge branch 'pgt' (early part) into devel
Diffstat (limited to 'arch/arm/include')
-rw-r--r-- | arch/arm/include/asm/page.h | 6 | ||||
-rw-r--r-- | arch/arm/include/asm/pgalloc.h | 50 | ||||
-rw-r--r-- | arch/arm/include/asm/pgtable.h | 315 |
3 files changed, 181 insertions(+), 190 deletions(-)
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index a485ac3c8696..f51a69595f6e 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h | |||
@@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, | |||
151 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 151 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
152 | extern void copy_page(void *to, const void *from); | 152 | extern void copy_page(void *to, const void *from); |
153 | 153 | ||
154 | typedef unsigned long pteval_t; | ||
155 | |||
154 | #undef STRICT_MM_TYPECHECKS | 156 | #undef STRICT_MM_TYPECHECKS |
155 | 157 | ||
156 | #ifdef STRICT_MM_TYPECHECKS | 158 | #ifdef STRICT_MM_TYPECHECKS |
157 | /* | 159 | /* |
158 | * These are used to make use of C type-checking.. | 160 | * These are used to make use of C type-checking.. |
159 | */ | 161 | */ |
160 | typedef struct { unsigned long pte; } pte_t; | 162 | typedef struct { pteval_t pte; } pte_t; |
161 | typedef struct { unsigned long pmd; } pmd_t; | 163 | typedef struct { unsigned long pmd; } pmd_t; |
162 | typedef struct { unsigned long pgd[2]; } pgd_t; | 164 | typedef struct { unsigned long pgd[2]; } pgd_t; |
163 | typedef struct { unsigned long pgprot; } pgprot_t; | 165 | typedef struct { unsigned long pgprot; } pgprot_t; |
@@ -175,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
175 | /* | 177 | /* |
176 | * .. while these make it easier on the compiler | 178 | * .. while these make it easier on the compiler |
177 | */ | 179 | */ |
178 | typedef unsigned long pte_t; | 180 | typedef pteval_t pte_t; |
179 | typedef unsigned long pmd_t; | 181 | typedef unsigned long pmd_t; |
180 | typedef unsigned long pgd_t[2]; | 182 | typedef unsigned long pgd_t[2]; |
181 | typedef unsigned long pgprot_t; | 183 | typedef unsigned long pgprot_t; |
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index b12cc98bbe04..9763be04f77e 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h | |||
@@ -30,14 +30,16 @@ | |||
30 | #define pmd_free(mm, pmd) do { } while (0) | 30 | #define pmd_free(mm, pmd) do { } while (0) |
31 | #define pgd_populate(mm,pmd,pte) BUG() | 31 | #define pgd_populate(mm,pmd,pte) BUG() |
32 | 32 | ||
33 | extern pgd_t *get_pgd_slow(struct mm_struct *mm); | 33 | extern pgd_t *pgd_alloc(struct mm_struct *mm); |
34 | extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); | 34 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); |
35 | |||
36 | #define pgd_alloc(mm) get_pgd_slow(mm) | ||
37 | #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) | ||
38 | 35 | ||
39 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | 36 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) |
40 | 37 | ||
38 | static inline void clean_pte_table(pte_t *pte) | ||
39 | { | ||
40 | clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE); | ||
41 | } | ||
42 | |||
41 | /* | 43 | /* |
42 | * Allocate one PTE table. | 44 | * Allocate one PTE table. |
43 | * | 45 | * |
@@ -45,14 +47,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); | |||
45 | * into one table thus: | 47 | * into one table thus: |
46 | * | 48 | * |
47 | * +------------+ | 49 | * +------------+ |
48 | * | h/w pt 0 | | ||
49 | * +------------+ | ||
50 | * | h/w pt 1 | | ||
51 | * +------------+ | ||
52 | * | Linux pt 0 | | 50 | * | Linux pt 0 | |
53 | * +------------+ | 51 | * +------------+ |
54 | * | Linux pt 1 | | 52 | * | Linux pt 1 | |
55 | * +------------+ | 53 | * +------------+ |
54 | * | h/w pt 0 | | ||
55 | * +------------+ | ||
56 | * | h/w pt 1 | | ||
57 | * +------------+ | ||
56 | */ | 58 | */ |
57 | static inline pte_t * | 59 | static inline pte_t * |
58 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | 60 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) |
@@ -60,10 +62,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | |||
60 | pte_t *pte; | 62 | pte_t *pte; |
61 | 63 | ||
62 | pte = (pte_t *)__get_free_page(PGALLOC_GFP); | 64 | pte = (pte_t *)__get_free_page(PGALLOC_GFP); |
63 | if (pte) { | 65 | if (pte) |
64 | clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); | 66 | clean_pte_table(pte); |
65 | pte += PTRS_PER_PTE; | ||
66 | } | ||
67 | 67 | ||
68 | return pte; | 68 | return pte; |
69 | } | 69 | } |
@@ -79,10 +79,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) | |||
79 | pte = alloc_pages(PGALLOC_GFP, 0); | 79 | pte = alloc_pages(PGALLOC_GFP, 0); |
80 | #endif | 80 | #endif |
81 | if (pte) { | 81 | if (pte) { |
82 | if (!PageHighMem(pte)) { | 82 | if (!PageHighMem(pte)) |
83 | void *page = page_address(pte); | 83 | clean_pte_table(page_address(pte)); |
84 | clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); | ||
85 | } | ||
86 | pgtable_page_ctor(pte); | 84 | pgtable_page_ctor(pte); |
87 | } | 85 | } |
88 | 86 | ||
@@ -94,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) | |||
94 | */ | 92 | */ |
95 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | 93 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
96 | { | 94 | { |
97 | if (pte) { | 95 | if (pte) |
98 | pte -= PTRS_PER_PTE; | ||
99 | free_page((unsigned long)pte); | 96 | free_page((unsigned long)pte); |
100 | } | ||
101 | } | 97 | } |
102 | 98 | ||
103 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | 99 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) |
@@ -106,8 +102,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | |||
106 | __free_page(pte); | 102 | __free_page(pte); |
107 | } | 103 | } |
108 | 104 | ||
109 | static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) | 105 | static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, |
106 | unsigned long prot) | ||
110 | { | 107 | { |
108 | unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot; | ||
111 | pmdp[0] = __pmd(pmdval); | 109 | pmdp[0] = __pmd(pmdval); |
112 | pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); | 110 | pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); |
113 | flush_pmd_entry(pmdp); | 111 | flush_pmd_entry(pmdp); |
@@ -122,20 +120,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) | |||
122 | static inline void | 120 | static inline void |
123 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) | 121 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) |
124 | { | 122 | { |
125 | unsigned long pte_ptr = (unsigned long)ptep; | ||
126 | |||
127 | /* | 123 | /* |
128 | * The pmd must be loaded with the physical | 124 | * The pmd must be loaded with the physical address of the PTE table |
129 | * address of the PTE table | ||
130 | */ | 125 | */ |
131 | pte_ptr -= PTRS_PER_PTE * sizeof(void *); | 126 | __pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE); |
132 | __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE); | ||
133 | } | 127 | } |
134 | 128 | ||
135 | static inline void | 129 | static inline void |
136 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) | 130 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) |
137 | { | 131 | { |
138 | __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); | 132 | __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); |
139 | } | 133 | } |
140 | #define pmd_pgtable(pmd) pmd_page(pmd) | 134 | #define pmd_pgtable(pmd) pmd_page(pmd) |
141 | 135 | ||
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 53d1d5deb111..ebcb6432f45f 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #ifndef _ASMARM_PGTABLE_H | 10 | #ifndef _ASMARM_PGTABLE_H |
11 | #define _ASMARM_PGTABLE_H | 11 | #define _ASMARM_PGTABLE_H |
12 | 12 | ||
13 | #include <linux/const.h> | ||
13 | #include <asm-generic/4level-fixup.h> | 14 | #include <asm-generic/4level-fixup.h> |
14 | #include <asm/proc-fns.h> | 15 | #include <asm/proc-fns.h> |
15 | 16 | ||
@@ -54,7 +55,7 @@ | |||
54 | * Therefore, we tweak the implementation slightly - we tell Linux that we | 55 | * Therefore, we tweak the implementation slightly - we tell Linux that we |
55 | * have 2048 entries in the first level, each of which is 8 bytes (iow, two | 56 | * have 2048 entries in the first level, each of which is 8 bytes (iow, two |
56 | * hardware pointers to the second level.) The second level contains two | 57 | * hardware pointers to the second level.) The second level contains two |
57 | * hardware PTE tables arranged contiguously, followed by Linux versions | 58 | * hardware PTE tables arranged contiguously, preceded by Linux versions |
58 | * which contain the state information Linux needs. We, therefore, end up | 59 | * which contain the state information Linux needs. We, therefore, end up |
59 | * with 512 entries in the "PTE" level. | 60 | * with 512 entries in the "PTE" level. |
60 | * | 61 | * |
@@ -62,15 +63,15 @@ | |||
62 | * | 63 | * |
63 | * pgd pte | 64 | * pgd pte |
64 | * | | | 65 | * | | |
65 | * +--------+ +0 | 66 | * +--------+ |
66 | * | |-----> +------------+ +0 | 67 | * | | +------------+ +0 |
68 | * +- - - - + | Linux pt 0 | | ||
69 | * | | +------------+ +1024 | ||
70 | * +--------+ +0 | Linux pt 1 | | ||
71 | * | |-----> +------------+ +2048 | ||
67 | * +- - - - + +4 | h/w pt 0 | | 72 | * +- - - - + +4 | h/w pt 0 | |
68 | * | |-----> +------------+ +1024 | 73 | * | |-----> +------------+ +3072 |
69 | * +--------+ +8 | h/w pt 1 | | 74 | * +--------+ +8 | h/w pt 1 | |
70 | * | | +------------+ +2048 | ||
71 | * +- - - - + | Linux pt 0 | | ||
72 | * | | +------------+ +3072 | ||
73 | * +--------+ | Linux pt 1 | | ||
74 | * | | +------------+ +4096 | 75 | * | | +------------+ +4096 |
75 | * | 76 | * |
76 | * See L_PTE_xxx below for definitions of bits in the "Linux pt", and | 77 | * See L_PTE_xxx below for definitions of bits in the "Linux pt", and |
@@ -102,6 +103,10 @@ | |||
102 | #define PTRS_PER_PMD 1 | 103 | #define PTRS_PER_PMD 1 |
103 | #define PTRS_PER_PGD 2048 | 104 | #define PTRS_PER_PGD 2048 |
104 | 105 | ||
106 | #define PTE_HWTABLE_PTRS (PTRS_PER_PTE) | ||
107 | #define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) | ||
108 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) | ||
109 | |||
105 | /* | 110 | /* |
106 | * PMD_SHIFT determines the size of the area a second-level page table can map | 111 | * PMD_SHIFT determines the size of the area a second-level page table can map |
107 | * PGDIR_SHIFT determines what a third-level page table entry can map | 112 | * PGDIR_SHIFT determines what a third-level page table entry can map |
@@ -112,13 +117,13 @@ | |||
112 | #define LIBRARY_TEXT_START 0x0c000000 | 117 | #define LIBRARY_TEXT_START 0x0c000000 |
113 | 118 | ||
114 | #ifndef __ASSEMBLY__ | 119 | #ifndef __ASSEMBLY__ |
115 | extern void __pte_error(const char *file, int line, unsigned long val); | 120 | extern void __pte_error(const char *file, int line, pte_t); |
116 | extern void __pmd_error(const char *file, int line, unsigned long val); | 121 | extern void __pmd_error(const char *file, int line, pmd_t); |
117 | extern void __pgd_error(const char *file, int line, unsigned long val); | 122 | extern void __pgd_error(const char *file, int line, pgd_t); |
118 | 123 | ||
119 | #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) | 124 | #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) |
120 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) | 125 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) |
121 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) | 126 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) |
122 | #endif /* !__ASSEMBLY__ */ | 127 | #endif /* !__ASSEMBLY__ */ |
123 | 128 | ||
124 | #define PMD_SIZE (1UL << PMD_SHIFT) | 129 | #define PMD_SIZE (1UL << PMD_SHIFT) |
@@ -133,8 +138,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
133 | */ | 138 | */ |
134 | #define FIRST_USER_ADDRESS PAGE_SIZE | 139 | #define FIRST_USER_ADDRESS PAGE_SIZE |
135 | 140 | ||
136 | #define FIRST_USER_PGD_NR 1 | 141 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) |
137 | #define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR) | ||
138 | 142 | ||
139 | /* | 143 | /* |
140 | * section address mask and size definitions. | 144 | * section address mask and size definitions. |
@@ -161,30 +165,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
161 | * The PTE table pointer refers to the hardware entries; the "Linux" | 165 | * The PTE table pointer refers to the hardware entries; the "Linux" |
162 | * entries are stored 1024 bytes below. | 166 | * entries are stored 1024 bytes below. |
163 | */ | 167 | */ |
164 | #define L_PTE_PRESENT (1 << 0) | 168 | #define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) |
165 | #define L_PTE_YOUNG (1 << 1) | 169 | #define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) |
166 | #define L_PTE_FILE (1 << 2) /* only when !PRESENT */ | 170 | #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ |
167 | #define L_PTE_DIRTY (1 << 6) | 171 | #define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) |
168 | #define L_PTE_WRITE (1 << 7) | 172 | #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) |
169 | #define L_PTE_USER (1 << 8) | 173 | #define L_PTE_USER (_AT(pteval_t, 1) << 8) |
170 | #define L_PTE_EXEC (1 << 9) | 174 | #define L_PTE_XN (_AT(pteval_t, 1) << 9) |
171 | #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ | 175 | #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ |
172 | 176 | ||
173 | /* | 177 | /* |
174 | * These are the memory types, defined to be compatible with | 178 | * These are the memory types, defined to be compatible with |
175 | * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB | 179 | * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB |
176 | */ | 180 | */ |
177 | #define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */ | 181 | #define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ |
178 | #define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */ | 182 | #define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ |
179 | #define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */ | 183 | #define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ |
180 | #define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */ | 184 | #define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ |
181 | #define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */ | 185 | #define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ |
182 | #define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ | 186 | #define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ |
183 | #define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */ | 187 | #define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ |
184 | #define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ | 188 | #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ |
185 | #define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */ | 189 | #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ |
186 | #define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ | 190 | #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ |
187 | #define L_PTE_MT_MASK (0x0f << 2) | 191 | #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) |
188 | 192 | ||
189 | #ifndef __ASSEMBLY__ | 193 | #ifndef __ASSEMBLY__ |
190 | 194 | ||
@@ -201,23 +205,44 @@ extern pgprot_t pgprot_kernel; | |||
201 | 205 | ||
202 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) | 206 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) |
203 | 207 | ||
204 | #define PAGE_NONE pgprot_user | 208 | #define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY) |
205 | #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) | 209 | #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) |
206 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) | 210 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) |
207 | #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER) | 211 | #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
208 | #define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) | 212 | #define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) |
209 | #define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER) | 213 | #define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
210 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) | 214 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) |
211 | #define PAGE_KERNEL pgprot_kernel | 215 | #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) |
212 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC) | 216 | #define PAGE_KERNEL_EXEC pgprot_kernel |
213 | 217 | ||
214 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) | 218 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN) |
215 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) | 219 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) |
216 | #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) | 220 | #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) |
217 | #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) | 221 | #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
218 | #define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) | 222 | #define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) |
219 | #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) | 223 | #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
220 | #define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) | 224 | #define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) |
225 | |||
226 | #define __pgprot_modify(prot,mask,bits) \ | ||
227 | __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) | ||
228 | |||
229 | #define pgprot_noncached(prot) \ | ||
230 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) | ||
231 | |||
232 | #define pgprot_writecombine(prot) \ | ||
233 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) | ||
234 | |||
235 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
236 | #define pgprot_dmacoherent(prot) \ | ||
237 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) | ||
238 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
239 | struct file; | ||
240 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
241 | unsigned long size, pgprot_t vma_prot); | ||
242 | #else | ||
243 | #define pgprot_dmacoherent(prot) \ | ||
244 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) | ||
245 | #endif | ||
221 | 246 | ||
222 | #endif /* __ASSEMBLY__ */ | 247 | #endif /* __ASSEMBLY__ */ |
223 | 248 | ||
@@ -255,26 +280,84 @@ extern pgprot_t pgprot_kernel; | |||
255 | extern struct page *empty_zero_page; | 280 | extern struct page *empty_zero_page; |
256 | #define ZERO_PAGE(vaddr) (empty_zero_page) | 281 | #define ZERO_PAGE(vaddr) (empty_zero_page) |
257 | 282 | ||
258 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | ||
259 | #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) | ||
260 | 283 | ||
261 | #define pte_none(pte) (!pte_val(pte)) | 284 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
262 | #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) | 285 | |
263 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) | 286 | /* to find an entry in a page-table-directory */ |
264 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) | 287 | #define pgd_index(addr) ((addr) >> PGDIR_SHIFT) |
288 | |||
289 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) | ||
290 | |||
291 | /* to find an entry in a kernel page-table-directory */ | ||
292 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | ||
293 | |||
294 | /* | ||
295 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
296 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
297 | * into the pgd entry) | ||
298 | */ | ||
299 | #define pgd_none(pgd) (0) | ||
300 | #define pgd_bad(pgd) (0) | ||
301 | #define pgd_present(pgd) (1) | ||
302 | #define pgd_clear(pgdp) do { } while (0) | ||
303 | #define set_pgd(pgd,pgdp) do { } while (0) | ||
304 | |||
305 | |||
306 | /* Find an entry in the second-level page table.. */ | ||
307 | #define pmd_offset(dir, addr) ((pmd_t *)(dir)) | ||
308 | |||
309 | #define pmd_none(pmd) (!pmd_val(pmd)) | ||
310 | #define pmd_present(pmd) (pmd_val(pmd)) | ||
311 | #define pmd_bad(pmd) (pmd_val(pmd) & 2) | ||
312 | |||
313 | #define copy_pmd(pmdpd,pmdps) \ | ||
314 | do { \ | ||
315 | pmdpd[0] = pmdps[0]; \ | ||
316 | pmdpd[1] = pmdps[1]; \ | ||
317 | flush_pmd_entry(pmdpd); \ | ||
318 | } while (0) | ||
319 | |||
320 | #define pmd_clear(pmdp) \ | ||
321 | do { \ | ||
322 | pmdp[0] = __pmd(0); \ | ||
323 | pmdp[1] = __pmd(0); \ | ||
324 | clean_pmd_entry(pmdp); \ | ||
325 | } while (0) | ||
326 | |||
327 | static inline pte_t *pmd_page_vaddr(pmd_t pmd) | ||
328 | { | ||
329 | return __va(pmd_val(pmd) & PAGE_MASK); | ||
330 | } | ||
331 | |||
332 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) | ||
333 | |||
334 | /* we don't need complex calculations here as the pmd is folded into the pgd */ | ||
335 | #define pmd_addr_end(addr,end) (end) | ||
265 | 336 | ||
266 | #define pte_offset_map(dir,addr) (__pte_map(dir) + __pte_index(addr)) | ||
267 | #define pte_unmap(pte) __pte_unmap(pte) | ||
268 | 337 | ||
269 | #ifndef CONFIG_HIGHPTE | 338 | #ifndef CONFIG_HIGHPTE |
270 | #define __pte_map(dir) pmd_page_vaddr(*(dir)) | 339 | #define __pte_map(pmd) pmd_page_vaddr(*(pmd)) |
271 | #define __pte_unmap(pte) do { } while (0) | 340 | #define __pte_unmap(pte) do { } while (0) |
272 | #else | 341 | #else |
273 | #define __pte_map(dir) ((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE) | 342 | #define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd))) |
274 | #define __pte_unmap(pte) kunmap_atomic((pte - PTRS_PER_PTE)) | 343 | #define __pte_unmap(pte) kunmap_atomic(pte) |
275 | #endif | 344 | #endif |
276 | 345 | ||
346 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
347 | |||
348 | #define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr)) | ||
349 | |||
350 | #define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) | ||
351 | #define pte_unmap(pte) __pte_unmap(pte) | ||
352 | |||
353 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | ||
354 | #define pfn_pte(pfn,prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
355 | |||
356 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | ||
357 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) | ||
358 | |||
277 | #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) | 359 | #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) |
360 | #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) | ||
278 | 361 | ||
279 | #if __LINUX_ARM_ARCH__ < 6 | 362 | #if __LINUX_ARM_ARCH__ < 6 |
280 | static inline void __sync_icache_dcache(pte_t pteval) | 363 | static inline void __sync_icache_dcache(pte_t pteval) |
@@ -295,15 +378,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
295 | } | 378 | } |
296 | } | 379 | } |
297 | 380 | ||
298 | /* | 381 | #define pte_none(pte) (!pte_val(pte)) |
299 | * The following only work if pte_present() is true. | ||
300 | * Undefined behaviour if not.. | ||
301 | */ | ||
302 | #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) | 382 | #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) |
303 | #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) | 383 | #define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) |
304 | #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) | 384 | #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) |
305 | #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) | 385 | #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) |
306 | #define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) | 386 | #define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) |
307 | #define pte_special(pte) (0) | 387 | #define pte_special(pte) (0) |
308 | 388 | ||
309 | #define pte_present_user(pte) \ | 389 | #define pte_present_user(pte) \ |
@@ -313,8 +393,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
313 | #define PTE_BIT_FUNC(fn,op) \ | 393 | #define PTE_BIT_FUNC(fn,op) \ |
314 | static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } | 394 | static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } |
315 | 395 | ||
316 | PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE); | 396 | PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); |
317 | PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE); | 397 | PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY); |
318 | PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); | 398 | PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); |
319 | PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); | 399 | PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); |
320 | PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); | 400 | PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); |
@@ -322,101 +402,13 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); | |||
322 | 402 | ||
323 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | 403 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } |
324 | 404 | ||
325 | #define __pgprot_modify(prot,mask,bits) \ | ||
326 | __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) | ||
327 | |||
328 | /* | ||
329 | * Mark the prot value as uncacheable and unbufferable. | ||
330 | */ | ||
331 | #define pgprot_noncached(prot) \ | ||
332 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) | ||
333 | #define pgprot_writecombine(prot) \ | ||
334 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) | ||
335 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
336 | #define pgprot_dmacoherent(prot) \ | ||
337 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) | ||
338 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
339 | struct file; | ||
340 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
341 | unsigned long size, pgprot_t vma_prot); | ||
342 | #else | ||
343 | #define pgprot_dmacoherent(prot) \ | ||
344 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) | ||
345 | #endif | ||
346 | |||
347 | #define pmd_none(pmd) (!pmd_val(pmd)) | ||
348 | #define pmd_present(pmd) (pmd_val(pmd)) | ||
349 | #define pmd_bad(pmd) (pmd_val(pmd) & 2) | ||
350 | |||
351 | #define copy_pmd(pmdpd,pmdps) \ | ||
352 | do { \ | ||
353 | pmdpd[0] = pmdps[0]; \ | ||
354 | pmdpd[1] = pmdps[1]; \ | ||
355 | flush_pmd_entry(pmdpd); \ | ||
356 | } while (0) | ||
357 | |||
358 | #define pmd_clear(pmdp) \ | ||
359 | do { \ | ||
360 | pmdp[0] = __pmd(0); \ | ||
361 | pmdp[1] = __pmd(0); \ | ||
362 | clean_pmd_entry(pmdp); \ | ||
363 | } while (0) | ||
364 | |||
365 | static inline pte_t *pmd_page_vaddr(pmd_t pmd) | ||
366 | { | ||
367 | unsigned long ptr; | ||
368 | |||
369 | ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); | ||
370 | ptr += PTRS_PER_PTE * sizeof(void *); | ||
371 | |||
372 | return __va(ptr); | ||
373 | } | ||
374 | |||
375 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) | ||
376 | |||
377 | /* we don't need complex calculations here as the pmd is folded into the pgd */ | ||
378 | #define pmd_addr_end(addr,end) (end) | ||
379 | |||
380 | /* | ||
381 | * Conversion functions: convert a page and protection to a page entry, | ||
382 | * and a page entry and page directory to the page they refer to. | ||
383 | */ | ||
384 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) | ||
385 | |||
386 | /* | ||
387 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
388 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
389 | * into the pgd entry) | ||
390 | */ | ||
391 | #define pgd_none(pgd) (0) | ||
392 | #define pgd_bad(pgd) (0) | ||
393 | #define pgd_present(pgd) (1) | ||
394 | #define pgd_clear(pgdp) do { } while (0) | ||
395 | #define set_pgd(pgd,pgdp) do { } while (0) | ||
396 | |||
397 | /* to find an entry in a page-table-directory */ | ||
398 | #define pgd_index(addr) ((addr) >> PGDIR_SHIFT) | ||
399 | |||
400 | #define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr)) | ||
401 | |||
402 | /* to find an entry in a kernel page-table-directory */ | ||
403 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | ||
404 | |||
405 | /* Find an entry in the second-level page table.. */ | ||
406 | #define pmd_offset(dir, addr) ((pmd_t *)(dir)) | ||
407 | |||
408 | /* Find an entry in the third-level page table.. */ | ||
409 | #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
410 | |||
411 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 405 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
412 | { | 406 | { |
413 | const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; | 407 | const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER; |
414 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); | 408 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); |
415 | return pte; | 409 | return pte; |
416 | } | 410 | } |
417 | 411 | ||
418 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
419 | |||
420 | /* | 412 | /* |
421 | * Encode and decode a swap entry. Swap entries are stored in the Linux | 413 | * Encode and decode a swap entry. Swap entries are stored in the Linux |
422 | * page tables as follows: | 414 | * page tables as follows: |
@@ -481,6 +473,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |||
481 | 473 | ||
482 | #define pgtable_cache_init() do { } while (0) | 474 | #define pgtable_cache_init() do { } while (0) |
483 | 475 | ||
476 | void identity_mapping_add(pgd_t *, unsigned long, unsigned long); | ||
477 | void identity_mapping_del(pgd_t *, unsigned long, unsigned long); | ||
478 | |||
484 | #endif /* !__ASSEMBLY__ */ | 479 | #endif /* !__ASSEMBLY__ */ |
485 | 480 | ||
486 | #endif /* CONFIG_MMU */ | 481 | #endif /* CONFIG_MMU */ |