-rw-r--r--   arch/arm/include/asm/page.h       6
-rw-r--r--   arch/arm/include/asm/pgalloc.h   50
-rw-r--r--   arch/arm/include/asm/pgtable.h  315
-rw-r--r--   arch/arm/kernel/smp.c            36
-rw-r--r--   arch/arm/kernel/traps.c          12
-rw-r--r--   arch/arm/mm/Makefile              4
-rw-r--r--   arch/arm/mm/fault-armv.c          2
-rw-r--r--   arch/arm/mm/fault.c               2
-rw-r--r--   arch/arm/mm/idmap.c              67
-rw-r--r--   arch/arm/mm/mm.h                  2
-rw-r--r--   arch/arm/mm/mmu.c                62
-rw-r--r--   arch/arm/mm/pgd.c                37
-rw-r--r--   arch/arm/mm/proc-macros.S        30
-rw-r--r--   arch/arm/mm/proc-v7.S            18
-rw-r--r--   arch/arm/mm/proc-xscale.S         4
15 files changed, 315 insertions, 332 deletions
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index a485ac3c8696..f51a69595f6e 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, | |||
151 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 151 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
152 | extern void copy_page(void *to, const void *from); | 152 | extern void copy_page(void *to, const void *from); |
153 | 153 | ||
154 | typedef unsigned long pteval_t; | ||
155 | |||
154 | #undef STRICT_MM_TYPECHECKS | 156 | #undef STRICT_MM_TYPECHECKS |
155 | 157 | ||
156 | #ifdef STRICT_MM_TYPECHECKS | 158 | #ifdef STRICT_MM_TYPECHECKS |
157 | /* | 159 | /* |
158 | * These are used to make use of C type-checking.. | 160 | * These are used to make use of C type-checking.. |
159 | */ | 161 | */ |
160 | typedef struct { unsigned long pte; } pte_t; | 162 | typedef struct { pteval_t pte; } pte_t; |
161 | typedef struct { unsigned long pmd; } pmd_t; | 163 | typedef struct { unsigned long pmd; } pmd_t; |
162 | typedef struct { unsigned long pgd[2]; } pgd_t; | 164 | typedef struct { unsigned long pgd[2]; } pgd_t; |
163 | typedef struct { unsigned long pgprot; } pgprot_t; | 165 | typedef struct { unsigned long pgprot; } pgprot_t; |
@@ -175,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
175 | /* | 177 | /* |
176 | * .. while these make it easier on the compiler | 178 | * .. while these make it easier on the compiler |
177 | */ | 179 | */ |
178 | typedef unsigned long pte_t; | 180 | typedef pteval_t pte_t; |
179 | typedef unsigned long pmd_t; | 181 | typedef unsigned long pmd_t; |
180 | typedef unsigned long pgd_t[2]; | 182 | typedef unsigned long pgd_t[2]; |
181 | typedef unsigned long pgprot_t; | 183 | typedef unsigned long pgprot_t; |
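The hunk above introduces pteval_t so that the raw PTE value has one named type, and the STRICT_MM_TYPECHECKS wrapper struct now carries it too; that lets the width be changed in a single place later (for example for a wider 64-bit table format). The following stand-alone sketch is ordinary user-space C, not kernel code: it only models the typedef-plus-wrapper idea, and the __pte/pte_val helpers here are illustrative stand-ins for the usual accessors.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pteval_t;              /* could become a 64-bit type later */
    typedef struct { pteval_t pte; } pte_t; /* STRICT_MM_TYPECHECKS flavour */

    #define __pte(x)    ((pte_t) { (x) })
    #define pte_val(x)  ((x).pte)

    int main(void)
    {
        pte_t pte = __pte(0x8000u | 0x3u);  /* made-up frame bits | flags */

        /* "pte + 1" would be a compile error here, unlike with a bare integer typedef */
        printf("pte value: %#x\n", (unsigned int)pte_val(pte));
        return 0;
    }

With the plain unsigned long typedef, mixing a PTE value with an unrelated integer compiles silently; the struct wrapper turns that mistake into a type error.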
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index b12cc98bbe04..9763be04f77e 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -30,14 +30,16 @@ | |||
30 | #define pmd_free(mm, pmd) do { } while (0) | 30 | #define pmd_free(mm, pmd) do { } while (0) |
31 | #define pgd_populate(mm,pmd,pte) BUG() | 31 | #define pgd_populate(mm,pmd,pte) BUG() |
32 | 32 | ||
33 | extern pgd_t *get_pgd_slow(struct mm_struct *mm); | 33 | extern pgd_t *pgd_alloc(struct mm_struct *mm); |
34 | extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); | 34 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); |
35 | |||
36 | #define pgd_alloc(mm) get_pgd_slow(mm) | ||
37 | #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) | ||
38 | 35 | ||
39 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | 36 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) |
40 | 37 | ||
38 | static inline void clean_pte_table(pte_t *pte) | ||
39 | { | ||
40 | clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE); | ||
41 | } | ||
42 | |||
41 | /* | 43 | /* |
42 | * Allocate one PTE table. | 44 | * Allocate one PTE table. |
43 | * | 45 | * |
@@ -45,14 +47,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); | |||
45 | * into one table thus: | 47 | * into one table thus: |
46 | * | 48 | * |
47 | * +------------+ | 49 | * +------------+ |
48 | * | h/w pt 0 | | ||
49 | * +------------+ | ||
50 | * | h/w pt 1 | | ||
51 | * +------------+ | ||
52 | * | Linux pt 0 | | 50 | * | Linux pt 0 | |
53 | * +------------+ | 51 | * +------------+ |
54 | * | Linux pt 1 | | 52 | * | Linux pt 1 | |
55 | * +------------+ | 53 | * +------------+ |
54 | * | h/w pt 0 | | ||
55 | * +------------+ | ||
56 | * | h/w pt 1 | | ||
57 | * +------------+ | ||
56 | */ | 58 | */ |
57 | static inline pte_t * | 59 | static inline pte_t * |
58 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | 60 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) |
@@ -60,10 +62,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | |||
60 | pte_t *pte; | 62 | pte_t *pte; |
61 | 63 | ||
62 | pte = (pte_t *)__get_free_page(PGALLOC_GFP); | 64 | pte = (pte_t *)__get_free_page(PGALLOC_GFP); |
63 | if (pte) { | 65 | if (pte) |
64 | clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE); | 66 | clean_pte_table(pte); |
65 | pte += PTRS_PER_PTE; | ||
66 | } | ||
67 | 67 | ||
68 | return pte; | 68 | return pte; |
69 | } | 69 | } |
@@ -79,10 +79,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) | |||
79 | pte = alloc_pages(PGALLOC_GFP, 0); | 79 | pte = alloc_pages(PGALLOC_GFP, 0); |
80 | #endif | 80 | #endif |
81 | if (pte) { | 81 | if (pte) { |
82 | if (!PageHighMem(pte)) { | 82 | if (!PageHighMem(pte)) |
83 | void *page = page_address(pte); | 83 | clean_pte_table(page_address(pte)); |
84 | clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE); | ||
85 | } | ||
86 | pgtable_page_ctor(pte); | 84 | pgtable_page_ctor(pte); |
87 | } | 85 | } |
88 | 86 | ||
@@ -94,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr) | |||
94 | */ | 92 | */ |
95 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | 93 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
96 | { | 94 | { |
97 | if (pte) { | 95 | if (pte) |
98 | pte -= PTRS_PER_PTE; | ||
99 | free_page((unsigned long)pte); | 96 | free_page((unsigned long)pte); |
100 | } | ||
101 | } | 97 | } |
102 | 98 | ||
103 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | 99 | static inline void pte_free(struct mm_struct *mm, pgtable_t pte) |
@@ -106,8 +102,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte) | |||
106 | __free_page(pte); | 102 | __free_page(pte); |
107 | } | 103 | } |
108 | 104 | ||
109 | static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) | 105 | static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, |
106 | unsigned long prot) | ||
110 | { | 107 | { |
108 | unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot; | ||
111 | pmdp[0] = __pmd(pmdval); | 109 | pmdp[0] = __pmd(pmdval); |
112 | pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); | 110 | pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); |
113 | flush_pmd_entry(pmdp); | 111 | flush_pmd_entry(pmdp); |
@@ -122,20 +120,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval) | |||
122 | static inline void | 120 | static inline void |
123 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) | 121 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) |
124 | { | 122 | { |
125 | unsigned long pte_ptr = (unsigned long)ptep; | ||
126 | |||
127 | /* | 123 | /* |
128 | * The pmd must be loaded with the physical | 124 | * The pmd must be loaded with the physical address of the PTE table |
129 | * address of the PTE table | ||
130 | */ | 125 | */ |
131 | pte_ptr -= PTRS_PER_PTE * sizeof(void *); | 126 | __pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE); |
132 | __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE); | ||
133 | } | 127 | } |
134 | 128 | ||
135 | static inline void | 129 | static inline void |
136 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) | 130 | pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) |
137 | { | 131 | { |
138 | __pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE); | 132 | __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); |
139 | } | 133 | } |
140 | #define pmd_pgtable(pmd) pmd_page(pmd) | 134 | #define pmd_pgtable(pmd) pmd_page(pmd) |
141 | 135 | ||
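The comment block and the __pmd_populate() change above describe the new layout of a PTE page: the two Linux tables now occupy the first 2KB and the two 256-entry hardware tables the second 2KB, so callers pass the physical address of the page plus the protection bits and __pmd_populate() adds PTE_HWTABLE_OFF itself. Below is a minimal stand-alone sketch of that arithmetic; the page address and the _PAGE_KERNEL_TABLE value are made up for illustration, and this is not the kernel's implementation.

    #include <stdint.h>
    #include <stdio.h>

    #define PTRS_PER_PTE       512
    #define PTE_HWTABLE_PTRS   PTRS_PER_PTE
    #define PTE_HWTABLE_OFF    (PTE_HWTABLE_PTRS * sizeof(uint32_t))  /* 2048 */
    #define _PAGE_KERNEL_TABLE 0x11u    /* placeholder prot bits, not the real value */

    int main(void)
    {
        uint32_t pte_phys = 0x80004000; /* assumed physical address of the PTE page */

        /* hardware tables start PTE_HWTABLE_OFF bytes in, after the Linux tables */
        uint32_t pmdval = (pte_phys + PTE_HWTABLE_OFF) | _PAGE_KERNEL_TABLE;

        /* each pmd entry points at one 256-entry (1KB) hardware table */
        uint32_t pmd0 = pmdval;
        uint32_t pmd1 = pmdval + 256 * sizeof(uint32_t);

        printf("pmd[0]=%08x pmd[1]=%08x\n", (unsigned)pmd0, (unsigned)pmd1);
        return 0;
    }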
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 53d1d5deb111..ebcb6432f45f 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -10,6 +10,7 @@ | |||
10 | #ifndef _ASMARM_PGTABLE_H | 10 | #ifndef _ASMARM_PGTABLE_H |
11 | #define _ASMARM_PGTABLE_H | 11 | #define _ASMARM_PGTABLE_H |
12 | 12 | ||
13 | #include <linux/const.h> | ||
13 | #include <asm-generic/4level-fixup.h> | 14 | #include <asm-generic/4level-fixup.h> |
14 | #include <asm/proc-fns.h> | 15 | #include <asm/proc-fns.h> |
15 | 16 | ||
@@ -54,7 +55,7 @@ | |||
54 | * Therefore, we tweak the implementation slightly - we tell Linux that we | 55 | * Therefore, we tweak the implementation slightly - we tell Linux that we |
55 | * have 2048 entries in the first level, each of which is 8 bytes (iow, two | 56 | * have 2048 entries in the first level, each of which is 8 bytes (iow, two |
56 | * hardware pointers to the second level.) The second level contains two | 57 | * hardware pointers to the second level.) The second level contains two |
57 | * hardware PTE tables arranged contiguously, followed by Linux versions | 58 | * hardware PTE tables arranged contiguously, preceded by Linux versions |
58 | * which contain the state information Linux needs. We, therefore, end up | 59 | * which contain the state information Linux needs. We, therefore, end up |
59 | * with 512 entries in the "PTE" level. | 60 | * with 512 entries in the "PTE" level. |
60 | * | 61 | * |
@@ -62,15 +63,15 @@ | |||
62 | * | 63 | * |
63 | * pgd pte | 64 | * pgd pte |
64 | * | | | 65 | * | | |
65 | * +--------+ +0 | 66 | * +--------+ |
66 | * | |-----> +------------+ +0 | 67 | * | | +------------+ +0 |
68 | * +- - - - + | Linux pt 0 | | ||
69 | * | | +------------+ +1024 | ||
70 | * +--------+ +0 | Linux pt 1 | | ||
71 | * | |-----> +------------+ +2048 | ||
67 | * +- - - - + +4 | h/w pt 0 | | 72 | * +- - - - + +4 | h/w pt 0 | |
68 | * | |-----> +------------+ +1024 | 73 | * | |-----> +------------+ +3072 |
69 | * +--------+ +8 | h/w pt 1 | | 74 | * +--------+ +8 | h/w pt 1 | |
70 | * | | +------------+ +2048 | ||
71 | * +- - - - + | Linux pt 0 | | ||
72 | * | | +------------+ +3072 | ||
73 | * +--------+ | Linux pt 1 | | ||
74 | * | | +------------+ +4096 | 75 | * | | +------------+ +4096 |
75 | * | 76 | * |
76 | * See L_PTE_xxx below for definitions of bits in the "Linux pt", and | 77 | * See L_PTE_xxx below for definitions of bits in the "Linux pt", and |
@@ -102,6 +103,10 @@ | |||
102 | #define PTRS_PER_PMD 1 | 103 | #define PTRS_PER_PMD 1 |
103 | #define PTRS_PER_PGD 2048 | 104 | #define PTRS_PER_PGD 2048 |
104 | 105 | ||
106 | #define PTE_HWTABLE_PTRS (PTRS_PER_PTE) | ||
107 | #define PTE_HWTABLE_OFF (PTE_HWTABLE_PTRS * sizeof(pte_t)) | ||
108 | #define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u32)) | ||
109 | |||
105 | /* | 110 | /* |
106 | * PMD_SHIFT determines the size of the area a second-level page table can map | 111 | * PMD_SHIFT determines the size of the area a second-level page table can map |
107 | * PGDIR_SHIFT determines what a third-level page table entry can map | 112 | * PGDIR_SHIFT determines what a third-level page table entry can map |
@@ -112,13 +117,13 @@ | |||
112 | #define LIBRARY_TEXT_START 0x0c000000 | 117 | #define LIBRARY_TEXT_START 0x0c000000 |
113 | 118 | ||
114 | #ifndef __ASSEMBLY__ | 119 | #ifndef __ASSEMBLY__ |
115 | extern void __pte_error(const char *file, int line, unsigned long val); | 120 | extern void __pte_error(const char *file, int line, pte_t); |
116 | extern void __pmd_error(const char *file, int line, unsigned long val); | 121 | extern void __pmd_error(const char *file, int line, pmd_t); |
117 | extern void __pgd_error(const char *file, int line, unsigned long val); | 122 | extern void __pgd_error(const char *file, int line, pgd_t); |
118 | 123 | ||
119 | #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) | 124 | #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte) |
120 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) | 125 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd) |
121 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) | 126 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd) |
122 | #endif /* !__ASSEMBLY__ */ | 127 | #endif /* !__ASSEMBLY__ */ |
123 | 128 | ||
124 | #define PMD_SIZE (1UL << PMD_SHIFT) | 129 | #define PMD_SIZE (1UL << PMD_SHIFT) |
@@ -133,8 +138,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
133 | */ | 138 | */ |
134 | #define FIRST_USER_ADDRESS PAGE_SIZE | 139 | #define FIRST_USER_ADDRESS PAGE_SIZE |
135 | 140 | ||
136 | #define FIRST_USER_PGD_NR 1 | 141 | #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) |
137 | #define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR) | ||
138 | 142 | ||
139 | /* | 143 | /* |
140 | * section address mask and size definitions. | 144 | * section address mask and size definitions. |
@@ -161,30 +165,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
161 | * The PTE table pointer refers to the hardware entries; the "Linux" | 165 | * The PTE table pointer refers to the hardware entries; the "Linux" |
162 | * entries are stored 1024 bytes below. | 166 | * entries are stored 1024 bytes below. |
163 | */ | 167 | */ |
164 | #define L_PTE_PRESENT (1 << 0) | 168 | #define L_PTE_PRESENT (_AT(pteval_t, 1) << 0) |
165 | #define L_PTE_YOUNG (1 << 1) | 169 | #define L_PTE_YOUNG (_AT(pteval_t, 1) << 1) |
166 | #define L_PTE_FILE (1 << 2) /* only when !PRESENT */ | 170 | #define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ |
167 | #define L_PTE_DIRTY (1 << 6) | 171 | #define L_PTE_DIRTY (_AT(pteval_t, 1) << 6) |
168 | #define L_PTE_WRITE (1 << 7) | 172 | #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) |
169 | #define L_PTE_USER (1 << 8) | 173 | #define L_PTE_USER (_AT(pteval_t, 1) << 8) |
170 | #define L_PTE_EXEC (1 << 9) | 174 | #define L_PTE_XN (_AT(pteval_t, 1) << 9) |
171 | #define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */ | 175 | #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */ |
172 | 176 | ||
173 | /* | 177 | /* |
174 | * These are the memory types, defined to be compatible with | 178 | * These are the memory types, defined to be compatible with |
175 | * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB | 179 | * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB |
176 | */ | 180 | */ |
177 | #define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */ | 181 | #define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */ |
178 | #define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */ | 182 | #define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */ |
179 | #define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */ | 183 | #define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 0x02) << 2) /* 0010 */ |
180 | #define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */ | 184 | #define L_PTE_MT_WRITEBACK (_AT(pteval_t, 0x03) << 2) /* 0011 */ |
181 | #define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */ | 185 | #define L_PTE_MT_MINICACHE (_AT(pteval_t, 0x06) << 2) /* 0110 (sa1100, xscale) */ |
182 | #define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */ | 186 | #define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 0x07) << 2) /* 0111 */ |
183 | #define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */ | 187 | #define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 0x04) << 2) /* 0100 */ |
184 | #define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */ | 188 | #define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 0x0c) << 2) /* 1100 */ |
185 | #define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */ | 189 | #define L_PTE_MT_DEV_WC (_AT(pteval_t, 0x09) << 2) /* 1001 */ |
186 | #define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */ | 190 | #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */ |
187 | #define L_PTE_MT_MASK (0x0f << 2) | 191 | #define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2) |
188 | 192 | ||
189 | #ifndef __ASSEMBLY__ | 193 | #ifndef __ASSEMBLY__ |
190 | 194 | ||
@@ -201,23 +205,44 @@ extern pgprot_t pgprot_kernel; | |||
201 | 205 | ||
202 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) | 206 | #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) |
203 | 207 | ||
204 | #define PAGE_NONE pgprot_user | 208 | #define PAGE_NONE _MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY) |
205 | #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE) | 209 | #define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN) |
206 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) | 210 | #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER) |
207 | #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER) | 211 | #define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
208 | #define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) | 212 | #define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) |
209 | #define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER) | 213 | #define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
210 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC) | 214 | #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) |
211 | #define PAGE_KERNEL pgprot_kernel | 215 | #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) |
212 | #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC) | 216 | #define PAGE_KERNEL_EXEC pgprot_kernel |
213 | 217 | ||
214 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT) | 218 | #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN) |
215 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE) | 219 | #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) |
216 | #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC) | 220 | #define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER) |
217 | #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) | 221 | #define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
218 | #define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) | 222 | #define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) |
219 | #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER) | 223 | #define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN) |
220 | #define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC) | 224 | #define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY) |
225 | |||
226 | #define __pgprot_modify(prot,mask,bits) \ | ||
227 | __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) | ||
228 | |||
229 | #define pgprot_noncached(prot) \ | ||
230 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) | ||
231 | |||
232 | #define pgprot_writecombine(prot) \ | ||
233 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) | ||
234 | |||
235 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
236 | #define pgprot_dmacoherent(prot) \ | ||
237 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN) | ||
238 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
239 | struct file; | ||
240 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
241 | unsigned long size, pgprot_t vma_prot); | ||
242 | #else | ||
243 | #define pgprot_dmacoherent(prot) \ | ||
244 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN) | ||
245 | #endif | ||
221 | 246 | ||
222 | #endif /* __ASSEMBLY__ */ | 247 | #endif /* __ASSEMBLY__ */ |
223 | 248 | ||
@@ -255,26 +280,84 @@ extern pgprot_t pgprot_kernel; | |||
255 | extern struct page *empty_zero_page; | 280 | extern struct page *empty_zero_page; |
256 | #define ZERO_PAGE(vaddr) (empty_zero_page) | 281 | #define ZERO_PAGE(vaddr) (empty_zero_page) |
257 | 282 | ||
258 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | ||
259 | #define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))) | ||
260 | 283 | ||
261 | #define pte_none(pte) (!pte_val(pte)) | 284 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
262 | #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) | 285 | |
263 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) | 286 | /* to find an entry in a page-table-directory */ |
264 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) | 287 | #define pgd_index(addr) ((addr) >> PGDIR_SHIFT) |
288 | |||
289 | #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr)) | ||
290 | |||
291 | /* to find an entry in a kernel page-table-directory */ | ||
292 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | ||
293 | |||
294 | /* | ||
295 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
296 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
297 | * into the pgd entry) | ||
298 | */ | ||
299 | #define pgd_none(pgd) (0) | ||
300 | #define pgd_bad(pgd) (0) | ||
301 | #define pgd_present(pgd) (1) | ||
302 | #define pgd_clear(pgdp) do { } while (0) | ||
303 | #define set_pgd(pgd,pgdp) do { } while (0) | ||
304 | |||
305 | |||
306 | /* Find an entry in the second-level page table.. */ | ||
307 | #define pmd_offset(dir, addr) ((pmd_t *)(dir)) | ||
308 | |||
309 | #define pmd_none(pmd) (!pmd_val(pmd)) | ||
310 | #define pmd_present(pmd) (pmd_val(pmd)) | ||
311 | #define pmd_bad(pmd) (pmd_val(pmd) & 2) | ||
312 | |||
313 | #define copy_pmd(pmdpd,pmdps) \ | ||
314 | do { \ | ||
315 | pmdpd[0] = pmdps[0]; \ | ||
316 | pmdpd[1] = pmdps[1]; \ | ||
317 | flush_pmd_entry(pmdpd); \ | ||
318 | } while (0) | ||
319 | |||
320 | #define pmd_clear(pmdp) \ | ||
321 | do { \ | ||
322 | pmdp[0] = __pmd(0); \ | ||
323 | pmdp[1] = __pmd(0); \ | ||
324 | clean_pmd_entry(pmdp); \ | ||
325 | } while (0) | ||
326 | |||
327 | static inline pte_t *pmd_page_vaddr(pmd_t pmd) | ||
328 | { | ||
329 | return __va(pmd_val(pmd) & PAGE_MASK); | ||
330 | } | ||
331 | |||
332 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) | ||
333 | |||
334 | /* we don't need complex calculations here as the pmd is folded into the pgd */ | ||
335 | #define pmd_addr_end(addr,end) (end) | ||
265 | 336 | ||
266 | #define pte_offset_map(dir,addr) (__pte_map(dir) + __pte_index(addr)) | ||
267 | #define pte_unmap(pte) __pte_unmap(pte) | ||
268 | 337 | ||
269 | #ifndef CONFIG_HIGHPTE | 338 | #ifndef CONFIG_HIGHPTE |
270 | #define __pte_map(dir) pmd_page_vaddr(*(dir)) | 339 | #define __pte_map(pmd) pmd_page_vaddr(*(pmd)) |
271 | #define __pte_unmap(pte) do { } while (0) | 340 | #define __pte_unmap(pte) do { } while (0) |
272 | #else | 341 | #else |
273 | #define __pte_map(dir) ((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE) | 342 | #define __pte_map(pmd) (pte_t *)kmap_atomic(pmd_page(*(pmd))) |
274 | #define __pte_unmap(pte) kunmap_atomic((pte - PTRS_PER_PTE)) | 343 | #define __pte_unmap(pte) kunmap_atomic(pte) |
275 | #endif | 344 | #endif |
276 | 345 | ||
346 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
347 | |||
348 | #define pte_offset_kernel(pmd,addr) (pmd_page_vaddr(*(pmd)) + pte_index(addr)) | ||
349 | |||
350 | #define pte_offset_map(pmd,addr) (__pte_map(pmd) + pte_index(addr)) | ||
351 | #define pte_unmap(pte) __pte_unmap(pte) | ||
352 | |||
353 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | ||
354 | #define pfn_pte(pfn,prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
355 | |||
356 | #define pte_page(pte) pfn_to_page(pte_pfn(pte)) | ||
357 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot) | ||
358 | |||
277 | #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) | 359 | #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) |
360 | #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) | ||
278 | 361 | ||
279 | #if __LINUX_ARM_ARCH__ < 6 | 362 | #if __LINUX_ARM_ARCH__ < 6 |
280 | static inline void __sync_icache_dcache(pte_t pteval) | 363 | static inline void __sync_icache_dcache(pte_t pteval) |
@@ -295,15 +378,12 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
295 | } | 378 | } |
296 | } | 379 | } |
297 | 380 | ||
298 | /* | 381 | #define pte_none(pte) (!pte_val(pte)) |
299 | * The following only work if pte_present() is true. | ||
300 | * Undefined behaviour if not.. | ||
301 | */ | ||
302 | #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) | 382 | #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) |
303 | #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) | 383 | #define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) |
304 | #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) | 384 | #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) |
305 | #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) | 385 | #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) |
306 | #define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) | 386 | #define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) |
307 | #define pte_special(pte) (0) | 387 | #define pte_special(pte) (0) |
308 | 388 | ||
309 | #define pte_present_user(pte) \ | 389 | #define pte_present_user(pte) \ |
@@ -313,8 +393,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
313 | #define PTE_BIT_FUNC(fn,op) \ | 393 | #define PTE_BIT_FUNC(fn,op) \ |
314 | static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } | 394 | static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } |
315 | 395 | ||
316 | PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE); | 396 | PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); |
317 | PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE); | 397 | PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY); |
318 | PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); | 398 | PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); |
319 | PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); | 399 | PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); |
320 | PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); | 400 | PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); |
@@ -322,101 +402,13 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); | |||
322 | 402 | ||
323 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | 403 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } |
324 | 404 | ||
325 | #define __pgprot_modify(prot,mask,bits) \ | ||
326 | __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) | ||
327 | |||
328 | /* | ||
329 | * Mark the prot value as uncacheable and unbufferable. | ||
330 | */ | ||
331 | #define pgprot_noncached(prot) \ | ||
332 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED) | ||
333 | #define pgprot_writecombine(prot) \ | ||
334 | __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE) | ||
335 | #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE | ||
336 | #define pgprot_dmacoherent(prot) \ | ||
337 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE) | ||
338 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
339 | struct file; | ||
340 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
341 | unsigned long size, pgprot_t vma_prot); | ||
342 | #else | ||
343 | #define pgprot_dmacoherent(prot) \ | ||
344 | __pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED) | ||
345 | #endif | ||
346 | |||
347 | #define pmd_none(pmd) (!pmd_val(pmd)) | ||
348 | #define pmd_present(pmd) (pmd_val(pmd)) | ||
349 | #define pmd_bad(pmd) (pmd_val(pmd) & 2) | ||
350 | |||
351 | #define copy_pmd(pmdpd,pmdps) \ | ||
352 | do { \ | ||
353 | pmdpd[0] = pmdps[0]; \ | ||
354 | pmdpd[1] = pmdps[1]; \ | ||
355 | flush_pmd_entry(pmdpd); \ | ||
356 | } while (0) | ||
357 | |||
358 | #define pmd_clear(pmdp) \ | ||
359 | do { \ | ||
360 | pmdp[0] = __pmd(0); \ | ||
361 | pmdp[1] = __pmd(0); \ | ||
362 | clean_pmd_entry(pmdp); \ | ||
363 | } while (0) | ||
364 | |||
365 | static inline pte_t *pmd_page_vaddr(pmd_t pmd) | ||
366 | { | ||
367 | unsigned long ptr; | ||
368 | |||
369 | ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1); | ||
370 | ptr += PTRS_PER_PTE * sizeof(void *); | ||
371 | |||
372 | return __va(ptr); | ||
373 | } | ||
374 | |||
375 | #define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) | ||
376 | |||
377 | /* we don't need complex calculations here as the pmd is folded into the pgd */ | ||
378 | #define pmd_addr_end(addr,end) (end) | ||
379 | |||
380 | /* | ||
381 | * Conversion functions: convert a page and protection to a page entry, | ||
382 | * and a page entry and page directory to the page they refer to. | ||
383 | */ | ||
384 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) | ||
385 | |||
386 | /* | ||
387 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
388 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
389 | * into the pgd entry) | ||
390 | */ | ||
391 | #define pgd_none(pgd) (0) | ||
392 | #define pgd_bad(pgd) (0) | ||
393 | #define pgd_present(pgd) (1) | ||
394 | #define pgd_clear(pgdp) do { } while (0) | ||
395 | #define set_pgd(pgd,pgdp) do { } while (0) | ||
396 | |||
397 | /* to find an entry in a page-table-directory */ | ||
398 | #define pgd_index(addr) ((addr) >> PGDIR_SHIFT) | ||
399 | |||
400 | #define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr)) | ||
401 | |||
402 | /* to find an entry in a kernel page-table-directory */ | ||
403 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | ||
404 | |||
405 | /* Find an entry in the second-level page table.. */ | ||
406 | #define pmd_offset(dir, addr) ((pmd_t *)(dir)) | ||
407 | |||
408 | /* Find an entry in the third-level page table.. */ | ||
409 | #define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
410 | |||
411 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 405 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
412 | { | 406 | { |
413 | const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER; | 407 | const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER; |
414 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); | 408 | pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); |
415 | return pte; | 409 | return pte; |
416 | } | 410 | } |
417 | 411 | ||
418 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
419 | |||
420 | /* | 412 | /* |
421 | * Encode and decode a swap entry. Swap entries are stored in the Linux | 413 | * Encode and decode a swap entry. Swap entries are stored in the Linux |
422 | * page tables as follows: | 414 | * page tables as follows: |
@@ -481,6 +473,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |||
481 | 473 | ||
482 | #define pgtable_cache_init() do { } while (0) | 474 | #define pgtable_cache_init() do { } while (0) |
483 | 475 | ||
476 | void identity_mapping_add(pgd_t *, unsigned long, unsigned long); | ||
477 | void identity_mapping_del(pgd_t *, unsigned long, unsigned long); | ||
478 | |||
484 | #endif /* !__ASSEMBLY__ */ | 479 | #endif /* !__ASSEMBLY__ */ |
485 | 480 | ||
486 | #endif /* CONFIG_MMU */ | 481 | #endif /* CONFIG_MMU */ |
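A large part of the pgtable.h diff is the polarity flip from L_PTE_WRITE/L_PTE_EXEC (bit set = permission granted) to L_PTE_RDONLY/L_PTE_XN (bit set = permission removed), which is why pte_write()/pte_exec() now test for a clear bit and pte_wrprotect() sets one. The sketch below is a stand-alone model of the new helpers using the bit positions from the diff; it is not the kernel's implementation.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pteval_t;

    #define L_PTE_RDONLY (1u << 7)      /* set = read-only */
    #define L_PTE_XN     (1u << 9)      /* set = no execute */

    static int pte_write(pteval_t pte) { return !(pte & L_PTE_RDONLY); }
    static int pte_exec(pteval_t pte)  { return !(pte & L_PTE_XN); }
    static pteval_t pte_wrprotect(pteval_t pte) { return pte | L_PTE_RDONLY; }

    int main(void)
    {
        pteval_t pte = 0;                       /* writable and executable */
        printf("write=%d exec=%d\n", pte_write(pte), pte_exec(pte));

        pte = pte_wrprotect(pte) | L_PTE_XN;    /* now read-only, no-exec */
        printf("write=%d exec=%d\n", pte_write(pte), pte_exec(pte));
        return 0;
    }

A freshly cleared permission field is now the most permissive state, so the protection macros above add restriction bits (RDONLY, XN) instead of permission bits.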
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5ec79b4ff950..4539ebcb089f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -55,42 +55,6 @@ enum ipi_msg_type { | |||
55 | IPI_CPU_STOP, | 55 | IPI_CPU_STOP, |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static inline void identity_mapping_add(pgd_t *pgd, unsigned long start, | ||
59 | unsigned long end) | ||
60 | { | ||
61 | unsigned long addr, prot; | ||
62 | pmd_t *pmd; | ||
63 | |||
64 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; | ||
65 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | ||
66 | prot |= PMD_BIT4; | ||
67 | |||
68 | for (addr = start & PGDIR_MASK; addr < end;) { | ||
69 | pmd = pmd_offset(pgd + pgd_index(addr), addr); | ||
70 | pmd[0] = __pmd(addr | prot); | ||
71 | addr += SECTION_SIZE; | ||
72 | pmd[1] = __pmd(addr | prot); | ||
73 | addr += SECTION_SIZE; | ||
74 | flush_pmd_entry(pmd); | ||
75 | outer_clean_range(__pa(pmd), __pa(pmd + 1)); | ||
76 | } | ||
77 | } | ||
78 | |||
79 | static inline void identity_mapping_del(pgd_t *pgd, unsigned long start, | ||
80 | unsigned long end) | ||
81 | { | ||
82 | unsigned long addr; | ||
83 | pmd_t *pmd; | ||
84 | |||
85 | for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) { | ||
86 | pmd = pmd_offset(pgd + pgd_index(addr), addr); | ||
87 | pmd[0] = __pmd(0); | ||
88 | pmd[1] = __pmd(0); | ||
89 | clean_pmd_entry(pmd); | ||
90 | outer_clean_range(__pa(pmd), __pa(pmd + 1)); | ||
91 | } | ||
92 | } | ||
93 | |||
94 | int __cpuinit __cpu_up(unsigned int cpu) | 58 | int __cpuinit __cpu_up(unsigned int cpu) |
95 | { | 59 | { |
96 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); | 60 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index e02f4f7537c5..ee57640ba2bb 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -710,19 +710,19 @@ void __readwrite_bug(const char *fn) | |||
710 | } | 710 | } |
711 | EXPORT_SYMBOL(__readwrite_bug); | 711 | EXPORT_SYMBOL(__readwrite_bug); |
712 | 712 | ||
713 | void __pte_error(const char *file, int line, unsigned long val) | 713 | void __pte_error(const char *file, int line, pte_t pte) |
714 | { | 714 | { |
715 | printk("%s:%d: bad pte %08lx.\n", file, line, val); | 715 | printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte)); |
716 | } | 716 | } |
717 | 717 | ||
718 | void __pmd_error(const char *file, int line, unsigned long val) | 718 | void __pmd_error(const char *file, int line, pmd_t pmd) |
719 | { | 719 | { |
720 | printk("%s:%d: bad pmd %08lx.\n", file, line, val); | 720 | printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd)); |
721 | } | 721 | } |
722 | 722 | ||
723 | void __pgd_error(const char *file, int line, unsigned long val) | 723 | void __pgd_error(const char *file, int line, pgd_t pgd) |
724 | { | 724 | { |
725 | printk("%s:%d: bad pgd %08lx.\n", file, line, val); | 725 | printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd)); |
726 | } | 726 | } |
727 | 727 | ||
728 | asmlinkage void __div0(void) | 728 | asmlinkage void __div0(void) |
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d63b6c413758..00d74a04af3a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -5,8 +5,8 @@ | |||
5 | obj-y := dma-mapping.o extable.o fault.o init.o \ | 5 | obj-y := dma-mapping.o extable.o fault.o init.o \ |
6 | iomap.o | 6 | iomap.o |
7 | 7 | ||
8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \ | 8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ |
9 | pgd.o mmu.o vmregion.o | 9 | mmap.o pgd.o mmu.o vmregion.o |
10 | 10 | ||
11 | ifneq ($(CONFIG_MMU),y) | 11 | ifneq ($(CONFIG_MMU),y) |
12 | obj-y += nommu.o | 12 | obj-y += nommu.o |
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 83e59f870426..01210dba0221 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include "mm.h" | 27 | #include "mm.h" |
28 | 28 | ||
29 | static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; | 29 | static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE; |
30 | 30 | ||
31 | #if __LINUX_ARM_ARCH__ < 6 | 31 | #if __LINUX_ARM_ARCH__ < 6 |
32 | /* | 32 | /* |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 1e21e125fe3a..f10f9bac2206 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -108,7 +108,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
108 | 108 | ||
109 | pte = pte_offset_map(pmd, addr); | 109 | pte = pte_offset_map(pmd, addr); |
110 | printk(", *pte=%08lx", pte_val(*pte)); | 110 | printk(", *pte=%08lx", pte_val(*pte)); |
111 | printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); | 111 | printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS])); |
112 | pte_unmap(pte); | 112 | pte_unmap(pte); |
113 | } while(0); | 113 | } while(0); |
114 | 114 | ||
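Because the Linux table now comes first in the page, show_pte() finds the hardware copy of an entry PTE_HWTABLE_PTRS slots after the Linux one instead of before it. A small stand-alone model of that indexing, with made-up entry values:

    #include <stdint.h>
    #include <stdio.h>

    #define PTRS_PER_PTE     512
    #define PTE_HWTABLE_PTRS PTRS_PER_PTE

    int main(void)
    {
        static uint32_t pte_page[2 * PTRS_PER_PTE]; /* Linux half + h/w half */
        uint32_t *pte = &pte_page[5];               /* some Linux entry */

        pte[0] = 0x87654063;                        /* invented Linux view */
        pte[PTE_HWTABLE_PTRS] = 0x8765405f;         /* invented hardware view */

        printf("*pte=%08x, *ppte=%08x\n",
               (unsigned)pte[0], (unsigned)pte[PTE_HWTABLE_PTRS]);
        return 0;
    }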
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
new file mode 100644
index 000000000000..57299446f787
--- /dev/null
+++ b/arch/arm/mm/idmap.c
@@ -0,0 +1,67 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | |||
3 | #include <asm/cputype.h> | ||
4 | #include <asm/pgalloc.h> | ||
5 | #include <asm/pgtable.h> | ||
6 | |||
7 | static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end, | ||
8 | unsigned long prot) | ||
9 | { | ||
10 | pmd_t *pmd = pmd_offset(pgd, addr); | ||
11 | |||
12 | addr = (addr & PMD_MASK) | prot; | ||
13 | pmd[0] = __pmd(addr); | ||
14 | addr += SECTION_SIZE; | ||
15 | pmd[1] = __pmd(addr); | ||
16 | flush_pmd_entry(pmd); | ||
17 | } | ||
18 | |||
19 | void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) | ||
20 | { | ||
21 | unsigned long prot, next; | ||
22 | |||
23 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; | ||
24 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | ||
25 | prot |= PMD_BIT4; | ||
26 | |||
27 | pgd += pgd_index(addr); | ||
28 | do { | ||
29 | next = pgd_addr_end(addr, end); | ||
30 | idmap_add_pmd(pgd, addr, next, prot); | ||
31 | } while (pgd++, addr = next, addr != end); | ||
32 | } | ||
33 | |||
34 | #ifdef CONFIG_SMP | ||
35 | static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end) | ||
36 | { | ||
37 | pmd_t *pmd = pmd_offset(pgd, addr); | ||
38 | pmd_clear(pmd); | ||
39 | } | ||
40 | |||
41 | void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) | ||
42 | { | ||
43 | unsigned long next; | ||
44 | |||
45 | pgd += pgd_index(addr); | ||
46 | do { | ||
47 | next = pgd_addr_end(addr, end); | ||
48 | idmap_del_pmd(pgd, addr, next); | ||
49 | } while (pgd++, addr = next, addr != end); | ||
50 | } | ||
51 | #endif | ||
52 | |||
53 | /* | ||
54 | * In order to soft-boot, we need to insert a 1:1 mapping in place of | ||
55 | * the user-mode pages. This will then ensure that we have predictable | ||
56 | * results when turning the mmu off | ||
57 | */ | ||
58 | void setup_mm_for_reboot(char mode) | ||
59 | { | ||
60 | /* | ||
61 | * We need to access to user-mode page tables here. For kernel threads | ||
62 | * we don't have any user-mode mappings so we use the context that we | ||
63 | * "borrowed". | ||
64 | */ | ||
65 | identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE); | ||
66 | local_flush_tlb_all(); | ||
67 | } | ||
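idmap.c collects the identity-mapping helpers removed from smp.c above, and setup_mm_for_reboot() moves here from mmu.c (see that file's hunk further down). identity_mapping_add() walks the pgd in PGDIR_SIZE steps, clamping the final step with pgd_addr_end(), and fills each step with a pair of 1MB section entries. The loop below is a stand-alone model of that walk only; the pgd_addr_end() clamp mirrors the generic kernel macro, and the address range is invented.

    #include <stdio.h>

    #define PGDIR_SHIFT 21
    #define PGDIR_SIZE  (1UL << PGDIR_SHIFT)    /* 2MB: two 1MB sections */
    #define PGDIR_MASK  (~(PGDIR_SIZE - 1))

    static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
    {
        unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
        return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
        unsigned long addr = 0, end = 0x500000, next;   /* 5MB of 1:1 mapping */

        do {
            next = pgd_addr_end(addr, end);
            /* here the kernel writes the pmd[0]/pmd[1] section entries */
            printf("map %#lx..%#lx 1:1\n", addr, next);
            addr = next;
        } while (addr != end);
        return 0;
    }

The declarations added to pgtable.h earlier in this diff exist so that callers such as the SMP bring-up code can keep using these helpers from their new home.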
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6630620380a4..36960df5fb76 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -16,7 +16,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt) | |||
16 | } | 16 | } |
17 | 17 | ||
18 | struct mem_type { | 18 | struct mem_type { |
19 | unsigned int prot_pte; | 19 | pteval_t prot_pte; |
20 | unsigned int prot_l1; | 20 | unsigned int prot_l1; |
21 | unsigned int prot_sect; | 21 | unsigned int prot_sect; |
22 | unsigned int domain; | 22 | unsigned int domain; |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 79c01f540cbe..3c67e92f7d59 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -63,7 +63,7 @@ struct cachepolicy { | |||
63 | const char policy[16]; | 63 | const char policy[16]; |
64 | unsigned int cr_mask; | 64 | unsigned int cr_mask; |
65 | unsigned int pmd; | 65 | unsigned int pmd; |
66 | unsigned int pte; | 66 | pteval_t pte; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | static struct cachepolicy cache_policies[] __initdata = { | 69 | static struct cachepolicy cache_policies[] __initdata = { |
@@ -191,7 +191,7 @@ void adjust_cr(unsigned long mask, unsigned long set) | |||
191 | } | 191 | } |
192 | #endif | 192 | #endif |
193 | 193 | ||
194 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE | 194 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN |
195 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE | 195 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE |
196 | 196 | ||
197 | static struct mem_type mem_types[] = { | 197 | static struct mem_type mem_types[] = { |
@@ -236,19 +236,18 @@ static struct mem_type mem_types[] = { | |||
236 | }, | 236 | }, |
237 | [MT_LOW_VECTORS] = { | 237 | [MT_LOW_VECTORS] = { |
238 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 238 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
239 | L_PTE_EXEC, | 239 | L_PTE_RDONLY, |
240 | .prot_l1 = PMD_TYPE_TABLE, | 240 | .prot_l1 = PMD_TYPE_TABLE, |
241 | .domain = DOMAIN_USER, | 241 | .domain = DOMAIN_USER, |
242 | }, | 242 | }, |
243 | [MT_HIGH_VECTORS] = { | 243 | [MT_HIGH_VECTORS] = { |
244 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 244 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
245 | L_PTE_USER | L_PTE_EXEC, | 245 | L_PTE_USER | L_PTE_RDONLY, |
246 | .prot_l1 = PMD_TYPE_TABLE, | 246 | .prot_l1 = PMD_TYPE_TABLE, |
247 | .domain = DOMAIN_USER, | 247 | .domain = DOMAIN_USER, |
248 | }, | 248 | }, |
249 | [MT_MEMORY] = { | 249 | [MT_MEMORY] = { |
250 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 250 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, |
251 | L_PTE_WRITE | L_PTE_EXEC, | ||
252 | .prot_l1 = PMD_TYPE_TABLE, | 251 | .prot_l1 = PMD_TYPE_TABLE, |
253 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 252 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
254 | .domain = DOMAIN_KERNEL, | 253 | .domain = DOMAIN_KERNEL, |
@@ -259,21 +258,20 @@ static struct mem_type mem_types[] = { | |||
259 | }, | 258 | }, |
260 | [MT_MEMORY_NONCACHED] = { | 259 | [MT_MEMORY_NONCACHED] = { |
261 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 260 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
262 | L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, | 261 | L_PTE_MT_BUFFERABLE, |
263 | .prot_l1 = PMD_TYPE_TABLE, | 262 | .prot_l1 = PMD_TYPE_TABLE, |
264 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 263 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
265 | .domain = DOMAIN_KERNEL, | 264 | .domain = DOMAIN_KERNEL, |
266 | }, | 265 | }, |
267 | [MT_MEMORY_DTCM] = { | 266 | [MT_MEMORY_DTCM] = { |
268 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 267 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
269 | L_PTE_WRITE, | 268 | L_PTE_XN, |
270 | .prot_l1 = PMD_TYPE_TABLE, | 269 | .prot_l1 = PMD_TYPE_TABLE, |
271 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | 270 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, |
272 | .domain = DOMAIN_KERNEL, | 271 | .domain = DOMAIN_KERNEL, |
273 | }, | 272 | }, |
274 | [MT_MEMORY_ITCM] = { | 273 | [MT_MEMORY_ITCM] = { |
275 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 274 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, |
276 | L_PTE_WRITE | L_PTE_EXEC, | ||
277 | .prot_l1 = PMD_TYPE_TABLE, | 275 | .prot_l1 = PMD_TYPE_TABLE, |
278 | .domain = DOMAIN_KERNEL, | 276 | .domain = DOMAIN_KERNEL, |
279 | }, | 277 | }, |
@@ -480,7 +478,7 @@ static void __init build_mem_type_table(void) | |||
480 | 478 | ||
481 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); | 479 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); |
482 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | | 480 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | |
483 | L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot); | 481 | L_PTE_DIRTY | kern_pgprot); |
484 | 482 | ||
485 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | 483 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
486 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 484 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
@@ -536,7 +534,7 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned l | |||
536 | { | 534 | { |
537 | if (pmd_none(*pmd)) { | 535 | if (pmd_none(*pmd)) { |
538 | pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); | 536 | pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); |
539 | __pmd_populate(pmd, __pa(pte) | prot); | 537 | __pmd_populate(pmd, __pa(pte), prot); |
540 | } | 538 | } |
541 | BUG_ON(pmd_bad(*pmd)); | 539 | BUG_ON(pmd_bad(*pmd)); |
542 | return pte_offset_kernel(pmd, addr); | 540 | return pte_offset_kernel(pmd, addr); |
@@ -554,7 +552,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
554 | } | 552 | } |
555 | 553 | ||
556 | static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, | 554 | static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, |
557 | unsigned long end, unsigned long phys, | 555 | unsigned long end, phys_addr_t phys, |
558 | const struct mem_type *type) | 556 | const struct mem_type *type) |
559 | { | 557 | { |
560 | pmd_t *pmd = pmd_offset(pgd, addr); | 558 | pmd_t *pmd = pmd_offset(pgd, addr); |
@@ -589,7 +587,8 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, | |||
589 | static void __init create_36bit_mapping(struct map_desc *md, | 587 | static void __init create_36bit_mapping(struct map_desc *md, |
590 | const struct mem_type *type) | 588 | const struct mem_type *type) |
591 | { | 589 | { |
592 | unsigned long phys, addr, length, end; | 590 | unsigned long addr, length, end; |
591 | phys_addr_t phys; | ||
593 | pgd_t *pgd; | 592 | pgd_t *pgd; |
594 | 593 | ||
595 | addr = md->virtual; | 594 | addr = md->virtual; |
@@ -1044,38 +1043,3 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1044 | empty_zero_page = virt_to_page(zero_page); | 1043 | empty_zero_page = virt_to_page(zero_page); |
1045 | __flush_dcache_page(NULL, empty_zero_page); | 1044 | __flush_dcache_page(NULL, empty_zero_page); |
1046 | } | 1045 | } |
1047 | |||
1048 | /* | ||
1049 | * In order to soft-boot, we need to insert a 1:1 mapping in place of | ||
1050 | * the user-mode pages. This will then ensure that we have predictable | ||
1051 | * results when turning the mmu off | ||
1052 | */ | ||
1053 | void setup_mm_for_reboot(char mode) | ||
1054 | { | ||
1055 | unsigned long base_pmdval; | ||
1056 | pgd_t *pgd; | ||
1057 | int i; | ||
1058 | |||
1059 | /* | ||
1060 | * We need to access to user-mode page tables here. For kernel threads | ||
1061 | * we don't have any user-mode mappings so we use the context that we | ||
1062 | * "borrowed". | ||
1063 | */ | ||
1064 | pgd = current->active_mm->pgd; | ||
1065 | |||
1066 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; | ||
1067 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | ||
1068 | base_pmdval |= PMD_BIT4; | ||
1069 | |||
1070 | for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { | ||
1071 | unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; | ||
1072 | pmd_t *pmd; | ||
1073 | |||
1074 | pmd = pmd_off(pgd, i << PGDIR_SHIFT); | ||
1075 | pmd[0] = __pmd(pmdval); | ||
1076 | pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))); | ||
1077 | flush_pmd_entry(pmd); | ||
1078 | } | ||
1079 | |||
1080 | local_flush_tlb_all(); | ||
1081 | } | ||
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 69bbfc6645a6..93292a18cf77 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -17,12 +17,10 @@ | |||
17 | 17 | ||
18 | #include "mm.h" | 18 | #include "mm.h" |
19 | 19 | ||
20 | #define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD) | ||
21 | |||
22 | /* | 20 | /* |
23 | * need to get a 16k page for level 1 | 21 | * need to get a 16k page for level 1 |
24 | */ | 22 | */ |
25 | pgd_t *get_pgd_slow(struct mm_struct *mm) | 23 | pgd_t *pgd_alloc(struct mm_struct *mm) |
26 | { | 24 | { |
27 | pgd_t *new_pgd, *init_pgd; | 25 | pgd_t *new_pgd, *init_pgd; |
28 | pmd_t *new_pmd, *init_pmd; | 26 | pmd_t *new_pmd, *init_pmd; |
@@ -32,14 +30,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm) | |||
32 | if (!new_pgd) | 30 | if (!new_pgd) |
33 | goto no_pgd; | 31 | goto no_pgd; |
34 | 32 | ||
35 | memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t)); | 33 | memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); |
36 | 34 | ||
37 | /* | 35 | /* |
38 | * Copy over the kernel and IO PGD entries | 36 | * Copy over the kernel and IO PGD entries |
39 | */ | 37 | */ |
40 | init_pgd = pgd_offset_k(0); | 38 | init_pgd = pgd_offset_k(0); |
41 | memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, | 39 | memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, |
42 | (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t)); | 40 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); |
43 | 41 | ||
44 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); | 42 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); |
45 | 43 | ||
@@ -73,28 +71,29 @@ no_pgd: | |||
73 | return NULL; | 71 | return NULL; |
74 | } | 72 | } |
75 | 73 | ||
76 | void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd) | 74 | void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) |
77 | { | 75 | { |
76 | pgd_t *pgd; | ||
78 | pmd_t *pmd; | 77 | pmd_t *pmd; |
79 | pgtable_t pte; | 78 | pgtable_t pte; |
80 | 79 | ||
81 | if (!pgd) | 80 | if (!pgd_base) |
82 | return; | 81 | return; |
83 | 82 | ||
84 | /* pgd is always present and good */ | 83 | pgd = pgd_base + pgd_index(0); |
85 | pmd = pmd_off(pgd, 0); | 84 | if (pgd_none_or_clear_bad(pgd)) |
86 | if (pmd_none(*pmd)) | 85 | goto no_pgd; |
87 | goto free; | 86 | |
88 | if (pmd_bad(*pmd)) { | 87 | pmd = pmd_offset(pgd, 0); |
89 | pmd_ERROR(*pmd); | 88 | if (pmd_none_or_clear_bad(pmd)) |
90 | pmd_clear(pmd); | 89 | goto no_pmd; |
91 | goto free; | ||
92 | } | ||
93 | 90 | ||
94 | pte = pmd_pgtable(*pmd); | 91 | pte = pmd_pgtable(*pmd); |
95 | pmd_clear(pmd); | 92 | pmd_clear(pmd); |
96 | pte_free(mm, pte); | 93 | pte_free(mm, pte); |
94 | no_pmd: | ||
95 | pgd_clear(pgd); | ||
97 | pmd_free(mm, pmd); | 96 | pmd_free(mm, pmd); |
98 | free: | 97 | no_pgd: |
99 | free_pages((unsigned long) pgd, 2); | 98 | free_pages((unsigned long) pgd_base, 2); |
100 | } | 99 | } |
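pgd_alloc() is now the real function rather than a macro over get_pgd_slow(), and the FIRST_USER_PGD_NR offset is gone: a new pgd simply has its first USER_PTRS_PER_PGD slots cleared and the remaining kernel/IO slots copied from the init pgd. The sketch below models only that split; the 3GB TASK_SIZE is an assumption for illustration, and the 8-byte elements stand in for ARM's two-word pgd_t. This is not the kernel's allocator.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PGDIR_SHIFT       21
    #define PGDIR_SIZE        (1UL << PGDIR_SHIFT)
    #define TASK_SIZE         0xC0000000UL              /* assumed 3GB split */
    #define PTRS_PER_PGD      2048
    #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

    int main(void)
    {
        static uint64_t init_pgd[PTRS_PER_PGD]; /* stands in for swapper_pg_dir */
        static uint64_t new_pgd[PTRS_PER_PGD];  /* freshly allocated 16K pgd */

        /* zero the user slots, copy the kernel and IO slots */
        memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(new_pgd[0]));
        memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
               (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(new_pgd[0]));

        printf("user slots: %lu, kernel slots: %lu\n",
               (unsigned long)USER_PTRS_PER_PGD,
               (unsigned long)(PTRS_PER_PGD - USER_PTRS_PER_PGD));
        return 0;
    }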
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f8f777df8d72..e32fa499194c 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -91,7 +91,7 @@ | |||
91 | #if L_PTE_SHARED != PTE_EXT_SHARED | 91 | #if L_PTE_SHARED != PTE_EXT_SHARED |
92 | #error PTE shared bit mismatch | 92 | #error PTE shared bit mismatch |
93 | #endif | 93 | #endif |
94 | #if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ | 94 | #if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ |
95 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED | 95 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED |
96 | #error Invalid Linux PTE bit settings | 96 | #error Invalid Linux PTE bit settings |
97 | #endif | 97 | #endif |
@@ -135,7 +135,7 @@ | |||
135 | .endm | 135 | .endm |
136 | 136 | ||
137 | .macro armv6_set_pte_ext pfx | 137 | .macro armv6_set_pte_ext pfx |
138 | str r1, [r0], #-2048 @ linux version | 138 | str r1, [r0], #2048 @ linux version |
139 | 139 | ||
140 | bic r3, r1, #0x000003fc | 140 | bic r3, r1, #0x000003fc |
141 | bic r3, r3, #PTE_TYPE_MASK | 141 | bic r3, r3, #PTE_TYPE_MASK |
@@ -146,9 +146,9 @@ | |||
146 | and r2, r1, #L_PTE_MT_MASK | 146 | and r2, r1, #L_PTE_MT_MASK |
147 | ldr r2, [ip, r2] | 147 | ldr r2, [ip, r2] |
148 | 148 | ||
149 | tst r1, #L_PTE_WRITE | 149 | eor r1, r1, #L_PTE_DIRTY |
150 | tstne r1, #L_PTE_DIRTY | 150 | tst r1, #L_PTE_DIRTY|L_PTE_RDONLY |
151 | orreq r3, r3, #PTE_EXT_APX | 151 | orrne r3, r3, #PTE_EXT_APX |
152 | 152 | ||
153 | tst r1, #L_PTE_USER | 153 | tst r1, #L_PTE_USER |
154 | orrne r3, r3, #PTE_EXT_AP1 | 154 | orrne r3, r3, #PTE_EXT_AP1 |
@@ -158,8 +158,8 @@ | |||
158 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | 158 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 |
159 | #endif | 159 | #endif |
160 | 160 | ||
161 | tst r1, #L_PTE_EXEC | 161 | tst r1, #L_PTE_XN |
162 | orreq r3, r3, #PTE_EXT_XN | 162 | orrne r3, r3, #PTE_EXT_XN |
163 | 163 | ||
164 | orr r3, r3, r2 | 164 | orr r3, r3, r2 |
165 | 165 | ||
@@ -187,9 +187,9 @@ | |||
187 | * 1111 0xff r/w r/w | 187 | * 1111 0xff r/w r/w |
188 | */ | 188 | */ |
189 | .macro armv3_set_pte_ext wc_disable=1 | 189 | .macro armv3_set_pte_ext wc_disable=1 |
190 | str r1, [r0], #-2048 @ linux version | 190 | str r1, [r0], #2048 @ linux version |
191 | 191 | ||
192 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY | 192 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
193 | 193 | ||
194 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits | 194 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits |
195 | bic r2, r2, #PTE_TYPE_MASK | 195 | bic r2, r2, #PTE_TYPE_MASK |
@@ -198,7 +198,7 @@ | |||
198 | tst r3, #L_PTE_USER @ user? | 198 | tst r3, #L_PTE_USER @ user? |
199 | orrne r2, r2, #PTE_SMALL_AP_URO_SRW | 199 | orrne r2, r2, #PTE_SMALL_AP_URO_SRW |
200 | 200 | ||
201 | tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty? | 201 | tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? |
202 | orreq r2, r2, #PTE_SMALL_AP_UNO_SRW | 202 | orreq r2, r2, #PTE_SMALL_AP_UNO_SRW |
203 | 203 | ||
204 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? | 204 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? |
@@ -210,7 +210,7 @@ | |||
210 | bicne r2, r2, #PTE_BUFFERABLE | 210 | bicne r2, r2, #PTE_BUFFERABLE |
211 | #endif | 211 | #endif |
212 | .endif | 212 | .endif |
213 | str r2, [r0] @ hardware version | 213 | str r2, [r0] @ hardware version |
214 | .endm | 214 | .endm |
215 | 215 | ||
216 | 216 | ||
@@ -230,9 +230,9 @@ | |||
230 | * 1111 11 r/w r/w | 230 | * 1111 11 r/w r/w |
231 | */ | 231 | */ |
232 | .macro xscale_set_pte_ext_prologue | 232 | .macro xscale_set_pte_ext_prologue |
233 | str r1, [r0], #-2048 @ linux version | 233 | str r1, [r0] @ linux version |
234 | 234 | ||
235 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY | 235 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
236 | 236 | ||
237 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits | 237 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits |
238 | orr r2, r2, #PTE_TYPE_EXT @ extended page | 238 | orr r2, r2, #PTE_TYPE_EXT @ extended page |
@@ -240,7 +240,7 @@ | |||
240 | tst r3, #L_PTE_USER @ user? | 240 | tst r3, #L_PTE_USER @ user? |
241 | orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w | 241 | orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w |
242 | 242 | ||
243 | tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty? | 243 | tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? |
244 | orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w | 244 | orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w |
245 | @ combined with user -> user r/w | 245 | @ combined with user -> user r/w |
246 | .endm | 246 | .endm |
@@ -249,7 +249,7 @@ | |||
249 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? | 249 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? |
250 | movne r2, #0 @ no -> fault | 250 | movne r2, #0 @ no -> fault |
251 | 251 | ||
252 | str r2, [r0] @ hardware version | 252 | str r2, [r0, #2048]! @ hardware version |
253 | mov ip, #0 | 253 | mov ip, #0 |
254 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 254 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
255 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier | 255 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier |
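The armv6 macro above (and the v7 version in the next file) now derives the hardware APX bit from L_PTE_DIRTY and L_PTE_RDONLY with an eor + tst pair: flipping DIRTY first lets a single test ask "not dirty, or read-only?", and either condition makes the hardware entry read-only so that clean pages still fault on their first write. A stand-alone truth-table check of that logic, using the bit positions from the pgtable.h diff:

    #include <stdio.h>

    #define L_PTE_DIRTY  (1u << 6)
    #define L_PTE_RDONLY (1u << 7)

    static int hw_read_only(unsigned int lpte)
    {
        /* mirrors: eor #L_PTE_DIRTY; tst #L_PTE_DIRTY|L_PTE_RDONLY; orrne APX */
        return ((lpte ^ L_PTE_DIRTY) & (L_PTE_DIRTY | L_PTE_RDONLY)) != 0;
    }

    int main(void)
    {
        unsigned int cases[] = { 0, L_PTE_DIRTY, L_PTE_RDONLY,
                                 L_PTE_DIRTY | L_PTE_RDONLY };
        for (int i = 0; i < 4; i++)
            printf("dirty=%d rdonly=%d -> APX(read-only)=%d\n",
                   !!(cases[i] & L_PTE_DIRTY),
                   !!(cases[i] & L_PTE_RDONLY),
                   hw_read_only(cases[i]));
        return 0;
    }

This keeps the old behaviour, where only dirty and writable pages get a hardware-writable entry, just expressed with the inverted RDONLY bit.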
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7401f4d7e676..b49fab21517c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -124,15 +124,13 @@ ENDPROC(cpu_v7_switch_mm) | |||
124 | * Set a level 2 translation table entry. | 124 | * Set a level 2 translation table entry. |
125 | * | 125 | * |
126 | * - ptep - pointer to level 2 translation table entry | 126 | * - ptep - pointer to level 2 translation table entry |
127 | * (hardware version is stored at -1024 bytes) | 127 | * (hardware version is stored at +2048 bytes) |
128 | * - pte - PTE value to store | 128 | * - pte - PTE value to store |
129 | * - ext - value for extended PTE bits | 129 | * - ext - value for extended PTE bits |
130 | */ | 130 | */ |
131 | ENTRY(cpu_v7_set_pte_ext) | 131 | ENTRY(cpu_v7_set_pte_ext) |
132 | #ifdef CONFIG_MMU | 132 | #ifdef CONFIG_MMU |
133 | ARM( str r1, [r0], #-2048 ) @ linux version | 133 | str r1, [r0] @ linux version |
134 | THUMB( str r1, [r0] ) @ linux version | ||
135 | THUMB( sub r0, r0, #2048 ) | ||
136 | 134 | ||
137 | bic r3, r1, #0x000003f0 | 135 | bic r3, r1, #0x000003f0 |
138 | bic r3, r3, #PTE_TYPE_MASK | 136 | bic r3, r3, #PTE_TYPE_MASK |
@@ -142,9 +140,9 @@ ENTRY(cpu_v7_set_pte_ext) | |||
142 | tst r1, #1 << 4 | 140 | tst r1, #1 << 4 |
143 | orrne r3, r3, #PTE_EXT_TEX(1) | 141 | orrne r3, r3, #PTE_EXT_TEX(1) |
144 | 142 | ||
145 | tst r1, #L_PTE_WRITE | 143 | eor r1, r1, #L_PTE_DIRTY |
146 | tstne r1, #L_PTE_DIRTY | 144 | tst r1, #L_PTE_RDONLY | L_PTE_DIRTY |
147 | orreq r3, r3, #PTE_EXT_APX | 145 | orrne r3, r3, #PTE_EXT_APX |
148 | 146 | ||
149 | tst r1, #L_PTE_USER | 147 | tst r1, #L_PTE_USER |
150 | orrne r3, r3, #PTE_EXT_AP1 | 148 | orrne r3, r3, #PTE_EXT_AP1 |
@@ -154,14 +152,14 @@ ENTRY(cpu_v7_set_pte_ext) | |||
154 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | 152 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 |
155 | #endif | 153 | #endif |
156 | 154 | ||
157 | tst r1, #L_PTE_EXEC | 155 | tst r1, #L_PTE_XN |
158 | orreq r3, r3, #PTE_EXT_XN | 156 | orrne r3, r3, #PTE_EXT_XN |
159 | 157 | ||
160 | tst r1, #L_PTE_YOUNG | 158 | tst r1, #L_PTE_YOUNG |
161 | tstne r1, #L_PTE_PRESENT | 159 | tstne r1, #L_PTE_PRESENT |
162 | moveq r3, #0 | 160 | moveq r3, #0 |
163 | 161 | ||
164 | str r3, [r0] | 162 | str r3, [r0, #2048]! |
165 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | 163 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte |
166 | #endif | 164 | #endif |
167 | mov pc, lr | 165 | mov pc, lr |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 523408c0bb38..5a37c5e45c41 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -500,8 +500,8 @@ ENTRY(cpu_xscale_set_pte_ext) | |||
500 | @ | 500 | @ |
501 | @ Erratum 40: must set memory to write-through for user read-only pages | 501 | @ Erratum 40: must set memory to write-through for user read-only pages |
502 | @ | 502 | @ |
503 | and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2) | 503 | and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2) |
504 | teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | 504 | teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY |
505 | 505 | ||
506 | moveq r1, #L_PTE_MT_WRITETHROUGH | 506 | moveq r1, #L_PTE_MT_WRITETHROUGH |
507 | and r1, r1, #L_PTE_MT_MASK | 507 | and r1, r1, #L_PTE_MT_MASK |
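The XScale Erratum 40 test keeps its meaning under the new bits: a page is user read-only when L_PTE_USER and L_PTE_RDONLY are both set, so the mask and the compared constant now include L_PTE_RDONLY, and matching write-back (or write-alloc, thanks to the ~(4 << 2) term in the mask) pages are downgraded to write-through. Below is a stand-alone model of that check using the bit values from the pgtable.h diff; it is a sketch, not the kernel's assembly.

    #include <stdio.h>

    #define L_PTE_RDONLY          (1u << 7)
    #define L_PTE_USER            (1u << 8)
    #define L_PTE_MT_WRITETHROUGH (0x02u << 2)
    #define L_PTE_MT_WRITEBACK    (0x03u << 2)
    #define L_PTE_MT_MASK         (0x0fu << 2)

    static unsigned int erratum40_memtype(unsigned int lpte)
    {
        /* mirrors: and ip, r1, #(MT_MASK|USER|RDONLY) & ~(4 << 2); teq ... */
        unsigned int ip = lpte & (L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY)
                               & ~(4u << 2);
        if (ip == (L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY))
            return L_PTE_MT_WRITETHROUGH;
        return lpte & L_PTE_MT_MASK;
    }

    int main(void)
    {
        unsigned int ro_user_wb = L_PTE_USER | L_PTE_RDONLY | L_PTE_MT_WRITEBACK;
        unsigned int rw_user_wb = L_PTE_USER | L_PTE_MT_WRITEBACK;

        printf("user r/o writeback -> %#x (writethrough)\n",
               erratum40_memtype(ro_user_wb));
        printf("user r/w writeback -> %#x (unchanged)\n",
               erratum40_memtype(rw_user_wb));
        return 0;
    }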