Diffstat (limited to 'arch/arm/include/asm/pgtable.h')
 arch/arm/include/asm/pgtable.h | 54 +++++++-----------------------------------------------
 1 file changed, 7 insertions(+), 47 deletions(-)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 2f659e239727..f66626d71e7d 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -11,20 +11,24 @@
 #define _ASMARM_PGTABLE_H
 
 #include <linux/const.h>
-#include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
 
 #ifndef CONFIG_MMU
 
+#include <asm-generic/4level-fixup.h>
 #include "pgtable-nommu.h"
 
 #else
 
+#include <asm-generic/pgtable-nopud.h>
 #include <asm/memory.h>
-#include <mach/vmalloc.h>
 #include <asm/pgtable-hwdef.h>
 
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level.h>
+#else
 #include <asm/pgtable-2level.h>
+#endif
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -33,15 +37,10 @@
  * any out-of-bounds memory accesses will hopefully be caught.
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
- *
- * Note that platforms may override VMALLOC_START, but they must provide
- * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
- * which may not overlap IO space.
  */
-#ifndef VMALLOC_START
 #define VMALLOC_OFFSET		(8*1024*1024)
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#endif
+#define VMALLOC_END		0xff000000UL
 
 #define LIBRARY_TEXT_START	0x0c000000
 
@@ -163,39 +162,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-#define pgd_none(pgd)		(0)
-#define pgd_bad(pgd)		(0)
-#define pgd_present(pgd)	(1)
-#define pgd_clear(pgdp)		do { } while (0)
-#define set_pgd(pgd,pgdp)	do { } while (0)
-#define set_pud(pud,pudp)	do { } while (0)
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, addr)	((pmd_t *)(dir))
-
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
-#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
-
-#define copy_pmd(pmdpd,pmdps)		\
-	do {				\
-		pmdpd[0] = pmdps[0];	\
-		pmdpd[1] = pmdps[1];	\
-		flush_pmd_entry(pmdpd);	\
-	} while (0)
-
-#define pmd_clear(pmdp)			\
-	do {				\
-		pmdp[0] = __pmd(0);	\
-		pmdp[1] = __pmd(0);	\
-		clean_pmd_entry(pmdp);	\
-	} while (0)
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
@@ -204,10 +172,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-/* we don't need complex calculations here as the pmd is folded into the pgd */
-#define pmd_addr_end(addr,end)	(end)
-
-
 #ifndef CONFIG_HIGHPTE
 #define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
 #define __pte_unmap(pte)	do { } while (0)
@@ -229,7 +193,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_page(pte)		pfn_to_page(pte_pfn(pte))
 #define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
 
-#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 #define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 
 #if __LINUX_ARM_ARCH__ < 6
@@ -347,9 +310,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define pgtable_cache_init() do { } while (0)
 
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
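
The second hunk drops the per-platform <mach/vmalloc.h> override of VMALLOC_START and pins VMALLOC_END at 0xff000000UL, while keeping the 8MB-aligned VMALLOC_START expression. The stand-alone sketch below simply replays that arithmetic for an assumed high_memory value; the chosen address and the printed layout are illustrative only and not part of the patch.

/* Illustrative sketch of the VMALLOC_START rounding kept by this patch.
 * Builds as plain user-space C; no kernel headers are required. */
#include <stdio.h>

#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_END	0xff000000UL

int main(void)
{
	/* Assumed example value; on a real system high_memory is set at boot. */
	unsigned long high_memory = 0xc7f00000UL;

	/* Same expression as the header: add 8MB, then round down to an
	 * 8MB boundary, leaving a guard gap above the direct-mapped RAM. */
	unsigned long vmalloc_start =
		(high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1);

	printf("VMALLOC_START = 0x%08lx\n", vmalloc_start);	/* 0xc8000000 */
	printf("VMALLOC_END   = 0x%08lx\n", VMALLOC_END);
	printf("vmalloc space = %lu MiB\n",
	       (VMALLOC_END - vmalloc_start) >> 20);
	return 0;
}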