path: root/arch/arm64/include/asm/pgtable.h
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 23:03:47 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-03-17 23:03:47 -0400
commit    588ab3f9afdfa1a6b1e5761c858b2c4ab6098285 (patch)
tree      c9aa4c4f8a63d25c3cf05330c68948dceec79cc2 /arch/arm64/include/asm/pgtable.h
parent    3d15cfdb1b77536c205d8e49c0312219ddf162ec (diff)
parent    2776e0e8ef683a42fe3e9a5facf576b73579700e (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "Here are the main arm64 updates for 4.6. There are some relatively
  intrusive changes to support KASLR, the reworking of the kernel
  virtual memory layout and initial page table creation.

  Summary:

   - Initial page table creation reworked to avoid breaking large block
     mappings (huge pages) into smaller ones. The ARM architecture
     requires break-before-make in such cases to avoid TLB conflicts,
     but that's not always possible on live page tables

   - Kernel virtual memory layout: the kernel image is no longer linked
     to the bottom of the linear mapping (PAGE_OFFSET) but at the
     bottom of the vmalloc space, allowing the kernel to be loaded
     (nearly) anywhere in physical RAM

   - Kernel ASLR: position independent kernel Image and modules being
     randomly mapped in the vmalloc space, with the randomness provided
     by UEFI (efi_get_random_bytes() patches merged via the arm64 tree,
     acked by Matt Fleming)

   - Implement relative exception tables for arm64, required by KASLR
     (initial code for ARCH_HAS_RELATIVE_EXTABLE added to lib/extable.c,
     but the actual x86 conversion deferred to 4.7 because of the merge
     dependencies)

   - Support for the User Access Override feature of ARMv8.2: this
     allows uaccess functions (get_user etc.) to be implemented using
     LDTR/STTR instructions. Such instructions, when run by the kernel,
     perform unprivileged accesses, adding an extra level of
     protection. The set_fs() macro is used to "upgrade" such
     instructions to privileged accesses via the UAO bit

   - Half-precision floating point support (part of ARMv8.2)

   - Optimisations for CPUs with or without a hardware prefetcher
     (using run-time code patching)

   - copy_page performance improvement to deal with 128 bytes at a time

   - Sanity checks on the CPU capabilities (via CPUID) to prevent
     incompatible secondary CPUs from being brought up (e.g. weird
     big.LITTLE configurations)

   - valid_user_regs() reworked for better sanity check of the
     sigcontext information (restored pstate information)

   - ACPI parking protocol implementation

   - CONFIG_DEBUG_RODATA enabled by default

   - VDSO code marked as read-only

   - DEBUG_PAGEALLOC support

   - ARCH_HAS_UBSAN_SANITIZE_ALL enabled

   - Erratum workaround for the Cavium ThunderX SoC

   - set_pte_at() fix for PROT_NONE mappings

   - Code clean-ups"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (99 commits)
  arm64: kasan: Fix zero shadow mapping overriding kernel image shadow
  arm64: kasan: Use actual memory node when populating the kernel image shadow
  arm64: Update PTE_RDONLY in set_pte_at() for PROT_NONE permission
  arm64: Fix misspellings in comments.
  arm64: efi: add missing frame pointer assignment
  arm64: make mrs_s prefixing implicit in read_cpuid
  arm64: enable CONFIG_DEBUG_RODATA by default
  arm64: Rework valid_user_regs
  arm64: mm: check at build time that PAGE_OFFSET divides the VA space evenly
  arm64: KVM: Move kvm_call_hyp back to its original localtion
  arm64: mm: treat memstart_addr as a signed quantity
  arm64: mm: list kernel sections in order
  arm64: lse: deal with clobbered IP registers after branch via PLT
  arm64: mm: dump: Use VA_START directly instead of private LOWEST_ADDR
  arm64: kconfig: add submenu for 8.2 architectural features
  arm64: kernel: acpi: fix ioremap in ACPI parking protocol cpu_postboot
  arm64: Add support for Half precision floating point
  arm64: Remove fixmap include fragility
  arm64: Add workaround for Cavium erratum 27456
  arm64: mm: Mark .rodata as RO
  ...
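The relative exception tables mentioned above store each entry as a pair of
32-bit offsets relative to the entry itself, so the table needs no relocation
when KASLR moves the kernel image. A minimal sketch of that layout, assuming
the ARCH_HAS_RELATIVE_EXTABLE scheme in lib/extable.c (the helpers below are
illustrative, not code from this merge):

	struct exception_table_entry {
		int insn, fixup;	/* 32-bit self-relative offsets */
	};

	/* recover absolute addresses from the self-relative fields */
	static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->insn + x->insn;
	}

	static inline unsigned long ex_to_fixup(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->fixup + x->fixup;
	}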
Diffstat (limited to 'arch/arm64/include/asm/pgtable.h')
-rw-r--r--	arch/arm64/include/asm/pgtable.h	178
1 file changed, 79 insertions(+), 99 deletions(-)
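The bulk of the diff below converts the page-table walkers from returning
virtual addresses (pmd_page_vaddr() and friends) to returning physical
addresses (pmd_page_paddr() etc.), and adds *_set_fixmap_offset() /
*_clear_fixmap() accessors that temporarily map a table through a dedicated
fixmap slot. A rough sketch of the intended usage, modelled on the arm64
early page-table code rather than copied from it (fill_pte_range is a
hypothetical caller):

	/*
	 * Hypothetical walk over one PTE table: map the table via the
	 * FIX_PTE fixmap slot, fill it, then tear the temporary mapping
	 * down again. Not literal kernel source.
	 */
	static void fill_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, unsigned long pfn,
				   pgprot_t prot)
	{
		pte_t *pte = pte_set_fixmap_offset(pmd, addr);	/* from this patch */

		do {
			set_pte(pte, pfn_pte(pfn, prot));
			pfn++;
		} while (pte++, addr += PAGE_SIZE, addr != end);

		pte_clear_fixmap();				/* unmap FIX_PTE */
	}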
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 819aff5d593f..989fef16d461 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -21,34 +21,20 @@
 
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
-
-/*
- * Software defined PTE bits definition.
- */
-#define PTE_VALID		(_AT(pteval_t, 1) << 0)
-#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
-#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
-#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
-#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
+#include <asm/pgtable-prot.h>
 
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
  *
  * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
  *	(rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_START: beginning of the kernel vmalloc space
  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
  *	fixed mappings and modules
  */
 #define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
-#ifndef CONFIG_KASAN
-#define VMALLOC_START		(VA_START)
-#else
-#include <asm/kasan.h>
-#define VMALLOC_START		(KASAN_SHADOW_END + SZ_64K)
-#endif
-
+#define VMALLOC_START		(MODULES_END)
 #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define VMEMMAP_START		(VMALLOC_END + SZ_64K)
@@ -59,6 +45,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/fixmap.h>
 #include <linux/mmdebug.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
@@ -66,65 +53,12 @@ extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-
-#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
-
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
-#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
-
-#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-
-#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
-
-#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
-#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
-
-#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-
-#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-
-#define __P000  PAGE_NONE
-#define __P001  PAGE_READONLY
-#define __P010  PAGE_COPY
-#define __P011  PAGE_COPY
-#define __P100  PAGE_READONLY_EXEC
-#define __P101  PAGE_READONLY_EXEC
-#define __P110  PAGE_COPY_EXEC
-#define __P111  PAGE_COPY_EXEC
-
-#define __S000  PAGE_NONE
-#define __S001  PAGE_READONLY
-#define __S010  PAGE_SHARED
-#define __S011  PAGE_SHARED
-#define __S100  PAGE_READONLY_EXEC
-#define __S101  PAGE_READONLY_EXEC
-#define __S110  PAGE_SHARED_EXEC
-#define __S111  PAGE_SHARED_EXEC
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr)	(empty_zero_page)
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr)	virt_to_page(empty_zero_page)
 
 #define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
 
@@ -136,16 +70,6 @@ extern struct page *empty_zero_page;
 #define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 
-/* Find an entry in the third-level page table. */
-#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)			do { } while (0)
-#define pte_unmap_nested(pte)		do { } while (0)
-
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
@@ -279,7 +203,7 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-	if (pte_valid(pte)) {
+	if (pte_present(pte)) {
 		if (pte_sw_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
@@ -412,7 +336,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
 				 PMD_TYPE_SECT)
 
-#ifdef CONFIG_ARM64_64K_PAGES
+#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
 #define pud_sect(pud)		(0)
 #define pud_table(pud)		(1)
 #else
@@ -434,13 +358,31 @@ static inline void pmd_clear(pmd_t *pmdp)
 	set_pmd(pmdp, __pmd(0));
 }
 
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 {
-	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
+/* Find an entry in the third-level page table. */
+#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_phys(dir,addr)	(pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))
+
+#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)			do { } while (0)
+#define pte_unmap_nested(pte)		do { } while (0)
+
+#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
+#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)
+
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -467,21 +409,37 @@ static inline void pud_clear(pud_t *pudp)
 	set_pud(pudp, __pud(0));
 }
 
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline phys_addr_t pud_page_paddr(pud_t pud)
 {
-	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
+#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
+
+#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
+#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
+#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)
 
 #define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
+
+#else
+
+#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })
+
+/* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
+#define pmd_set_fixmap(addr)		NULL
+#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
+#define pmd_clear_fixmap()
+
+#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)
+
 #endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -503,21 +461,37 @@ static inline void pgd_clear(pgd_t *pgdp)
 	set_pgd(pgdp, __pgd(0));
 }
 
-static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 {
-	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the frst-level page table. */
 #define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
-{
-	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
-}
+#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
+
+#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
+#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)
 
 #define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
+
+#else
+
+#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})
+
+/* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */
+#define pud_set_fixmap(addr)		NULL
+#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
+#define pud_clear_fixmap()
+
+#define pud_offset_kimg(dir,addr)	((pud_t *)dir)
+
 #endif	/* CONFIG_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -525,11 +499,16 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 
-#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))
+#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))
+
+#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
 
+#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
+#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -649,6 +628,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
  *	bits 0-1:	present (must be zero)
  *	bits 2-7:	swap type
  *	bits 8-57:	swap offset
+ *	bit 58:		PTE_PROT_NONE (must be zero)
  */
 #define __SWP_TYPE_SHIFT	2
 #define __SWP_TYPE_BITS		6
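The final hunk reserves bit 58 of a swapped-out PTE for PTE_PROT_NONE. As a
worked example of the documented layout, here is a standalone sketch; the
SWP_* names mirror the __SWP_* constants in this file, with the 50-bit offset
width implied by "bits 8-57":

	#include <stdio.h>

	#define SWP_TYPE_SHIFT		2
	#define SWP_TYPE_BITS		6
	#define SWP_OFFSET_SHIFT	(SWP_TYPE_SHIFT + SWP_TYPE_BITS)	/* 8 */
	#define SWP_OFFSET_BITS		50					/* bits 8-57 */

	static unsigned long swp_entry(unsigned int type, unsigned long offset)
	{
		return ((unsigned long)type << SWP_TYPE_SHIFT) |
		       (offset << SWP_OFFSET_SHIFT);
	}

	int main(void)
	{
		unsigned long e = swp_entry(3, 0x1234);

		/* bits 0-1 stay zero, so the entry is !pte_valid(); bit 58 stays zero too */
		printf("type=%lu offset=%#lx\n",
		       (e >> SWP_TYPE_SHIFT) & ((1 << SWP_TYPE_BITS) - 1),
		       (e >> SWP_OFFSET_SHIFT) & ((1UL << SWP_OFFSET_BITS) - 1));
		return 0;
	}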