diff options
| author | Jeff Dike <jdike@addtoit.com> | 2008-02-05 01:30:47 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-05 12:44:26 -0500 |
| commit | d83ecf083a2163705f5ebcede4637a955eb7b964 (patch) | |
| tree | 3ab72f51a15b8accbe50cbc3dc953f98b5365120 /include | |
| parent | edea138584d7586a3b93b6d5ab5ec021d18e11e9 (diff) | |
uml: tidy pgtable.h
Large pieces of include/asm/pgtable.h were unused cruft.
This uncovered arch/um/kernel/trap.c needing skas.h in order to get
ptrace_faultinfo.
Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-um/pgtable.h | 86 |
1 file changed, 7 insertions, 79 deletions
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index 830fc6e5d49d..cb0d2048eca6 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) | 2 | * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) |
| 3 | * Copyright 2003 PathScale, Inc. | 3 | * Copyright 2003 PathScale, Inc. |
| 4 | * Derived from include/asm-i386/pgtable.h | 4 | * Derived from include/asm-i386/pgtable.h |
| 5 | * Licensed under the GPL | 5 | * Licensed under the GPL |
| @@ -9,10 +9,6 @@ | |||
| 9 | #define __UM_PGTABLE_H | 9 | #define __UM_PGTABLE_H |
| 10 | 10 | ||
| 11 | #include "linux/sched.h" | 11 | #include "linux/sched.h" |
| 12 | #include "linux/linkage.h" | ||
| 13 | #include "asm/processor.h" | ||
| 14 | #include "asm/page.h" | ||
| 15 | #include "asm/fixmap.h" | ||
| 16 | 12 | ||
| 17 | #define _PAGE_PRESENT 0x001 | 13 | #define _PAGE_PRESENT 0x001 |
| 18 | #define _PAGE_NEWPAGE 0x002 | 14 | #define _PAGE_NEWPAGE 0x002 |
| @@ -42,14 +38,6 @@ extern unsigned long *empty_zero_page; | |||
| 42 | 38 | ||
| 43 | #define pgtable_cache_init() do ; while (0) | 39 | #define pgtable_cache_init() do ; while (0) |
| 44 | 40 | ||
| 45 | /* | ||
| 46 | * pgd entries used up by user/kernel: | ||
| 47 | */ | ||
| 48 | |||
| 49 | #define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT) | ||
| 50 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | ||
| 51 | |||
| 52 | #ifndef __ASSEMBLY__ | ||
| 53 | /* Just any arbitrary offset to the start of the vmalloc VM area: the | 41 | /* Just any arbitrary offset to the start of the vmalloc VM area: the |
| 54 | * current 8MB value just means that there will be a 8MB "hole" after the | 42 | * current 8MB value just means that there will be a 8MB "hole" after the |
| 55 | * physical memory until the kernel virtual memory starts. That means that | 43 | * physical memory until the kernel virtual memory starts. That means that |
| @@ -62,16 +50,12 @@ extern unsigned long end_iomem; | |||
| 62 | 50 | ||
| 63 | #define VMALLOC_OFFSET (__va_space) | 51 | #define VMALLOC_OFFSET (__va_space) |
| 64 | #define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) | 52 | #define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) |
| 65 | |||
| 66 | #ifdef CONFIG_HIGHMEM | 53 | #ifdef CONFIG_HIGHMEM |
| 67 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) | 54 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) |
| 68 | #else | 55 | #else |
| 69 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) | 56 | # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) |
| 70 | #endif | 57 | #endif |
| 71 | 58 | ||
| 72 | #define REGION_SHIFT (sizeof(pte_t) * 8 - 4) | ||
| 73 | #define REGION_MASK (((unsigned long) 0xf) << REGION_SHIFT) | ||
| 74 | |||
| 75 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | 59 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) |
| 76 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 60 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) |
| 77 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) | 61 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) |
| @@ -81,11 +65,12 @@ extern unsigned long end_iomem; | |||
| 81 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | 65 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) |
| 82 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) | 66 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) |
| 83 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) | 67 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) |
| 84 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED) | ||
| 85 | 68 | ||
| 86 | /* | 69 | /* |
| 87 | * The i386 can't do page protection for execute, and considers that the same are read. | 70 | * The i386 can't do page protection for execute, and considers that the same |
| 88 | * Also, write permissions imply read permissions. This is the closest we can get.. | 71 | * are read. |
| 72 | * Also, write permissions imply read permissions. This is the closest we can | ||
| 73 | * get.. | ||
| 89 | */ | 74 | */ |
| 90 | #define __P000 PAGE_NONE | 75 | #define __P000 PAGE_NONE |
| 91 | #define __P001 PAGE_READONLY | 76 | #define __P001 PAGE_READONLY |
| @@ -106,40 +91,16 @@ extern unsigned long end_iomem; | |||
| 106 | #define __S111 PAGE_SHARED | 91 | #define __S111 PAGE_SHARED |
| 107 | 92 | ||
| 108 | /* | 93 | /* |
| 109 | * Define this if things work differently on an i386 and an i486: | ||
| 110 | * it will (on an i486) warn about kernel memory accesses that are | ||
| 111 | * done without a 'access_ok(VERIFY_WRITE,..)' | ||
| 112 | */ | ||
| 113 | #undef TEST_VERIFY_AREA | ||
| 114 | |||
| 115 | /* page table for 0-4MB for everybody */ | ||
| 116 | extern unsigned long pg0[1024]; | ||
| 117 | |||
| 118 | /* | ||
| 119 | * ZERO_PAGE is a global shared page that is always zero: used | 94 | * ZERO_PAGE is a global shared page that is always zero: used |
| 120 | * for zero-mapped memory areas etc.. | 95 | * for zero-mapped memory areas etc.. |
| 121 | */ | 96 | */ |
| 122 | |||
| 123 | #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) | 97 | #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page) |
| 124 | 98 | ||
| 125 | /* number of bits that fit into a memory pointer */ | ||
| 126 | #define BITS_PER_PTR (8*sizeof(unsigned long)) | ||
| 127 | |||
| 128 | /* to align the pointer to a pointer address */ | ||
| 129 | #define PTR_MASK (~(sizeof(void*)-1)) | ||
| 130 | |||
| 131 | /* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */ | ||
| 132 | /* 64-bit machines, beware! SRB. */ | ||
| 133 | #define SIZEOF_PTR_LOG2 3 | ||
| 134 | |||
| 135 | /* to find an entry in a page-table */ | ||
| 136 | #define PAGE_PTR(address) \ | ||
| 137 | ((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK) | ||
| 138 | |||
| 139 | #define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) | 99 | #define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE)) |
| 140 | 100 | ||
| 141 | #define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE)) | 101 | #define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE)) |
| 142 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) | 102 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) |
| 103 | |||
| 143 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 104 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
| 144 | #define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0) | 105 | #define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0) |
| 145 | 106 | ||
| @@ -149,14 +110,9 @@ extern unsigned long pg0[1024]; | |||
| 149 | #define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE) | 110 | #define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE) |
| 150 | #define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE) | 111 | #define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE) |
| 151 | 112 | ||
| 152 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | ||
| 153 | |||
| 154 | #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK) | 113 | #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK) |
| 155 | 114 | ||
| 156 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 115 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
| 157 | #define pte_address(x) (__va(pte_val(x) & PAGE_MASK)) | ||
| 158 | #define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT)) | ||
| 159 | #define phys_addr(p) ((p) & ~REGION_MASK) | ||
| 160 | 116 | ||
| 161 | #define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE)) | 117 | #define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE)) |
| 162 | 118 | ||
| @@ -310,6 +266,7 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval) | |||
| 310 | #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) | 266 | #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys)) |
| 311 | #define __virt_to_page(virt) phys_to_page(__pa(virt)) | 267 | #define __virt_to_page(virt) phys_to_page(__pa(virt)) |
| 312 | #define page_to_phys(page) pfn_to_phys(page_to_pfn(page)) | 268 | #define page_to_phys(page) pfn_to_phys(page_to_pfn(page)) |
| 269 | #define virt_to_page(addr) __virt_to_page((const unsigned long) addr) | ||
| 313 | 270 | ||
| 314 | #define mk_pte(page, pgprot) \ | 271 | #define mk_pte(page, pgprot) \ |
| 315 | ({ pte_t pte; \ | 272 | ({ pte_t pte; \ |
| @@ -325,8 +282,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
| 325 | return pte; | 282 | return pte; |
| 326 | } | 283 | } |
| 327 | 284 | ||
| 328 | #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | ||
| 329 | |||
| 330 | /* | 285 | /* |
| 331 | * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] | 286 | * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] |
| 332 | * | 287 | * |
| @@ -335,8 +290,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
| 335 | */ | 290 | */ |
| 336 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 291 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
| 337 | 292 | ||
| 338 | #define pgd_index_k(addr) pgd_index(addr) | ||
| 339 | |||
| 340 | /* | 293 | /* |
| 341 | * pgd_offset() returns a (pgd_t *) | 294 | * pgd_offset() returns a (pgd_t *) |
| 342 | * pgd_index() is used get the offset into the pgd page's array of pgd_t's; | 295 | * pgd_index() is used get the offset into the pgd page's array of pgd_t's; |
| @@ -388,29 +341,4 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
| 388 | 341 | ||
| 389 | #include <asm-generic/pgtable.h> | 342 | #include <asm-generic/pgtable.h> |
| 390 | 343 | ||
| 391 | #include <asm-generic/pgtable-nopud.h> | ||
| 392 | |||
| 393 | #ifdef CONFIG_HIGHMEM | ||
| 394 | /* Clear a kernel PTE and flush it from the TLB */ | ||
| 395 | #define kpte_clear_flush(ptep, vaddr) \ | ||
| 396 | do { \ | ||
| 397 | pte_clear(&init_mm, vaddr, ptep); \ | ||
| 398 | __flush_tlb_one(vaddr); \ | ||
| 399 | } while (0) | ||
| 400 | #endif | 344 | #endif |
| 401 | |||
| 402 | #endif | ||
| 403 | #endif | ||
| 404 | |||
| 405 | #define virt_to_page(addr) __virt_to_page((const unsigned long) addr) | ||
| 406 | |||
| 407 | /* | ||
| 408 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
| 409 | * Emacs will notice this stuff at the end of the file and automatically | ||
| 410 | * adjust the settings for this buffer only. This must remain at the end | ||
| 411 | * of the file. | ||
| 412 | * --------------------------------------------------------------------------- | ||
| 413 | * Local variables: | ||
| 414 | * c-file-style: "linux" | ||
| 415 | * End: | ||
| 416 | */ | ||
