Diffstat (limited to 'include/asm-um/pgtable.h')
-rw-r--r--   include/asm-um/pgtable.h | 100
1 file changed, 16 insertions, 84 deletions
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 830fc6e5d49d..4102b443e925 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  * Copyright 2003 PathScale, Inc.
  * Derived from include/asm-i386/pgtable.h
  * Licensed under the GPL
@@ -8,11 +8,7 @@
 #ifndef __UM_PGTABLE_H
 #define __UM_PGTABLE_H
 
-#include "linux/sched.h"
-#include "linux/linkage.h"
-#include "asm/processor.h"
-#include "asm/page.h"
-#include "asm/fixmap.h"
+#include <asm/fixmap.h>
 
 #define _PAGE_PRESENT 0x001
 #define _PAGE_NEWPAGE 0x002
@@ -34,22 +30,11 @@
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
-	pte_t *pte_out);
-
 /* zero page used for uninitialized stuff */
 extern unsigned long *empty_zero_page;
 
 #define pgtable_cache_init() do ; while (0)
 
-/*
- * pgd entries used up by user/kernel:
- */
-
-#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#ifndef __ASSEMBLY__
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
@@ -62,16 +47,12 @@ extern unsigned long end_iomem;
 
 #define VMALLOC_OFFSET (__va_space)
 #define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
 #else
 # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
 #endif
 
-#define REGION_SHIFT (sizeof(pte_t) * 8 - 4)
-#define REGION_MASK (((unsigned long) 0xf) << REGION_SHIFT)
-
 #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -81,11 +62,12 @@ extern unsigned long end_iomem;
 #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
 
 /*
- * The i386 can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
+ * The i386 can't do page protection for execute, and considers that the same
+ * are read.
+ * Also, write permissions imply read permissions. This is the closest we can
+ * get..
  */
 #define __P000 PAGE_NONE
 #define __P001 PAGE_READONLY
@@ -106,40 +88,16 @@ extern unsigned long end_iomem;
 #define __S111 PAGE_SHARED
 
 /*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok(VERIFY_WRITE,..)'
- */
-#undef TEST_VERIFY_AREA
-
-/* page table for 0-4MB for everybody */
-extern unsigned long pg0[1024];
-
-/*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-
 #define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
 
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 3
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-
 #define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
 
 #define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
 #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
 #define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
 
@@ -149,14 +107,9 @@ extern unsigned long pg0[1024];
 #define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
 #define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
 
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
 
 #define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
-#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
-#define phys_addr(p) ((p) & ~REGION_MASK)
 
 #define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
 
@@ -309,7 +262,8 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
 
 #define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
 #define __virt_to_page(virt) phys_to_page(__pa(virt))
-#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
+#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
 
 #define mk_pte(page, pgprot) \
 	({ pte_t pte; \
@@ -325,8 +279,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return pte;
 }
 
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
 /*
  * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
  *
@@ -335,8 +287,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 
-#define pgd_index_k(addr) pgd_index(addr)
-
 /*
  * pgd_offset() returns a (pgd_t *)
  * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
@@ -355,8 +305,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * this macro returns the index of the entry in the pmd page which would
  * control the given virtual address
  */
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 
+#define pmd_page_vaddr(pmd) \
+	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
 /*
  * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  *
@@ -372,6 +326,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pte_unmap(pte) do { } while (0)
 #define pte_unmap_nested(pte) do { } while (0)
 
+struct mm_struct;
+extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+
 #define update_mmu_cache(vma,address,pte) do ; while (0)
 
 /* Encode and de-code a swap entry */
@@ -388,29 +345,4 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #include <asm-generic/pgtable.h>
 
-#include <asm-generic/pgtable-nopud.h>
-
-#ifdef CONFIG_HIGHMEM
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
-	pte_clear(&init_mm, vaddr, ptep); \
-	__flush_tlb_one(vaddr); \
-} while (0)
-#endif
 #endif
-
-#endif
-#endif
-
-#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
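For context on the interface change above: the patch drops the um_virt_to_phys() declaration and instead exposes virt_to_pte() for page-table lookups. Below is a minimal, hypothetical sketch (not part of this commit) of how a UML-internal caller might combine virt_to_pte() with the pte_present() and pte_pfn() helpers from this header to resolve a virtual address to a physical one. The function name example_virt_to_phys and the convention of returning 0 for an unmapped address are illustrative assumptions.

/* Illustrative sketch only -- not part of this commit. */
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>

/* Hypothetical helper: resolve a virtual address in @mm to a physical one. */
static unsigned long example_virt_to_phys(struct mm_struct *mm,
					  unsigned long virt)
{
	/* virt_to_pte() is the lookup declared by this patch */
	pte_t *pte = virt_to_pte(mm, virt);

	if (pte == NULL || !pte_present(*pte))
		return 0;	/* not mapped; 0 is an illustrative convention */

	/* rebuild the physical address: page frame plus in-page offset */
	return (pte_pfn(*pte) << PAGE_SHIFT) + (virt & ~PAGE_MASK);
}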