aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorVivek Goyal <vgoyal@in.ibm.com>2007-05-02 13:27:06 -0400
committerAndi Kleen <andi@basil.nowhere.org>2007-05-02 13:27:06 -0400
commit9d291e787b2b71d1b57e5fbb24ba9c70e748ed84 (patch)
tree9cbc475b8e6c096dfd75fe1c393dcbf657405f81 /include
parente65845045588806fa5c8df8a4f4253516515a5e3 (diff)
[PATCH] x86-64: Assembly safe page.h and pgtable.h
This patch makes pgtable.h and page.h safe to include in assembly files like head.S. Allowing us to use symbolic constants instead of hard coded numbers when referring to the page tables. This patch copies asm-sparc64/const.h to asm-x86_64 to get a definition of _AC() a very convenient macro that allows us to force the type when we are compiling the code in C and to drop all of the type information when we are using the constant in assembly. Previously this was done with multiple definitions of the same constant. const.h was modified slightly so that it works when given CONFIG options as arguments. This patch adds #ifndef __ASSEMBLY__ ... #endif and _AC(1,UL) where appropriate so the assembler won't choke on the header files. Otherwise nothing should have changed. AK: added const.h to exported headers to fix headers_check Signed-off-by: Eric W. Biederman <ebiederm@xmission.com> Signed-off-by: Vivek Goyal <vgoyal@in.ibm.com> Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'include')
-rw-r--r--include/asm-x86_64/Kbuild1
-rw-r--r--include/asm-x86_64/const.h20
-rw-r--r--include/asm-x86_64/page.h28
-rw-r--r--include/asm-x86_64/pgtable.h33
4 files changed, 52 insertions, 30 deletions
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild
index ebd7117782a6..242296ede3dd 100644
--- a/include/asm-x86_64/Kbuild
+++ b/include/asm-x86_64/Kbuild
@@ -18,3 +18,4 @@ header-y += vsyscall32.h
18unifdef-y += mce.h 18unifdef-y += mce.h
19unifdef-y += mtrr.h 19unifdef-y += mtrr.h
20unifdef-y += vsyscall.h 20unifdef-y += vsyscall.h
21unifdef-y += const.h
diff --git a/include/asm-x86_64/const.h b/include/asm-x86_64/const.h
new file mode 100644
index 000000000000..54fb08f3db9b
--- /dev/null
+++ b/include/asm-x86_64/const.h
@@ -0,0 +1,20 @@
1/* const.h: Macros for dealing with constants. */
2
3#ifndef _X86_64_CONST_H
4#define _X86_64_CONST_H
5
6/* Some constant macros are used in both assembler and
7 * C code. Therefore we cannot annotate them always with
8 * 'UL' and other type specificers unilaterally. We
9 * use the following macros to deal with this.
10 */
11
12#ifdef __ASSEMBLY__
13#define _AC(X,Y) X
14#else
15#define __AC(X,Y) (X##Y)
16#define _AC(X,Y) __AC(X,Y)
17#endif
18
19
20#endif /* !(_X86_64_CONST_H) */
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index 10f346165cab..d554b94485df 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -1,14 +1,11 @@
1#ifndef _X86_64_PAGE_H 1#ifndef _X86_64_PAGE_H
2#define _X86_64_PAGE_H 2#define _X86_64_PAGE_H
3 3
4#include <asm/const.h>
4 5
5/* PAGE_SHIFT determines the page size */ 6/* PAGE_SHIFT determines the page size */
6#define PAGE_SHIFT 12 7#define PAGE_SHIFT 12
7#ifdef __ASSEMBLY__ 8#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
8#define PAGE_SIZE (0x1 << PAGE_SHIFT)
9#else
10#define PAGE_SIZE (1UL << PAGE_SHIFT)
11#endif
12#define PAGE_MASK (~(PAGE_SIZE-1)) 9#define PAGE_MASK (~(PAGE_SIZE-1))
13#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK) 10#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
14 11
@@ -33,10 +30,10 @@
33#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ 30#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
34 31
35#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) 32#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
36#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) 33#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
37 34
38#define HPAGE_SHIFT PMD_SHIFT 35#define HPAGE_SHIFT PMD_SHIFT
39#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) 36#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
40#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 37#define HPAGE_MASK (~(HPAGE_SIZE - 1))
41#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 38#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
42 39
@@ -76,29 +73,24 @@ typedef struct { unsigned long pgprot; } pgprot_t;
76#define __pgd(x) ((pgd_t) { (x) } ) 73#define __pgd(x) ((pgd_t) { (x) } )
77#define __pgprot(x) ((pgprot_t) { (x) } ) 74#define __pgprot(x) ((pgprot_t) { (x) } )
78 75
79#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) 76#endif /* !__ASSEMBLY__ */
80#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
81#define __START_KERNEL_map 0xffffffff80000000UL
82#define __PAGE_OFFSET 0xffff810000000000UL
83 77
84#else
85#define __PHYSICAL_START CONFIG_PHYSICAL_START 78#define __PHYSICAL_START CONFIG_PHYSICAL_START
86#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) 79#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
87#define __START_KERNEL_map 0xffffffff80000000 80#define __START_KERNEL_map 0xffffffff80000000
88#define __PAGE_OFFSET 0xffff810000000000 81#define __PAGE_OFFSET 0xffff810000000000
89#endif /* !__ASSEMBLY__ */
90 82
91/* to align the pointer to the (next) page boundary */ 83/* to align the pointer to the (next) page boundary */
92#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) 84#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
93 85
94/* See Documentation/x86_64/mm.txt for a description of the memory map. */ 86/* See Documentation/x86_64/mm.txt for a description of the memory map. */
95#define __PHYSICAL_MASK_SHIFT 46 87#define __PHYSICAL_MASK_SHIFT 46
96#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) 88#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
97#define __VIRTUAL_MASK_SHIFT 48 89#define __VIRTUAL_MASK_SHIFT 48
98#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) 90#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)
99 91
100#define KERNEL_TEXT_SIZE (40UL*1024*1024) 92#define KERNEL_TEXT_SIZE (40*1024*1024)
101#define KERNEL_TEXT_START 0xffffffff80000000UL 93#define KERNEL_TEXT_START 0xffffffff80000000
102 94
103#ifndef __ASSEMBLY__ 95#ifndef __ASSEMBLY__
104 96
@@ -106,7 +98,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
106 98
107#endif /* __ASSEMBLY__ */ 99#endif /* __ASSEMBLY__ */
108 100
109#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) 101#define PAGE_OFFSET __PAGE_OFFSET
110 102
111/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol. 103/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
112 Otherwise you risk miscompilation. */ 104 Otherwise you risk miscompilation. */
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
index 5957361782fe..c514deb658a3 100644
--- a/include/asm-x86_64/pgtable.h
+++ b/include/asm-x86_64/pgtable.h
@@ -1,6 +1,9 @@
1#ifndef _X86_64_PGTABLE_H 1#ifndef _X86_64_PGTABLE_H
2#define _X86_64_PGTABLE_H 2#define _X86_64_PGTABLE_H
3 3
4#include <asm/const.h>
5#ifndef __ASSEMBLY__
6
4/* 7/*
5 * This file contains the functions and defines necessary to modify and use 8 * This file contains the functions and defines necessary to modify and use
6 * the x86-64 page table tree. 9 * the x86-64 page table tree.
@@ -30,6 +33,8 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
30extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; 33extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
31#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 34#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
32 35
36#endif /* !__ASSEMBLY__ */
37
33/* 38/*
34 * PGDIR_SHIFT determines what a top-level page table entry can map 39 * PGDIR_SHIFT determines what a top-level page table entry can map
35 */ 40 */
@@ -54,6 +59,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
54 */ 59 */
55#define PTRS_PER_PTE 512 60#define PTRS_PER_PTE 512
56 61
62#ifndef __ASSEMBLY__
63
57#define pte_ERROR(e) \ 64#define pte_ERROR(e) \
58 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e)) 65 printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
59#define pmd_ERROR(e) \ 66#define pmd_ERROR(e) \
@@ -117,22 +124,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
117 124
118#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK)) 125#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
119 126
120#define PMD_SIZE (1UL << PMD_SHIFT) 127#endif /* !__ASSEMBLY__ */
128
129#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
121#define PMD_MASK (~(PMD_SIZE-1)) 130#define PMD_MASK (~(PMD_SIZE-1))
122#define PUD_SIZE (1UL << PUD_SHIFT) 131#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
123#define PUD_MASK (~(PUD_SIZE-1)) 132#define PUD_MASK (~(PUD_SIZE-1))
124#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 133#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
125#define PGDIR_MASK (~(PGDIR_SIZE-1)) 134#define PGDIR_MASK (~(PGDIR_SIZE-1))
126 135
127#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1) 136#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
128#define FIRST_USER_ADDRESS 0 137#define FIRST_USER_ADDRESS 0
129 138
130#ifndef __ASSEMBLY__ 139#define MAXMEM 0x3fffffffffff
131#define MAXMEM 0x3fffffffffffUL 140#define VMALLOC_START 0xffffc20000000000
132#define VMALLOC_START 0xffffc20000000000UL 141#define VMALLOC_END 0xffffe1ffffffffff
133#define VMALLOC_END 0xffffe1ffffffffffUL 142#define MODULES_VADDR 0xffffffff88000000
134#define MODULES_VADDR 0xffffffff88000000UL 143#define MODULES_END 0xfffffffffff00000
135#define MODULES_END 0xfffffffffff00000UL
136#define MODULES_LEN (MODULES_END - MODULES_VADDR) 144#define MODULES_LEN (MODULES_END - MODULES_VADDR)
137 145
138#define _PAGE_BIT_PRESENT 0 146#define _PAGE_BIT_PRESENT 0
@@ -158,7 +166,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
158#define _PAGE_GLOBAL 0x100 /* Global TLB entry */ 166#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
159 167
160#define _PAGE_PROTNONE 0x080 /* If not present */ 168#define _PAGE_PROTNONE 0x080 /* If not present */
161#define _PAGE_NX (1UL<<_PAGE_BIT_NX) 169#define _PAGE_NX (_AC(1,UL)<<_PAGE_BIT_NX)
162 170
163#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) 171#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
164#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) 172#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -220,6 +228,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
220#define __S110 PAGE_SHARED_EXEC 228#define __S110 PAGE_SHARED_EXEC
221#define __S111 PAGE_SHARED_EXEC 229#define __S111 PAGE_SHARED_EXEC
222 230
231#ifndef __ASSEMBLY__
232
223static inline unsigned long pgd_bad(pgd_t pgd) 233static inline unsigned long pgd_bad(pgd_t pgd)
224{ 234{
225 return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); 235 return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
@@ -405,8 +415,6 @@ extern spinlock_t pgd_lock;
405extern struct page *pgd_list; 415extern struct page *pgd_list;
406void vmalloc_sync_all(void); 416void vmalloc_sync_all(void);
407 417
408#endif /* !__ASSEMBLY__ */
409
410extern int kern_addr_valid(unsigned long addr); 418extern int kern_addr_valid(unsigned long addr);
411 419
412#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 420#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
@@ -436,5 +444,6 @@ extern int kern_addr_valid(unsigned long addr);
436#define __HAVE_ARCH_PTEP_SET_WRPROTECT 444#define __HAVE_ARCH_PTEP_SET_WRPROTECT
437#define __HAVE_ARCH_PTE_SAME 445#define __HAVE_ARCH_PTE_SAME
438#include <asm-generic/pgtable.h> 446#include <asm-generic/pgtable.h>
447#endif /* !__ASSEMBLY__ */
439 448
440#endif /* _X86_64_PGTABLE_H */ 449#endif /* _X86_64_PGTABLE_H */