Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/page.h                                    | 167
-rw-r--r--  arch/x86/include/asm/page_32.h                                 |  89
-rw-r--r--  arch/x86/include/asm/page_32_types.h                           |  62
-rw-r--r--  arch/x86/include/asm/page_64.h                                 | 101
-rw-r--r--  arch/x86/include/asm/page_64.h.rej                             | 114
-rw-r--r--  arch/x86/include/asm/page_64_types.h                           |  91
-rw-r--r--  arch/x86/include/asm/page_types.h                              |  63
-rw-r--r--  arch/x86/include/asm/paravirt.h                                |   2
-rw-r--r--  arch/x86/include/asm/pgtable-2level_types.h (renamed from arch/x86/include/asm/pgtable-2level-defs.h) | 15
-rw-r--r--  arch/x86/include/asm/pgtable-3level_types.h (renamed from arch/x86/include/asm/pgtable-3level-defs.h) | 18
-rw-r--r--  arch/x86/include/asm/pgtable.h                                 | 322
-rw-r--r--  arch/x86/include/asm/pgtable_32.h                              |  42
-rw-r--r--  arch/x86/include/asm/pgtable_32_types.h                        |  46
-rw-r--r--  arch/x86/include/asm/pgtable_64.h                              |  48
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h                        |  62
-rw-r--r--  arch/x86/include/asm/pgtable_types.h                           | 321
-rw-r--r--  arch/x86/include/asm/processor.h                               |   1
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c                            |   9
-rw-r--r--  arch/x86/mm/fault.c                                            |   1
19 files changed, 876 insertions(+), 698 deletions(-)
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 05f2da7f387a..467ce69306b2 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -1,42 +1,9 @@
 #ifndef _ASM_X86_PAGE_H
 #define _ASM_X86_PAGE_H

-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT 12
-#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
-
 #ifdef __KERNEL__

-#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
-#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
-   virtual addresses are 32-bits but physical addresses are larger
-   (ie, 32-bit PAE). */
-#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
-#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
-
-/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
-#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
-
-#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
-
-#define HPAGE_SHIFT PMD_SHIFT
-#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
-
-#define HUGE_MAX_HSTATE 2
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
+#include <asm/page_types.h>

 #ifdef CONFIG_X86_64
 #include <asm/page_64.h>
@@ -44,38 +11,18 @@
 #include <asm/page_32.h>
 #endif /* CONFIG_X86_64 */

-#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
-
-#define VM_DATA_DEFAULT_FLAGS \
-	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-
 #ifndef __ASSEMBLY__

-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pgprotval_t pgprot; } pgprot_t;
-
-extern int page_is_ram(unsigned long pagenr);
-extern int devmem_is_allowed(unsigned long pagenr);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-		       pgprot_t vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-			 pgprot_t vma_prot);
-
-extern unsigned long max_low_pfn_mapped;
-extern unsigned long max_pfn_mapped;
-
 struct page;

 static inline void clear_user_page(void *page, unsigned long vaddr,
 				   struct page *pg)
 {
 	clear_page(page);
 }

 static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 				  struct page *topage)
 {
 	copy_page(to, from);
 }
@@ -84,114 +31,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

-static inline pgd_t native_make_pgd(pgdval_t val)
-{
-	return (pgd_t) { val };
-}
-
-static inline pgdval_t native_pgd_val(pgd_t pgd)
-{
-	return pgd.pgd;
-}
-
-static inline pgdval_t pgd_flags(pgd_t pgd)
-{
-	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
-}
-
-#if PAGETABLE_LEVELS >= 3
-#if PAGETABLE_LEVELS == 4
-typedef struct { pudval_t pud; } pud_t;
-
-static inline pud_t native_make_pud(pmdval_t val)
-{
-	return (pud_t) { val };
-}
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
-	return pud.pud;
-}
-#else /* PAGETABLE_LEVELS == 3 */
-#include <asm-generic/pgtable-nopud.h>
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
-	return native_pgd_val(pud.pgd);
-}
-#endif /* PAGETABLE_LEVELS == 4 */
-
-static inline pudval_t pud_flags(pud_t pud)
-{
-	return native_pud_val(pud) & PTE_FLAGS_MASK;
-}
-
-typedef struct { pmdval_t pmd; } pmd_t;
-
-static inline pmd_t native_make_pmd(pmdval_t val)
-{
-	return (pmd_t) { val };
-}
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
-	return pmd.pmd;
-}
-
-#else /* PAGETABLE_LEVELS == 2 */
-#include <asm-generic/pgtable-nopmd.h>
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
-	return native_pgd_val(pmd.pud.pgd);
-}
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-static inline pmdval_t pmd_flags(pmd_t pmd)
-{
-	return native_pmd_val(pmd) & PTE_FLAGS_MASK;
-}
-
-static inline pte_t native_make_pte(pteval_t val)
-{
-	return (pte_t) { .pte = val };
-}
-
-static inline pteval_t native_pte_val(pte_t pte)
-{
-	return pte.pte;
-}
-
-static inline pteval_t pte_flags(pte_t pte)
-{
-	return native_pte_val(pte) & PTE_FLAGS_MASK;
-}
-
-#define pgprot_val(x) ((x).pgprot)
-#define __pgprot(x) ((pgprot_t) { (x) } )
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else /* !CONFIG_PARAVIRT */
-
-#define pgd_val(x) native_pgd_val(x)
-#define __pgd(x) native_make_pgd(x)
-
-#ifndef __PAGETABLE_PUD_FOLDED
-#define pud_val(x) native_pud_val(x)
-#define __pud(x) native_make_pud(x)
-#endif
-
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pmd_val(x) native_pmd_val(x)
-#define __pmd(x) native_make_pmd(x)
-#endif
-
-#define pte_val(x) native_pte_val(x)
-#define __pte(x) native_make_pte(x)
-
-#endif /* CONFIG_PARAVIRT */
-
 #define __pa(x) __phys_addr((unsigned long)(x))
 #define __pa_nodebug(x) __phys_addr_nodebug((unsigned long)(x))
 /* __pa_symbol should be used for C visible symbols.
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index bcde0d7b4325..b3f0bf79e843 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -1,82 +1,12 @@
 #ifndef _ASM_X86_PAGE_32_H
 #define _ASM_X86_PAGE_32_H

-/*
- * This handles the memory map.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-
-#ifdef CONFIG_4KSTACKS
-#define THREAD_ORDER 0
-#else
-#define THREAD_ORDER 1
-#endif
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-
-#define STACKFAULT_STACK 0
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 0
-#define DEBUG_STACK 0
-#define MCE_STACK 0
-#define N_EXCEPTION_STACKS 1
-
-#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT 44
-#define __VIRTUAL_MASK_SHIFT 32
-#define PAGETABLE_LEVELS 3
-
-#ifndef __ASSEMBLY__
-typedef u64 pteval_t;
-typedef u64 pmdval_t;
-typedef u64 pudval_t;
-typedef u64 pgdval_t;
-typedef u64 pgprotval_t;
-
-typedef union {
-	struct {
-		unsigned long pte_low, pte_high;
-	};
-	pteval_t pte;
-} pte_t;
-#endif /* __ASSEMBLY__
- */
-#else /* !CONFIG_X86_PAE */
-#define __PHYSICAL_MASK_SHIFT 32
-#define __VIRTUAL_MASK_SHIFT 32
-#define PAGETABLE_LEVELS 2
-
-#ifndef __ASSEMBLY__
-typedef unsigned long pteval_t;
-typedef unsigned long pmdval_t;
-typedef unsigned long pudval_t;
-typedef unsigned long pgdval_t;
-typedef unsigned long pgprotval_t;
-
-typedef union {
-	pteval_t pte;
-	pteval_t pte_low;
-} pte_t;
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_X86_PAE */
-
-#ifndef __ASSEMBLY__
-typedef struct page *pgtable_t;
-#endif
+#include <asm/page_32_types.h>

 #ifdef CONFIG_HUGETLB_PAGE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif

-#ifndef __ASSEMBLY__
 #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
 #ifdef CONFIG_DEBUG_VIRTUAL
 extern unsigned long __phys_addr(unsigned long);
@@ -89,22 +19,7 @@ extern unsigned long __phys_addr(unsigned long);
 #define pfn_valid(pfn) ((pfn) < max_mapnr)
 #endif /* CONFIG_FLATMEM */

-extern int nx_enabled;
-
-/*
- * This much address space is reserved for vmalloc() and iomap()
- * as well as fixmap mappings.
- */
-extern unsigned int __VMALLOC_RESERVE;
-extern int sysctl_legacy_va_layout;
-
-extern void find_low_pfn_range(void);
-extern unsigned long init_memory_mapping(unsigned long start,
-					 unsigned long end);
-extern void initmem_init(unsigned long, unsigned long);
-extern void free_initmem(void);
-extern void setup_bootmem_allocator(void);
-
+#ifndef __ASSEMBLY__

 #ifdef CONFIG_X86_USE_3DNOW
 #include <asm/mmx.h>
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
new file mode 100644
index 000000000000..b5486aaf36ec
--- /dev/null
+++ b/arch/x86/include/asm/page_32_types.h
@@ -0,0 +1,62 @@
+#ifndef _ASM_X86_PAGE_32_DEFS_H
+#define _ASM_X86_PAGE_32_DEFS_H
+
+#include <linux/const.h>
+
+/*
+ * This handles the memory map.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
+#ifdef CONFIG_4KSTACKS
+#define THREAD_ORDER 0
+#else
+#define THREAD_ORDER 1
+#endif
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+
+#define STACKFAULT_STACK 0
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 0
+#define DEBUG_STACK 0
+#define MCE_STACK 0
+#define N_EXCEPTION_STACKS 1
+
+#ifdef CONFIG_X86_PAE
+/* 44=32+12, the limit we can fit into an unsigned long pfn */
+#define __PHYSICAL_MASK_SHIFT 44
+#define __VIRTUAL_MASK_SHIFT 32
+#define PAGETABLE_LEVELS 3
+
+#else /* !CONFIG_X86_PAE */
+#define __PHYSICAL_MASK_SHIFT 32
+#define __VIRTUAL_MASK_SHIFT 32
+#define PAGETABLE_LEVELS 2
+#endif /* CONFIG_X86_PAE */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+extern int sysctl_legacy_va_layout;
+
+extern void find_low_pfn_range(void);
+extern unsigned long init_memory_mapping(unsigned long start,
+					 unsigned long end);
+extern void initmem_init(unsigned long, unsigned long);
+extern void free_initmem(void);
+extern void setup_bootmem_allocator(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_32_DEFS_H */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index e27fdbe5f9e4..072694ed81a5 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -1,105 +1,6 @@
 #ifndef _ASM_X86_PAGE_64_H
 #define _ASM_X86_PAGE_64_H

-#define PAGETABLE_LEVELS 4
-
-#define THREAD_ORDER 1
-#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE - 1))
-
-#define EXCEPTION_STACK_ORDER 0
-#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-
-#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
-#define IRQ_STACK_ORDER 2
-#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
-
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
-
-#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
-
-/*
- * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
- */
-#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
-
-#define __PHYSICAL_START CONFIG_PHYSICAL_START
-#define __KERNEL_ALIGN 0x200000
-
-/*
- * Make sure kernel is aligned to 2MB address. Catching it at compile
- * time is better. Change your config file and compile the kernel
- * for a 2MB aligned address (CONFIG_PHYSICAL_START)
- */
-#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
-#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
-#endif
-
-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
-
-/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-#define __PHYSICAL_MASK_SHIFT 46
-#define __VIRTUAL_MASK_SHIFT 48
-
-/*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
- */
-#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
-#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
-
-#ifndef __ASSEMBLY__
-void clear_page(void *page);
-void copy_page(void *to, void *from);
-
-/* duplicated to the one in bootmem.h */
-extern unsigned long max_pfn;
-extern unsigned long phys_base;
-
-extern unsigned long __phys_addr(unsigned long);
-#define __phys_reloc_hide(x) (x)
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef unsigned long pteval_t;
-typedef unsigned long pmdval_t;
-typedef unsigned long pudval_t;
-typedef unsigned long pgdval_t;
-typedef unsigned long pgprotval_t;
-
-typedef struct page *pgtable_t;
-
-typedef struct { pteval_t pte; } pte_t;
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-extern unsigned long init_memory_mapping(unsigned long start,
-					 unsigned long end);
-
-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
-extern void free_initmem(void);
-
-extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
-extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn) ((pfn) < max_pfn)
-#endif
-
+#include <asm/page_64_types.h>

 #endif /* _ASM_X86_PAGE_64_H */
diff --git a/arch/x86/include/asm/page_64.h.rej b/arch/x86/include/asm/page_64.h.rej
new file mode 100644
index 000000000000..9b1807f18596
--- /dev/null
+++ b/arch/x86/include/asm/page_64.h.rej
@@ -0,0 +1,114 @@
+***************
+*** 1,105 ****
+ #ifndef _ASM_X86_PAGE_64_H
+ #define _ASM_X86_PAGE_64_H
+
+- #define PAGETABLE_LEVELS 4
+-
+- #define THREAD_ORDER 1
+- #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+- #define CURRENT_MASK (~(THREAD_SIZE - 1))
+-
+- #define EXCEPTION_STACK_ORDER 0
+- #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+-
+- #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
+- #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+-
+- #define IRQSTACK_ORDER 2
+- #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+-
+- #define STACKFAULT_STACK 1
+- #define DOUBLEFAULT_STACK 2
+- #define NMI_STACK 3
+- #define DEBUG_STACK 4
+- #define MCE_STACK 5
+- #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+-
+- #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+- #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+-
+- /*
+- * Set __PAGE_OFFSET to the most negative possible address +
+- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
+- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
+- * what Xen requires.
+- */
+- #define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+-
+- #define __PHYSICAL_START CONFIG_PHYSICAL_START
+- #define __KERNEL_ALIGN 0x200000
+-
+- /*
+- * Make sure kernel is aligned to 2MB address. Catching it at compile
+- * time is better. Change your config file and compile the kernel
+- * for a 2MB aligned address (CONFIG_PHYSICAL_START)
+- */
+- #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
+- #error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
+- #endif
+-
+- #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+- #define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+-
+- /* See Documentation/x86_64/mm.txt for a description of the memory map. */
+- #define __PHYSICAL_MASK_SHIFT 46
+- #define __VIRTUAL_MASK_SHIFT 48
+-
+- /*
+- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
+- * arch/x86/kernel/head_64.S), and it is mapped here:
+- */
+- #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+- #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
+-
+- #ifndef __ASSEMBLY__
+- void clear_page(void *page);
+- void copy_page(void *to, void *from);
+-
+- /* duplicated to the one in bootmem.h */
+- extern unsigned long max_pfn;
+- extern unsigned long phys_base;
+-
+- extern unsigned long __phys_addr(unsigned long);
+- #define __phys_reloc_hide(x) (x)
+-
+- /*
+- * These are used to make use of C type-checking..
+- */
+- typedef unsigned long pteval_t;
+- typedef unsigned long pmdval_t;
+- typedef unsigned long pudval_t;
+- typedef unsigned long pgdval_t;
+- typedef unsigned long pgprotval_t;
+-
+- typedef struct page *pgtable_t;
+-
+- typedef struct { pteval_t pte; } pte_t;
+-
+- #define vmemmap ((struct page *)VMEMMAP_START)
+-
+- extern unsigned long init_memory_mapping(unsigned long start,
+- 					 unsigned long end);
+-
+- extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+- extern void free_initmem(void);
+-
+- extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+- extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+-
+- #endif /* !__ASSEMBLY__ */
+-
+- #ifdef CONFIG_FLATMEM
+- #define pfn_valid(pfn) ((pfn) < max_pfn)
+- #endif
+-
+
+ #endif /* _ASM_X86_PAGE_64_H */
+--- 1,6 ----
+ #ifndef _ASM_X86_PAGE_64_H
+ #define _ASM_X86_PAGE_64_H
+
++ #include <asm/page_64_types.h>
+
+ #endif /* _ASM_X86_PAGE_64_H */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
new file mode 100644
index 000000000000..bc73af3eda9c
--- /dev/null
+++ b/arch/x86/include/asm/page_64_types.h
@@ -0,0 +1,91 @@
+#ifndef _ASM_X86_PAGE_64_DEFS_H
+#define _ASM_X86_PAGE_64_DEFS_H
+
+#define PAGETABLE_LEVELS 4
+
+#define THREAD_ORDER 1
+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
+#define CURRENT_MASK (~(THREAD_SIZE - 1))
+
+#define EXCEPTION_STACK_ORDER 0
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+
+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+
+#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
+
+#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+
+/*
+ * Set __PAGE_OFFSET to the most negative possible address +
+ * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
+ * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
+ * what Xen requires.
+ */
+#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
+
+#define __PHYSICAL_START CONFIG_PHYSICAL_START
+#define __KERNEL_ALIGN 0x200000
+
+/*
+ * Make sure kernel is aligned to 2MB address. Catching it at compile
+ * time is better. Change your config file and compile the kernel
+ * for a 2MB aligned address (CONFIG_PHYSICAL_START)
+ */
+#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
+#endif
+
+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
+
+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
+#define __PHYSICAL_MASK_SHIFT 46
+#define __VIRTUAL_MASK_SHIFT 48
+
+/*
+ * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
+ * arch/x86/kernel/head_64.S), and it is mapped here:
+ */
+#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL)
+
+#ifndef __ASSEMBLY__
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+/* duplicated to the one in bootmem.h */
+extern unsigned long max_pfn;
+extern unsigned long phys_base;
+
+extern unsigned long __phys_addr(unsigned long);
+#define __phys_reloc_hide(x) (x)
+
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+extern unsigned long init_memory_mapping(unsigned long start,
+					 unsigned long end);
+
+extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+extern void free_initmem(void);
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn) ((pfn) < max_pfn)
+#endif
+
+#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
new file mode 100644
index 000000000000..2c52ff767584
--- /dev/null
+++ b/arch/x86/include/asm/page_types.h
@@ -0,0 +1,63 @@
+#ifndef _ASM_X86_PAGE_DEFS_H
+#define _ASM_X86_PAGE_DEFS_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+   virtual addresses are 32-bits but physical addresses are larger
+   (ie, 32-bit PAE). */
+#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+
+/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
+#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)
+
+/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
+#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
+
+#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_MAX_HSTATE 2
+
+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+
+#define VM_DATA_DEFAULT_FLAGS \
+	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifdef CONFIG_X86_64
+#include <asm/page_64_types.h>
+#else
+#include <asm/page_32_types.h>
+#endif /* CONFIG_X86_64 */
+
+#ifndef __ASSEMBLY__
+
+struct pgprot;
+
+extern int page_is_ram(unsigned long pagenr);
+extern int devmem_is_allowed(unsigned long pagenr);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+		       struct pgprot vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+			 struct pgprot vma_prot);
+
+extern unsigned long max_low_pfn_mapped;
+extern unsigned long max_pfn_mapped;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_DEFS_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b788dfd20483..0617d5cc9712 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -4,7 +4,7 @@
  * para-virtualization: those hooks are defined here. */

 #ifdef CONFIG_PARAVIRT
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
 #include <asm/asm.h>

 /* Bitmask of what can be clobbered: usually at least eax. */
diff --git a/arch/x86/include/asm/pgtable-2level-defs.h b/arch/x86/include/asm/pgtable-2level_types.h
index d77db8990eaa..09ae67efcebd 100644
--- a/arch/x86/include/asm/pgtable-2level-defs.h
+++ b/arch/x86/include/asm/pgtable-2level_types.h
@@ -1,6 +1,21 @@
 #ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
 #define _ASM_X86_PGTABLE_2LEVEL_DEFS_H

+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+
+typedef union {
+	pteval_t pte;
+	pteval_t pte_low;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
+
 #define SHARED_KERNEL_PMD 0

 /*
diff --git a/arch/x86/include/asm/pgtable-3level-defs.h b/arch/x86/include/asm/pgtable-3level_types.h
index 62561367653c..bcc89625ebe5 100644
--- a/arch/x86/include/asm/pgtable-3level-defs.h
+++ b/arch/x86/include/asm/pgtable-3level_types.h
@@ -1,6 +1,23 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
 #define _ASM_X86_PGTABLE_3LEVEL_DEFS_H

+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pudval_t;
+typedef u64 pgdval_t;
+typedef u64 pgprotval_t;
+
+typedef union {
+	struct {
+		unsigned long pte_low, pte_high;
+	};
+	pteval_t pte;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
+
 #ifdef CONFIG_PARAVIRT
 #define SHARED_KERNEL_PMD (pv_info.shared_kernel_pmd)
 #else
@@ -25,4 +42,5 @@
  */
 #define PTRS_PER_PTE 512

+
 #endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 8fef0f6bfbb6..62024ff897d9 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -3,164 +3,7 @@

 #include <asm/page.h>

-#define FIRST_USER_ADDRESS 0
-
-#define _PAGE_BIT_PRESENT 0 /* is present */
-#define _PAGE_BIT_RW 1 /* writeable */
-#define _PAGE_BIT_USER 2 /* userspace addressable */
-#define _PAGE_BIT_PWT 3 /* page write through */
-#define _PAGE_BIT_PCD 4 /* page cache disabled */
-#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
-#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
-#define _PAGE_BIT_PAT 7 /* on 4KB pages */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3 11
-#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-
-/* If _PAGE_BIT_PRESENT is clear, we use these: */
-/* - if the user mapped it with PROT_NONE; pte_present gives true */
-#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
-/* - set: nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
-
-#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
-#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
-#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
-#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
-#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
-#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
-#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
-#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
-#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
-#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
-#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
-#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
-#define __HAVE_ARCH_PTE_SPECIAL
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#else
-#define _PAGE_NX (_AT(pteval_t, 0))
-#endif
-
-#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
-#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
-	_PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
-	_PAGE_DIRTY)
-
-/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
-	_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
-#define _PAGE_CACHE_WB (0)
-#define _PAGE_CACHE_WC (_PAGE_PWT)
-#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
-#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
-
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
-	_PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
-	_PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-	_PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-	_PAGE_ACCESSED)
-#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-	_PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-	_PAGE_ACCESSED)
-
-#define __PAGE_KERNEL_EXEC \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
-#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
-#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
-
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
-#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
-#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
-
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-/*
- * early identity mapping pte attrib macros.
- */
-#ifdef CONFIG_X86_64
-#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
-#else
-/*
- * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
- * bits are combined, this will alow user to access the high address mapped
- * VDSO in the presence of CONFIG_COMPAT_VDSO
- */
-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
-#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
-#endif
+#include <asm/pgtable_types.h>

 /*
  * Macro to mark a page protection value as UC-
@@ -172,9 +15,6 @@

 #ifndef __ASSEMBLY__

-#define pgprot_writecombine pgprot_writecombine
-extern pgprot_t pgprot_writecombine(pgprot_t prot);
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -185,6 +25,66 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;

+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else  /* !CONFIG_PARAVIRT */
+#define set_pte(ptep, pte) native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
+
+#define set_pte_present(mm, addr, ptep, pte) \
+	native_set_pte_present(mm, addr, ptep, pte)
+#define set_pte_atomic(ptep, pte) \
+	native_set_pte_atomic(ptep, pte)
+
+#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
+#define pgd_clear(pgd) native_pgd_clear(pgd)
+#endif
+
+#ifndef set_pud
+# define set_pud(pudp, pud) native_set_pud(pudp, pud)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pud_clear(pud) native_pud_clear(pud)
+#endif
+
+#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd) native_pmd_clear(pmd)
+
+#define pte_update(mm, addr, ptep) do { } while (0)
+#define pte_update_defer(mm, addr, ptep) do { } while (0)
+
+static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+{
+	native_pagetable_setup_start(base);
+}
+
+static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+{
+	native_pagetable_setup_done(base);
+}
+
+#define pgd_val(x) native_pgd_val(x)
+#define __pgd(x) native_make_pgd(x)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_val(x) native_pud_val(x)
+#define __pud(x) native_make_pud(x)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_val(x) native_pmd_val(x)
+#define __pmd(x) native_make_pmd(x)
+#endif
+
+#define pte_val(x) native_pte_val(x)
+#define __pte(x) native_make_pte(x)
+
+#endif /* CONFIG_PARAVIRT */
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -316,8 +216,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }

-extern pteval_t __supported_pte_mask;
-
 /*
  * Mask out unsupported bits in a present pgprot. Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -390,75 +288,6 @@ static inline int is_new_memtype_allowed(unsigned long flags,
 	return 1;
 }

-#ifndef __ASSEMBLY__
-/* Indicate that x86 has its own track and untrack pfn vma functions */
-#define __HAVE_PFNMAP_TRACKING
-
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-			      unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
-			      unsigned long size, pgprot_t *vma_prot);
-#endif
-
-/* Install a pte for a particular vaddr in kernel space. */
-void set_pte_vaddr(unsigned long vaddr, pte_t pte);
-
-#ifdef CONFIG_X86_32
-extern void native_pagetable_setup_start(pgd_t *base);
-extern void native_pagetable_setup_done(pgd_t *base);
-#else
-static inline void native_pagetable_setup_start(pgd_t *base) {}
-static inline void native_pagetable_setup_done(pgd_t *base) {}
-#endif
-
-struct seq_file;
-extern void arch_report_meminfo(struct seq_file *m);
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else  /* !CONFIG_PARAVIRT */
-#define set_pte(ptep, pte) native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
-
-#define set_pte_present(mm, addr, ptep, pte) \
-	native_set_pte_present(mm, addr, ptep, pte)
-#define set_pte_atomic(ptep, pte) \
-	native_set_pte_atomic(ptep, pte)
-
-#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
-
-#ifndef __PAGETABLE_PUD_FOLDED
-#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd) native_pgd_clear(pgd)
-#endif
-
-#ifndef set_pud
-# define set_pud(pudp, pud) native_set_pud(pudp, pud)
-#endif
-
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pud_clear(pud) native_pud_clear(pud)
-#endif
-
-#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
-#define pmd_clear(pmd) native_pmd_clear(pmd)
-
-#define pte_update(mm, addr, ptep) do { } while (0)
-#define pte_update_defer(mm, addr, ptep) do { } while (0)
-
-static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
-{
-	native_pagetable_setup_start(base);
-}
-
-static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
-{
-	native_pagetable_setup_done(base);
-}
-#endif /* CONFIG_PARAVIRT */
-
 #endif /* __ASSEMBLY__ */

 #ifdef CONFIG_X86_32
@@ -600,7 +429,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)

 static inline int pud_large(pud_t pud)
 {
-	return (pud_flags(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
 		(_PAGE_PSE | _PAGE_PRESENT);
 }

@@ -608,6 +437,11 @@ static inline int pud_bad(pud_t pud)
 {
 	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
 }
+#else
+static inline int pud_large(pud_t pud)
+{
+	return 0;
+}
 #endif /* PAGETABLE_LEVELS > 2 */

 #if PAGETABLE_LEVELS > 3
@@ -676,28 +510,6 @@ static inline int pgd_none(pgd_t pgd)

 #ifndef __ASSEMBLY__

-enum {
-	PG_LEVEL_NONE,
-	PG_LEVEL_4K,
-	PG_LEVEL_2M,
-	PG_LEVEL_1G,
-	PG_LEVEL_NUM
-};
-
-#ifdef CONFIG_PROC_FS
-extern void update_page_count(int level, unsigned long pages);
-#else
-static inline void update_page_count(int level, unsigned long pages) { }
-#endif
-
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address, unsigned int *level);
-
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 {
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 1952bb762aac..97612fc7632f 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_PGTABLE_32_H
 #define _ASM_X86_PGTABLE_32_H

+#include <asm/pgtable_32_types.h>

 /*
  * The Linux memory management assumes a three-level page table setup. On
@@ -33,47 +34,6 @@ void paging_init(void);

 extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);

-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level-defs.h>
-# define PMD_SIZE (1UL << PMD_SHIFT)
-# define PMD_MASK (~(PMD_SIZE - 1))
-#else
-# include <asm/pgtable-2level-defs.h>
-#endif
-
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE - 1))
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8 * 1024 * 1024)
-#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
-		    & PMD_MASK)
-
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
-#else
-# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
-#endif
-
-#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)

 /*
  * Define this if things work differently on an i386 and an i486:
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
new file mode 100644
index 000000000000..bd8df3b2fe04
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_32_types.h
@@ -0,0 +1,46 @@
+#ifndef _ASM_X86_PGTABLE_32_DEFS_H
+#define _ASM_X86_PGTABLE_32_DEFS_H
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level_types.h>
+# define PMD_SIZE (1UL << PMD_SHIFT)
+# define PMD_MASK (~(PMD_SIZE - 1))
+#else
+# include <asm/pgtable-2level_types.h>
+#endif
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
+		    & PMD_MASK)
+
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
+#else
+# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
+#endif
+
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
+#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 1c4e247c51fd..6b87bc6d5018 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -2,6 +2,8 @@
 #define _ASM_X86_PGTABLE_64_H

 #include <linux/const.h>
+#include <asm/pgtable_64_types.h>
+
 #ifndef __ASSEMBLY__

 /*
@@ -25,32 +27,6 @@ extern void paging_init(void);

 #endif /* !__ASSEMBLY__ */

-#define SHARED_KERNEL_PMD 0
-
-/*
- * PGDIR_SHIFT determines what a top-level page table entry can map
- */
-#define PGDIR_SHIFT 39
-#define PTRS_PER_PGD 512
-
-/*
- * 3rd level page
- */
-#define PUD_SHIFT 30
-#define PTRS_PER_PUD 512
-
-/*
- * PMD_SHIFT determines the size of the area a middle-level
- * page table can map
- */
-#define PMD_SHIFT 21
-#define PTRS_PER_PMD 512
-
-/*
- * entries per page directory level
- */
-#define PTRS_PER_PTE 512
-
 #ifndef __ASSEMBLY__

 #define pte_ERROR(e) \
@@ -130,26 +106,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
 	native_set_pgd(pgd, native_make_pgd(0));
 }

-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
-#define PMD_MASK (~(PMD_SIZE - 1))
-#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
-#define PUD_MASK (~(PUD_SIZE - 1))
-#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE - 1))
-
-
-#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
-#define VMALLOC_START _AC(0xffffc20000000000, UL)
-#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
-#define VMEMMAP_START _AC(0xffffe20000000000, UL)
-#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
-#define MODULES_END _AC(0xffffffffff000000, UL)
-#define MODULES_LEN (MODULES_END - MODULES_VADDR)
-
-#ifndef __ASSEMBLY__
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
new file mode 100644
index 000000000000..2f59135c6f2a
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -0,0 +1,62 @@
+#ifndef _ASM_X86_PGTABLE_64_DEFS_H
+#define _ASM_X86_PGTABLE_64_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long pteval_t;
+typedef unsigned long pmdval_t;
+typedef unsigned long pudval_t;
+typedef unsigned long pgdval_t;
+typedef unsigned long pgprotval_t;
+
+typedef struct { pteval_t pte; } pte_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#define SHARED_KERNEL_PMD 0
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT 39
+#define PTRS_PER_PGD 512
+
+/*
+ * 3rd level page
+ */
+#define PUD_SHIFT 30
+#define PTRS_PER_PUD 512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT 21
+#define PTRS_PER_PMD 512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE 512
+
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+
+#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define VMALLOC_START _AC(0xffffc20000000000, UL)
+#define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
+#define VMEMMAP_START _AC(0xffffe20000000000, UL)
+#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+#define MODULES_END _AC(0xffffffffff000000, UL)
+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
+
+#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
new file mode 100644
index 000000000000..a7452f10930e
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -0,0 +1,321 @@
1#ifndef _ASM_X86_PGTABLE_DEFS_H
2#define _ASM_X86_PGTABLE_DEFS_H
3
4#include <linux/const.h>
5
6#define FIRST_USER_ADDRESS 0
7
8#define _PAGE_BIT_PRESENT 0 /* is present */
9#define _PAGE_BIT_RW 1 /* writeable */
10#define _PAGE_BIT_USER 2 /* userspace addressable */
11#define _PAGE_BIT_PWT 3 /* page write through */
12#define _PAGE_BIT_PCD 4 /* page cache disabled */
13#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
14#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
15#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
16#define _PAGE_BIT_PAT 7 /* on 4KB pages */
17#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
18#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
19#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
20#define _PAGE_BIT_UNUSED3 11
21#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
22#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
23#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
24#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
25
26/* If _PAGE_BIT_PRESENT is clear, we use these: */
27/* - if the user mapped it with PROT_NONE; pte_present gives true */
28#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
29/* - set: nonlinear file mapping, saved PTE; unset:swap */
30#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
31
32#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
33#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
34#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
35#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
36#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
37#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
38#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
39#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
40#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
41#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
42#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
43#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
44#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
45#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
46#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
47#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
48#define __HAVE_ARCH_PTE_SPECIAL
49
50#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
51#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
52#else
53#define _PAGE_NX (_AT(pteval_t, 0))
54#endif
55
56#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
57#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
58
59#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
60 _PAGE_ACCESSED | _PAGE_DIRTY)
61#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
62 _PAGE_DIRTY)
63
64/* Set of bits not changed in pte_modify */
65#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
66 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
67
68#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
69#define _PAGE_CACHE_WB (0)
70#define _PAGE_CACHE_WC (_PAGE_PWT)
71#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
72#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
73
74#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
75#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
76 _PAGE_ACCESSED | _PAGE_NX)
77
78#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
79 _PAGE_USER | _PAGE_ACCESSED)
80#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
81 _PAGE_ACCESSED | _PAGE_NX)
82#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
83 _PAGE_ACCESSED)
84#define PAGE_COPY PAGE_COPY_NOEXEC
85#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
86 _PAGE_ACCESSED | _PAGE_NX)
87#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
88 _PAGE_ACCESSED)
89
90#define __PAGE_KERNEL_EXEC \
91 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
92#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
93
94#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
95#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
96#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
97#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
98#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
99#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
100#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
101#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
102#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
103#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
104#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
105
106#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
107#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
108#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
109#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
110
111#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
112#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
113#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
114#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
115#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
116#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
117#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
118#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
119#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
120#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
121#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
122#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
123#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
124
125#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
126#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
127#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
128#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
129
130/* xwr: exec/write/read permission combinations (see the sketch below) */
131#define __P000 PAGE_NONE
132#define __P001 PAGE_READONLY
133#define __P010 PAGE_COPY
134#define __P011 PAGE_COPY
135#define __P100 PAGE_READONLY_EXEC
136#define __P101 PAGE_READONLY_EXEC
137#define __P110 PAGE_COPY_EXEC
138#define __P111 PAGE_COPY_EXEC
139
140#define __S000 PAGE_NONE
141#define __S001 PAGE_READONLY
142#define __S010 PAGE_SHARED
143#define __S011 PAGE_SHARED
144#define __S100 PAGE_READONLY_EXEC
145#define __S101 PAGE_READONLY_EXEC
146#define __S110 PAGE_SHARED_EXEC
147#define __S111 PAGE_SHARED_EXEC
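The __Pxxx (private) and __Sxxx (shared) entries above populate the generic protection_map[] table, which mm code indexes with a vma's VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits. A rough sketch of that lookup, assuming the declarations from linux/mm.h; the exact consumer lives in mm/mmap.c and this helper name is hypothetical:

	/* Illustrative only: how the tables above are typically consumed. */
	static pgprot_t example_vm_page_prot(unsigned long vm_flags)
	{
		return protection_map[vm_flags &
				      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}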
148
149/*
150 * Early identity-mapping pte attribute macros.
151 */
152#ifdef CONFIG_X86_64
153#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
154#else
155/*
156 * For PDE_IDENT_ATTR, include the USER bit.  Because the PDE and PTE
157 * protection bits are combined, this allows userspace to access the
158 * high-address-mapped VDSO when CONFIG_COMPAT_VDSO is enabled.
159 */
160#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
161#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
162#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
163#endif
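The identity-mapping attributes above are kept as raw numeric values because the early 32-bit boot code builds its page tables in assembly, before the symbolic _PAGE_* flags are convenient to use. A hypothetical C-level sketch of how a PDE for the early identity map would be assembled from a page table's physical address (the helper name is illustrative, not part of this patch):

	/* Hypothetical illustration for the 32-bit case: combine a page
	 * table's physical address with the identity-map attributes. */
	static inline unsigned long example_ident_pde(unsigned long pte_table_phys)
	{
		return pte_table_phys | PDE_IDENT_ATTR; /* PRESENT+RW+USER+ACCESSED+DIRTY */
	}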
164
165#ifdef CONFIG_X86_32
166# include "pgtable_32_types.h"
167#else
168# include "pgtable_64_types.h"
169#endif
170
171#ifndef __ASSEMBLY__
172
173#include <linux/types.h>
174
175typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
176
177typedef struct { pgdval_t pgd; } pgd_t;
178
179static inline pgd_t native_make_pgd(pgdval_t val)
180{
181 return (pgd_t) { val };
182}
183
184static inline pgdval_t native_pgd_val(pgd_t pgd)
185{
186 return pgd.pgd;
187}
188
189static inline pgdval_t pgd_flags(pgd_t pgd)
190{
191 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
192}
193
194#if PAGETABLE_LEVELS > 3
195typedef struct { pudval_t pud; } pud_t;
196
197static inline pud_t native_make_pud(pudval_t val)
198{
199 return (pud_t) { val };
200}
201
202static inline pudval_t native_pud_val(pud_t pud)
203{
204 return pud.pud;
205}
206#else
207#include <asm-generic/pgtable-nopud.h>
208
209static inline pudval_t native_pud_val(pud_t pud)
210{
211 return native_pgd_val(pud.pgd);
212}
213#endif
214
215#if PAGETABLE_LEVELS > 2
216typedef struct { pmdval_t pmd; } pmd_t;
217
218static inline pmd_t native_make_pmd(pmdval_t val)
219{
220 return (pmd_t) { val };
221}
222
223static inline pmdval_t native_pmd_val(pmd_t pmd)
224{
225 return pmd.pmd;
226}
227#else
228#include <asm-generic/pgtable-nopmd.h>
229
230static inline pmdval_t native_pmd_val(pmd_t pmd)
231{
232 return native_pgd_val(pmd.pud.pgd);
233}
234#endif
235
236static inline pudval_t pud_flags(pud_t pud)
237{
238 return native_pud_val(pud) & PTE_FLAGS_MASK;
239}
240
241static inline pmdval_t pmd_flags(pmd_t pmd)
242{
243 return native_pmd_val(pmd) & PTE_FLAGS_MASK;
244}
245
246static inline pte_t native_make_pte(pteval_t val)
247{
248 return (pte_t) { .pte = val };
249}
250
251static inline pteval_t native_pte_val(pte_t pte)
252{
253 return pte.pte;
254}
255
256static inline pteval_t pte_flags(pte_t pte)
257{
258 return native_pte_val(pte) & PTE_FLAGS_MASK;
259}
260
261#define pgprot_val(x) ((x).pgprot)
262#define __pgprot(x) ((pgprot_t) { (x) } )
263
264
265typedef struct page *pgtable_t;
266
267extern pteval_t __supported_pte_mask;
268extern int nx_enabled;
269
270#define pgprot_writecombine pgprot_writecombine
271extern pgprot_t pgprot_writecombine(pgprot_t prot);
272
273/* Indicate that x86 has its own track and untrack pfn vma functions */
274#define __HAVE_PFNMAP_TRACKING
275
276#define __HAVE_PHYS_MEM_ACCESS_PROT
277struct file;
278pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
279 unsigned long size, pgprot_t vma_prot);
280int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
281 unsigned long size, pgprot_t *vma_prot);
282
283/* Install a pte for a particular vaddr in kernel space. */
284void set_pte_vaddr(unsigned long vaddr, pte_t pte);
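set_pte_vaddr() installs a single pte for a kernel-space virtual address. A hypothetical usage sketch, assuming pfn_pte() from pgtable.h and PAGE_KERNEL from the definitions above (the helper name is illustrative):

	/* Illustrative helper, not part of this patch. */
	static void example_map_kernel_page(unsigned long vaddr, unsigned long pfn)
	{
		set_pte_vaddr(vaddr, pfn_pte(pfn, PAGE_KERNEL));
	}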
285
286#ifdef CONFIG_X86_32
287extern void native_pagetable_setup_start(pgd_t *base);
288extern void native_pagetable_setup_done(pgd_t *base);
289#else
290static inline void native_pagetable_setup_start(pgd_t *base) {}
291static inline void native_pagetable_setup_done(pgd_t *base) {}
292#endif
293
294struct seq_file;
295extern void arch_report_meminfo(struct seq_file *m);
296
297enum {
298 PG_LEVEL_NONE,
299 PG_LEVEL_4K,
300 PG_LEVEL_2M,
301 PG_LEVEL_1G,
302 PG_LEVEL_NUM
303};
304
305#ifdef CONFIG_PROC_FS
306extern void update_page_count(int level, unsigned long pages);
307#else
308static inline void update_page_count(int level, unsigned long pages) { }
309#endif
310
311/*
312 * Helper function that returns the kernel pagetable entry controlling
313 * the virtual address 'address'. NULL means no pagetable entry present.
314 * NOTE: the return type is pte_t, but if the pmd is a large (PSE) page,
315 * the pmd entry itself is returned, cast as a pte.
316 */
317extern pte_t *lookup_address(unsigned long address, unsigned int *level);
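A hypothetical usage sketch for lookup_address(): check whether a kernel virtual address is currently backed by a large (2M/1G) mapping, using the PG_LEVEL_* enum above (the helper name is illustrative, not part of this patch):

	/* Illustrative helper, not part of this patch. */
	static bool example_is_large_mapping(unsigned long address)
	{
		unsigned int level;
		pte_t *kpte = lookup_address(address, &level);

		return kpte && level > PG_LEVEL_4K;
	}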
318
319#endif /* !__ASSEMBLY__ */
320
321#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a0133838b67c..dabab1a19ddd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -16,6 +16,7 @@ struct mm_struct;
16#include <asm/cpufeature.h> 16#include <asm/cpufeature.h>
17#include <asm/system.h> 17#include <asm/system.h>
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/pgtable_types.h>
19#include <asm/percpu.h> 20#include <asm/percpu.h>
20#include <asm/msr.h> 21#include <asm/msr.h>
21#include <asm/desc_defs.h> 22#include <asm/desc_defs.h>
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 6f5a38c7f900..98e3c2bc7563 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1747,10 +1747,11 @@ static void __cpuinit voyager_smp_prepare_boot_cpu(void)
1747 int cpu = smp_processor_id(); 1747 int cpu = smp_processor_id();
1748 switch_to_new_gdt(cpu); 1748 switch_to_new_gdt(cpu);
1749 1749
1750 cpu_online_map = cpumask_of_cpu(smp_processor_id()); 1750 cpu_set(cpu, cpu_online_map);
1751 cpu_callout_map = cpumask_of_cpu(smp_processor_id()); 1751 cpu_set(cpu, cpu_callout_map);
1752 cpu_callin_map = CPU_MASK_NONE; 1752 cpu_set(cpu, cpu_possible_map);
1753 cpu_present_map = cpumask_of_cpu(smp_processor_id()); 1753 cpu_set(cpu, cpu_present_map);
1754
1754} 1755}
1755 1756
1756static int __cpuinit voyager_cpu_up(unsigned int cpu) 1757static int __cpuinit voyager_cpu_up(unsigned int cpu)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 94c4e7262197..29644175490f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -851,6 +851,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
851 return; 851 return;
852 } 852 }
853 853
854 /* kprobes don't want to hook spurious faults. */
854 if (unlikely(notify_page_fault(regs))) 855 if (unlikely(notify_page_fault(regs)))
855 return; 856 return;
856 /* 857 /*