author     Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-19 08:47:57 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-19 08:47:57 -0400
commit     40d743b8c16a8cf6e30c1d941aa6147f9550ea75
tree       9fcdf9a06b18a275253048d1ea7c9803cec38845	/arch/powerpc/include/asm/pgtable-ppc64.h
parent     7da18afa423f167e7ef3c9728e584d8bf05bd55a
parent     83e686ea0291ee93b87dcdc00b96443b80de56c9
Merge branch 'for-rmk' of git://linux-arm.org/linux-2.6
Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc64.h')
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 67
1 file changed, 47 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8cd083c61503..806abe7a3fa5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -5,11 +5,6 @@
  * the ppc64 hashed page table.
  */
 
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-#endif /* __ASSEMBLY__ */
-
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/pgtable-ppc64-64k.h>
 #else
@@ -38,26 +33,47 @@
 #endif
 
 /*
- * Define the address range of the vmalloc VM area.
+ * Define the address range of the kernel non-linear virtual area
+ */
+
+#ifdef CONFIG_PPC_BOOK3E
+#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#else
+#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#endif
+#define KERN_VIRT_SIZE	PGTABLE_RANGE
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
  */
-#define VMALLOC_START	ASM_CONST(0xD000000000000000)
-#define VMALLOC_SIZE	(PGTABLE_RANGE >> 1)
-#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
+#define VMALLOC_START	KERN_VIRT_START
+#ifdef CONFIG_PPC_BOOK3E
+#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
+#else
+#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
+#endif
+#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
 
 /*
- * Define the address ranges for MMIO and IO space :
+ * The second half of the kernel virtual space is used for IO mappings,
+ * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
  *
- *  ISA_IO_BASE = VMALLOC_END, 64K reserved area
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
  *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
  *  IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
  */
+#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
 #define FULL_IO_SIZE	0x80000000ul
-#define  ISA_IO_BASE	(VMALLOC_END)
-#define  ISA_IO_END	(VMALLOC_END + 0x10000ul)
+#define  ISA_IO_BASE	(KERN_IO_START)
+#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
 #define  PHB_IO_BASE	(ISA_IO_END)
-#define  PHB_IO_END	(VMALLOC_END + FULL_IO_SIZE)
+#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
 #define IOREMAP_BASE	(PHB_IO_END)
-#define IOREMAP_END	(VMALLOC_START + PGTABLE_RANGE)
+#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
+
 
 /*
  * Region IDs
@@ -68,23 +84,32 @@
 
 #define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
 #define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
-#define VMEMMAP_REGION_ID	(0xfUL)
+#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
 #define USER_REGION_ID		(0UL)
 
 /*
- * Defines the address of the vmemap area, in its own region
+ * Defines the address of the vmemap area, in its own region on
+ * hash table CPUs and after the vmalloc space on Book3E
  */
+#ifdef CONFIG_PPC_BOOK3E
+#define VMEMMAP_BASE		VMALLOC_END
+#define VMEMMAP_END		KERN_IO_START
+#else
 #define VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
+#endif
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
 
 /*
  * Include the PTE bits definitions
  */
+#ifdef CONFIG_PPC_BOOK3S
 #include <asm/pte-hash64.h>
+#else
+#include <asm/pte-book3e.h>
+#endif
 #include <asm/pte-common.h>
 
-
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -92,6 +117,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/stddef.h>
+#include <asm/tlbflush.h>
+
 /*
  * This is the default implementation of various PTE accessors, it's
  * used in all cases except Book3S with 64K pages where we have a
@@ -285,8 +313,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
-		 _PAGE_EXEC | _PAGE_HWEXEC);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
 #ifdef PTE_ATOMIC_UPDATES
 	unsigned long old, tmp;
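
Note: the sketch below is not part of the commit. It is a rough standalone illustration of how the merged macros carve up the kernel non-linear region described in the comments above; PGTABLE_RANGE is assumed to be 1UL << 46 purely so the numbers work out, since the real value depends on the page-size configuration.

/*
 * Standalone illustration (not kernel code) of the layout introduced above.
 * PGTABLE_RANGE is an assumed placeholder value for demonstration only.
 */
#include <stdio.h>

#define PGTABLE_RANGE	(1UL << 46)	/* assumption for this sketch */
#define KERN_VIRT_SIZE	PGTABLE_RANGE
#define FULL_IO_SIZE	0x80000000ul

static void show_layout(const char *name, unsigned long kern_virt_start,
			int vmalloc_shift)
{
	unsigned long vmalloc_start = kern_virt_start;
	unsigned long vmalloc_end   = vmalloc_start + (KERN_VIRT_SIZE >> vmalloc_shift);
	unsigned long kern_io_start = kern_virt_start + (KERN_VIRT_SIZE >> 1);
	unsigned long isa_io_end    = kern_io_start + 0x10000ul;
	unsigned long phb_io_end    = kern_io_start + FULL_IO_SIZE;
	unsigned long ioremap_end   = kern_virt_start + KERN_VIRT_SIZE;

	printf("%s\n", name);
	printf("  vmalloc  %016lx - %016lx\n", vmalloc_start, vmalloc_end);
	printf("  ISA IO   %016lx - %016lx\n", kern_io_start, isa_io_end);
	printf("  PHB IO   %016lx - %016lx\n", isa_io_end, phb_io_end);
	printf("  ioremap  %016lx - %016lx\n", phb_io_end, ioremap_end);
}

int main(void)
{
	/*
	 * Hash (Book3S): vmalloc gets half of the region; Book3E: a quarter,
	 * leaving a quarter between VMALLOC_END and KERN_IO_START for the
	 * virtual memmap.
	 */
	show_layout("Book3S (hash)", 0xD000000000000000UL, 1);
	show_layout("Book3E",        0x8000000000000000UL, 2);
	return 0;
}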
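
Similarly, the last hunk trims _PAGE_HWEXEC from the mask used by __ptep_set_access_flags(). A toy sketch of the effect, using made-up placeholder bit values rather than the real pte-hash64/pte-book3e definitions:

/*
 * Toy illustration only: the _PAGE_* values below are placeholders,
 * not the real definitions from the pte-* headers.
 */
#include <stdio.h>

#define _PAGE_DIRTY	0x080UL	/* placeholder */
#define _PAGE_ACCESSED	0x100UL	/* placeholder */
#define _PAGE_RW	0x004UL	/* placeholder */
#define _PAGE_EXEC	0x200UL	/* placeholder */
#define _PAGE_HWEXEC	0x400UL	/* placeholder; dropped from the mask */

int main(void)
{
	unsigned long entry = _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC;

	/* New mask: only the generic EXEC bit is propagated into the PTE. */
	unsigned long bits = entry &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	printf("propagated bits: %#lx\n", bits);	/* HWEXEC filtered out */
	return 0;
}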