aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-07-27 21:59:34 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2009-08-19 20:25:06 -0400
commit57e2a99f74b0d3720c97a6aadb57ae6aad3c61ea (patch)
tree4268a98ad222dbcf790749aed52417eb0a3a2a35 /arch
parent0257c99cdfaca53a881339e1cbca638c61569b05 (diff)
powerpc: Add memory management headers for new 64-bit BookE
This adds the PTE and pgtable format definitions, along with changes to the kernel memory map and other definitions related to implementing support for 64-bit Book3E. This also shields some asm-offset bits that are currently only relevant on 32-bit We also move the definition of the "linux" page size constants to the common mmu.h file and add a few sizes that are relevant to embedded processors. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h27
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h20
-rw-r--r--arch/powerpc/include/asm/mmu.h37
-rw-r--r--arch/powerpc/include/asm/page.h4
-rw-r--r--arch/powerpc/include/asm/page_64.h10
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h61
-rw-r--r--arch/powerpc/include/asm/pte-book3e.h70
-rw-r--r--arch/powerpc/include/asm/pte-common.h3
-rw-r--r--arch/powerpc/kernel/asm-offsets.c5
-rw-r--r--arch/powerpc/mm/hugetlbpage.c8
10 files changed, 205 insertions, 40 deletions
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 42a39b4aacec..6ddbe48d07fa 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -170,6 +170,33 @@ typedef struct {
170 unsigned int active; 170 unsigned int active;
171 unsigned long vdso_base; 171 unsigned long vdso_base;
172} mm_context_t; 172} mm_context_t;
173
174/* Page size definitions, common between 32 and 64-bit
175 *
176 * shift : is the "PAGE_SHIFT" value for that page size
177 * enc : is the pte encoding mask
178 *
179 */
180struct mmu_psize_def
181{
182 unsigned int shift; /* number of bits */
183 unsigned int enc; /* PTE encoding */
184};
185extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
186
187/* The page sizes use the same names as 64-bit hash but are
188 * constants
189 */
190#if defined(CONFIG_PPC_4K_PAGES)
191#define mmu_virtual_psize MMU_PAGE_4K
192#elif defined(CONFIG_PPC_64K_PAGES)
193#define mmu_virtual_psize MMU_PAGE_64K
194#else
195#error Unsupported page size
196#endif
197
198extern int mmu_linear_psize;
199
173#endif /* !__ASSEMBLY__ */ 200#endif /* !__ASSEMBLY__ */
174 201
175#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ 202#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 98c104a09961..b537903b9fca 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -139,26 +139,6 @@ struct mmu_psize_def
139#endif /* __ASSEMBLY__ */ 139#endif /* __ASSEMBLY__ */
140 140
141/* 141/*
142 * The kernel use the constants below to index in the page sizes array.
143 * The use of fixed constants for this purpose is better for performances
144 * of the low level hash refill handlers.
145 *
146 * A non supported page size has a "shift" field set to 0
147 *
148 * Any new page size being implemented can get a new entry in here. Whether
149 * the kernel will use it or not is a different matter though. The actual page
150 * size used by hugetlbfs is not defined here and may be made variable
151 */
152
153#define MMU_PAGE_4K 0 /* 4K */
154#define MMU_PAGE_64K 1 /* 64K */
155#define MMU_PAGE_64K_AP 2 /* 64K Admixed (in a 4K segment) */
156#define MMU_PAGE_1M 3 /* 1M */
157#define MMU_PAGE_16M 4 /* 16M */
158#define MMU_PAGE_16G 5 /* 16G */
159#define MMU_PAGE_COUNT 6
160
161/*
162 * Segment sizes. 142 * Segment sizes.
163 * These are the values used by hardware in the B field of 143 * These are the values used by hardware in the B field of
164 * SLB entries and the first dword of MMU hashtable entries. 144 * SLB entries and the first dword of MMU hashtable entries.
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index fb57ded592f9..2fcfefc60894 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -17,6 +17,7 @@
17#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004) 17#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
18#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008) 18#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
19#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) 19#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
20#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020)
20 21
21/* 22/*
22 * This is individual features 23 * This is individual features
@@ -73,6 +74,41 @@ extern void early_init_mmu_secondary(void);
73 74
74#endif /* !__ASSEMBLY__ */ 75#endif /* !__ASSEMBLY__ */
75 76
77/* The kernel use the constants below to index in the page sizes array.
78 * The use of fixed constants for this purpose is better for performances
79 * of the low level hash refill handlers.
80 *
81 * A non supported page size has a "shift" field set to 0
82 *
83 * Any new page size being implemented can get a new entry in here. Whether
84 * the kernel will use it or not is a different matter though. The actual page
85 * size used by hugetlbfs is not defined here and may be made variable
86 *
87 * Note: This array ended up being a false good idea as it's growing to the
88 * point where I wonder if we should replace it with something different,
89 * to think about, feedback welcome. --BenH.
90 */
91
92/* There are #define as they have to be used in assembly
93 *
94 * WARNING: If you change this list, make sure to update the array of
95 * names currently in arch/powerpc/mm/hugetlbpage.c or bad things will
96 * happen
97 */
98#define MMU_PAGE_4K 0
99#define MMU_PAGE_16K 1
100#define MMU_PAGE_64K 2
101#define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */
102#define MMU_PAGE_256K 4
103#define MMU_PAGE_1M 5
104#define MMU_PAGE_8M 6
105#define MMU_PAGE_16M 7
106#define MMU_PAGE_256M 8
107#define MMU_PAGE_1G 9
108#define MMU_PAGE_16G 10
109#define MMU_PAGE_64G 11
110#define MMU_PAGE_COUNT 12
111
76 112
77#if defined(CONFIG_PPC_STD_MMU_64) 113#if defined(CONFIG_PPC_STD_MMU_64)
78/* 64-bit classic hash table MMU */ 114/* 64-bit classic hash table MMU */
@@ -94,5 +130,6 @@ extern void early_init_mmu_secondary(void);
94# include <asm/mmu-8xx.h> 130# include <asm/mmu-8xx.h>
95#endif 131#endif
96 132
133
97#endif /* __KERNEL__ */ 134#endif /* __KERNEL__ */
98#endif /* _ASM_POWERPC_MMU_H_ */ 135#endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 4940662ee87e..ff24254990e1 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -139,7 +139,11 @@ extern phys_addr_t kernstart_addr;
139 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for 139 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
140 * "kernelness", use is_kernel_addr() - it should do what you want. 140 * "kernelness", use is_kernel_addr() - it should do what you want.
141 */ 141 */
142#ifdef CONFIG_PPC_BOOK3E_64
143#define is_kernel_addr(x) ((x) >= 0x8000000000000000ul)
144#else
142#define is_kernel_addr(x) ((x) >= PAGE_OFFSET) 145#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
146#endif
143 147
144#ifndef __ASSEMBLY__ 148#ifndef __ASSEMBLY__
145 149
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 5817a3b747e5..3f17b83f55a1 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -135,12 +135,22 @@ extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
135#endif /* __ASSEMBLY__ */ 135#endif /* __ASSEMBLY__ */
136#else 136#else
137#define slice_init() 137#define slice_init()
138#ifdef CONFIG_PPC_STD_MMU_64
138#define get_slice_psize(mm, addr) ((mm)->context.user_psize) 139#define get_slice_psize(mm, addr) ((mm)->context.user_psize)
139#define slice_set_user_psize(mm, psize) \ 140#define slice_set_user_psize(mm, psize) \
140do { \ 141do { \
141 (mm)->context.user_psize = (psize); \ 142 (mm)->context.user_psize = (psize); \
142 (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \ 143 (mm)->context.sllp = SLB_VSID_USER | mmu_psize_defs[(psize)].sllp; \
143} while (0) 144} while (0)
145#else /* CONFIG_PPC_STD_MMU_64 */
146#ifdef CONFIG_PPC_64K_PAGES
147#define get_slice_psize(mm, addr) MMU_PAGE_64K
148#else /* CONFIG_PPC_64K_PAGES */
149#define get_slice_psize(mm, addr) MMU_PAGE_4K
150#endif /* !CONFIG_PPC_64K_PAGES */
151#define slice_set_user_psize(mm, psize) do { BUG(); } while(0)
152#endif /* !CONFIG_PPC_STD_MMU_64 */
153
144#define slice_set_range_psize(mm, start, len, psize) \ 154#define slice_set_range_psize(mm, start, len, psize) \
145 slice_set_user_psize((mm), (psize)) 155 slice_set_user_psize((mm), (psize))
146#define slice_mm_new_context(mm) 1 156#define slice_mm_new_context(mm) 1
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8cd083c61503..7254c5a3187c 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -5,11 +5,6 @@
5 * the ppc64 hashed page table. 5 * the ppc64 hashed page table.
6 */ 6 */
7 7
8#ifndef __ASSEMBLY__
9#include <linux/stddef.h>
10#include <asm/tlbflush.h>
11#endif /* __ASSEMBLY__ */
12
13#ifdef CONFIG_PPC_64K_PAGES 8#ifdef CONFIG_PPC_64K_PAGES
14#include <asm/pgtable-ppc64-64k.h> 9#include <asm/pgtable-ppc64-64k.h>
15#else 10#else
@@ -38,26 +33,46 @@
38#endif 33#endif
39 34
40/* 35/*
41 * Define the address range of the vmalloc VM area. 36 * Define the address range of the kernel non-linear virtual area
37 */
38
39#ifdef CONFIG_PPC_BOOK3E
40#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
41#else
42#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
43#endif
44#define KERN_VIRT_SIZE PGTABLE_RANGE
45
46/*
47 * The vmalloc space starts at the beginning of that region, and
48 * occupies half of it on hash CPUs and a quarter of it on Book3E
42 */ 49 */
43#define VMALLOC_START ASM_CONST(0xD000000000000000) 50#define VMALLOC_START KERN_VIRT_START
44#define VMALLOC_SIZE (PGTABLE_RANGE >> 1) 51#ifdef CONFIG_PPC_BOOK3E
45#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) 52#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 2)
53#else
54#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
55#endif
56#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
46 57
47/* 58/*
48 * Define the address ranges for MMIO and IO space : 59 * The second half of the kernel virtual space is used for IO mappings,
60 * it's itself carved into the PIO region (ISA and PHB IO space) and
61 * the ioremap space
49 * 62 *
50 * ISA_IO_BASE = VMALLOC_END, 64K reserved area 63 * ISA_IO_BASE = KERN_IO_START, 64K reserved area
51 * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces 64 * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
52 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE 65 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
53 */ 66 */
67#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
54#define FULL_IO_SIZE 0x80000000ul 68#define FULL_IO_SIZE 0x80000000ul
55#define ISA_IO_BASE (VMALLOC_END) 69#define ISA_IO_BASE (KERN_IO_START)
56#define ISA_IO_END (VMALLOC_END + 0x10000ul) 70#define ISA_IO_END (KERN_IO_START + 0x10000ul)
57#define PHB_IO_BASE (ISA_IO_END) 71#define PHB_IO_BASE (ISA_IO_END)
58#define PHB_IO_END (VMALLOC_END + FULL_IO_SIZE) 72#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
59#define IOREMAP_BASE (PHB_IO_END) 73#define IOREMAP_BASE (PHB_IO_END)
60#define IOREMAP_END (VMALLOC_START + PGTABLE_RANGE) 74#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
75
61 76
62/* 77/*
63 * Region IDs 78 * Region IDs
@@ -72,19 +87,28 @@
72#define USER_REGION_ID (0UL) 87#define USER_REGION_ID (0UL)
73 88
74/* 89/*
75 * Defines the address of the vmemmap area 90 * Defines the address of the vmemmap area, in its own region on
91 * hash table CPUs and after the vmalloc space on Book3E
76 */ 92 */
93#ifdef CONFIG_PPC_BOOK3E
94#define VMEMMAP_BASE VMALLOC_END
95#define VMEMMAP_END KERN_IO_START
96#else
77#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT) 97#define VMEMMAP_BASE (VMEMMAP_REGION_ID << REGION_SHIFT)
98#endif
78#define vmemmap ((struct page *)VMEMMAP_BASE) 99#define vmemmap ((struct page *)VMEMMAP_BASE)
79 100
80 101
81/* 102/*
82 * Include the PTE bits definitions 103 * Include the PTE bits definitions
83 */ 104 */
105#ifdef CONFIG_PPC_BOOK3S
84#include <asm/pte-hash64.h> 106#include <asm/pte-hash64.h>
107#else
108#include <asm/pte-book3e.h>
109#endif
85#include <asm/pte-common.h> 110#include <asm/pte-common.h>
86 111
87
88#ifdef CONFIG_PPC_MM_SLICES 112#ifdef CONFIG_PPC_MM_SLICES
89#define HAVE_ARCH_UNMAPPED_AREA 113#define HAVE_ARCH_UNMAPPED_AREA
90#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 114#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
@@ -92,6 +116,9 @@
92 116
93#ifndef __ASSEMBLY__ 117#ifndef __ASSEMBLY__
94 118
119#include <linux/stddef.h>
120#include <asm/tlbflush.h>
121
95/* 122/*
96 * This is the default implementation of various PTE accessors, it's 123 * This is the default implementation of various PTE accessors, it's
97 * used in all cases except Book3S with 64K pages where we have a 124 * used in all cases except Book3S with 64K pages where we have a
diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h
new file mode 100644
index 000000000000..1d27c77d7704
--- /dev/null
+++ b/arch/powerpc/include/asm/pte-book3e.h
@@ -0,0 +1,70 @@
1#ifndef _ASM_POWERPC_PTE_BOOK3E_H
2#define _ASM_POWERPC_PTE_BOOK3E_H
3#ifdef __KERNEL__
4
5/* PTE bit definitions for processors compliant to the Book3E
6 * architecture 2.06 or later. The position of the PTE bits
7 * matches the HW definition of the optional Embedded Page Table
8 * category.
9 */
10
11/* Architected bits */
12#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */
13#define _PAGE_FILE 0x000002 /* (!present only) software: pte holds file offset */
14#define _PAGE_SW1 0x000002
15#define _PAGE_BAP_SR 0x000004
16#define _PAGE_BAP_UR 0x000008
17#define _PAGE_BAP_SW 0x000010
18#define _PAGE_BAP_UW 0x000020
19#define _PAGE_BAP_SX 0x000040
20#define _PAGE_BAP_UX 0x000080
21#define _PAGE_PSIZE_MSK 0x000f00
22#define _PAGE_PSIZE_4K 0x000200
23#define _PAGE_PSIZE_64K 0x000600
24#define _PAGE_PSIZE_1M 0x000a00
25#define _PAGE_PSIZE_16M 0x000e00
26#define _PAGE_DIRTY 0x001000 /* C: page changed */
27#define _PAGE_SW0 0x002000
28#define _PAGE_U3 0x004000
29#define _PAGE_U2 0x008000
30#define _PAGE_U1 0x010000
31#define _PAGE_U0 0x020000
32#define _PAGE_ACCESSED 0x040000
33#define _PAGE_LENDIAN 0x080000
34#define _PAGE_GUARDED 0x100000
35#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */
36#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */
37#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */
38
39/* "Higher level" linux bit combinations */
40#define _PAGE_EXEC _PAGE_BAP_SX /* Can be executed from potentially */
41#define _PAGE_HWEXEC _PAGE_BAP_UX /* .. and was cache cleaned */
42#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
43#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
44#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
45#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */
46
47#define _PAGE_HASHPTE 0
48#define _PAGE_BUSY 0
49
50#define _PAGE_SPECIAL _PAGE_SW0
51
52/* Flags to be preserved on PTE modifications */
53#define _PAGE_HPTEFLAGS _PAGE_BUSY
54
55/* Base page size */
56#ifdef CONFIG_PPC_64K_PAGES
57#define _PAGE_PSIZE _PAGE_PSIZE_64K
58#define PTE_RPN_SHIFT (28)
59#else
60#define _PAGE_PSIZE _PAGE_PSIZE_4K
61#define PTE_RPN_SHIFT (24)
62#endif
63
64/* On 32-bit, we never clear the top part of the PTE */
65#ifdef CONFIG_PPC32
66#define _PTE_NONE_MASK 0xffffffff00000000ULL
67#endif
68
69#endif /* __KERNEL__ */
70#endif /* _ASM_POWERPC_PTE_BOOK3E_H */
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index a7e210b6b48c..8bb6464ba619 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -34,6 +34,9 @@
34#ifndef _PAGE_4K_PFN 34#ifndef _PAGE_4K_PFN
35#define _PAGE_4K_PFN 0 35#define _PAGE_4K_PFN 0
36#endif 36#endif
37#ifndef _PAGE_SAO
38#define _PAGE_SAO 0
39#endif
37#ifndef _PAGE_PSIZE 40#ifndef _PAGE_PSIZE
38#define _PAGE_PSIZE 0 41#define _PAGE_PSIZE 0
39#endif 42#endif
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 561b64652311..0a9f30b54952 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -52,9 +52,11 @@
52#include <linux/kvm_host.h> 52#include <linux/kvm_host.h>
53#endif 53#endif
54 54
55#ifdef CONFIG_PPC32
55#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 56#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
56#include "head_booke.h" 57#include "head_booke.h"
57#endif 58#endif
59#endif
58 60
59#if defined(CONFIG_FSL_BOOKE) 61#if defined(CONFIG_FSL_BOOKE)
60#include "../mm/mmu_decl.h" 62#include "../mm/mmu_decl.h"
@@ -260,6 +262,7 @@ int main(void)
260 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8); 262 DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
261#endif /* CONFIG_PPC64 */ 263#endif /* CONFIG_PPC64 */
262 264
265#if defined(CONFIG_PPC32)
263#if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 266#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
264 DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE); 267 DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
265 DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0)); 268 DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
@@ -278,7 +281,7 @@ int main(void)
278 DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1)); 281 DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
279 DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit)); 282 DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
280#endif 283#endif
281 284#endif
282 DEFINE(CLONE_VM, CLONE_VM); 285 DEFINE(CLONE_VM, CLONE_VM);
283 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); 286 DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
284 287
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index c46ef2ffa3d9..90df6ffe3a43 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -57,8 +57,10 @@ unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
57#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize]) 57#define HUGEPTE_CACHE_NAME(psize) (huge_pgtable_cache_name[psize])
58 58
59static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = { 59static const char *huge_pgtable_cache_name[MMU_PAGE_COUNT] = {
60 "unused_4K", "hugepte_cache_64K", "unused_64K_AP", 60 [MMU_PAGE_64K] = "hugepte_cache_64K",
61 "hugepte_cache_1M", "hugepte_cache_16M", "hugepte_cache_16G" 61 [MMU_PAGE_1M] = "hugepte_cache_1M",
62 [MMU_PAGE_16M] = "hugepte_cache_16M",
63 [MMU_PAGE_16G] = "hugepte_cache_16G",
62}; 64};
63 65
64/* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad() 66/* Flag to mark huge PD pointers. This means pmd_bad() and pud_bad()
@@ -700,6 +702,8 @@ static void __init set_huge_psize(int psize)
700 if (mmu_huge_psizes[psize] || 702 if (mmu_huge_psizes[psize] ||
701 mmu_psize_defs[psize].shift == PAGE_SHIFT) 703 mmu_psize_defs[psize].shift == PAGE_SHIFT)
702 return; 704 return;
705 if (WARN_ON(HUGEPTE_CACHE_NAME(psize) == NULL))
706 return;
703 hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT); 707 hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);
704 708
705 switch (mmu_psize_defs[psize].shift) { 709 switch (mmu_psize_defs[psize].shift) {