author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2012-09-09 22:52:57 -0400
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-09-17 02:31:53 -0400
commit    78f1dbde9fd020419313c2a0c3b602ea2427118f (patch)
tree      9a5c34555b6fba6809ec33798e1aa53ce1ce0f53 /arch/powerpc/include
parent    f033d659c3b931d8b2a16625155e20304e173c9f (diff)
powerpc/mm: Make some of the PGTABLE_RANGE dependency explicit
slice array size and slice mask size depend on PGTABLE_RANGE.

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
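For reference, here is a standalone sketch (not part of the patch) of the arithmetic the new defines encode, assuming the current 64TB configuration where PGTABLE_RANGE is 2^46 and each high slice covers 1TB; the constant values below are illustrative only:

/*
 * Standalone sketch (not kernel code): checks the 64TB slice
 * arithmetic behind SLICE_ARRAY_SIZE and SLICE_MASK_SIZE.
 */
#include <assert.h>

#define TB               (1ULL << 40)
#define PGTABLE_RANGE    (1ULL << 46)            /* 64TB user address space */
#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)   /* 4 bits per 1TB slice    */
#define SLICE_MASK_SIZE  8                       /* 1 bit per 1TB slice     */

int main(void)
{
	unsigned long long high_slices = PGTABLE_RANGE / TB;  /* 64 slices */

	assert(SLICE_ARRAY_SIZE == high_slices / 2);          /* 32 bytes  */
	assert(SLICE_MASK_SIZE == high_slices / 8);           /*  8 bytes  */
	return 0;
}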
Diffstat (limited to 'arch/powerpc/include')
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h     | 15
-rw-r--r--  arch/powerpc/include/asm/mmu.h            |  9
-rw-r--r--  arch/powerpc/include/asm/page_64.h        | 12
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h  | 17
-rw-r--r--  arch/powerpc/include/asm/pgtable.h        | 10
-rw-r--r--  arch/powerpc/include/asm/tlbflush.h       |  3
6 files changed, 31 insertions, 35 deletions
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 3e887467a6d..9673f73eb8d 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -16,6 +16,13 @@
 #include <asm/page.h>
 
 /*
+ * This is necessary to get the definition of PGTABLE_RANGE which we
+ * need for various slices related matters. Note that this isn't the
+ * complete pgtable.h but only a portion of it.
+ */
+#include <asm/pgtable-ppc64.h>
+
+/*
  * Segment table
  */
 
@@ -414,6 +421,8 @@ extern void slb_set_size(u16 size);
 	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
 	add	rt,rt,rx
 
+/* 4 bits per slice and we have one slice per 1TB */
+#define SLICE_ARRAY_SIZE	(PGTABLE_RANGE >> 41)
 
 #ifndef __ASSEMBLY__
 
@@ -458,11 +467,7 @@ typedef struct {
 
 #ifdef CONFIG_PPC_MM_SLICES
 	u64 low_slices_psize;	/* SLB page size encodings */
-	/*
-	 * Right now we support 64TB and 4 bits for each
-	 * 1TB slice we need 32 bytes for 64TB.
-	 */
-	unsigned char high_slices_psize[32];  /* 4 bits per slice for now */
+	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
 #else
 	u16 sllp;		/* SLB page size encoding */
 #endif
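With SLICE_ARRAY_SIZE derived from PGTABLE_RANGE, high_slices_psize packs two 1TB slices per byte. A hypothetical reader for one slice's page-size nibble might look like the sketch below; the function and variable names are illustrative, not taken from the patch:

/*
 * Illustrative only: extract the 4-bit page-size encoding for the
 * 1TB slice containing 'addr' from a packed array such as
 * high_slices_psize[SLICE_ARRAY_SIZE].
 */
static inline unsigned int high_slice_psize(const unsigned char *hpsizes,
					    unsigned long addr)
{
	unsigned long index = addr >> 40;	/* index of the 1TB slice */
	unsigned int shift = (index & 1) * 4;	/* low or high nibble     */

	return (hpsizes[index >> 1] >> shift) & 0xf;
}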
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index e8a26db2e8f..5e38eedea21 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -146,6 +146,15 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 extern u64 ppc64_rma_size;
 #endif /* CONFIG_PPC64 */
 
+struct mm_struct;
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 6c9bef4cb6a..cd915d6b093 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -78,14 +78,18 @@ extern u64 ppc64_pft_size;
 #define GET_LOW_SLICE_INDEX(addr)	((addr) >> SLICE_LOW_SHIFT)
 #define GET_HIGH_SLICE_INDEX(addr)	((addr) >> SLICE_HIGH_SHIFT)
 
+/*
+ * 1 bit per slice and we have one slice per 1TB
+ * Right now we support only 64TB.
+ * IF we change this we will have to change the type
+ * of high_slices
+ */
+#define SLICE_MASK_SIZE 8
+
 #ifndef __ASSEMBLY__
 
 struct slice_mask {
 	u16 low_slices;
-	/*
-	 * This should be derived out of PGTABLE_RANGE. For the current
-	 * max 64TB, u64 should be ok.
-	 */
 	u64 high_slices;
 };
 
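Unlike SLICE_ARRAY_SIZE, SLICE_MASK_SIZE stays a literal 8 here and high_slices stays a u64, with only the comment warning that both must change if PGTABLE_RANGE grows. A hypothetical compile-time guard along the following lines (not part of the patch, and assuming PGTABLE_RANGE is visible in this header) would make that dependency enforceable:

/*
 * Hypothetical sketch, not in the patch: one bit per 1TB slice of
 * PGTABLE_RANGE must fit in SLICE_MASK_SIZE bytes and in the u64
 * high_slices bitmap.
 */
_Static_assert(SLICE_MASK_SIZE * 8 >= (PGTABLE_RANGE >> 40),
	       "SLICE_MASK_SIZE too small for PGTABLE_RANGE");
_Static_assert(sizeof(u64) * 8 >= (PGTABLE_RANGE >> 40),
	       "high_slices too small for PGTABLE_RANGE");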
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 8af1cf27fd4..0182c203e41 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -21,17 +21,6 @@
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
 
-/* Some sanity checking */
-#if TASK_SIZE_USER64 > PGTABLE_RANGE
-#error TASK_SIZE_USER64 exceeds pagetable range
-#endif
-
-#ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
-#error TASK_SIZE_USER64 exceeds user VSID range
-#endif
-#endif
-
 /*
  * Define the address range of the kernel non-linear virtual area
  */
@@ -117,9 +106,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/stddef.h>
-#include <asm/tlbflush.h>
-
 /*
  * This is the default implementation of various PTE accessors, it's
  * used in all cases except Book3S with 64K pages where we have a
@@ -198,7 +184,8 @@
 /* to find an entry in a kernel page-table-directory */
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 2e0e4110f7a..a9cbd3ba5c3 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -9,14 +9,6 @@
 
 struct mm_struct;
 
-#ifdef CONFIG_DEBUG_VM
-extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
-#else /* CONFIG_DEBUG_VM */
-static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
-{
-}
-#endif /* !CONFIG_DEBUG_VM */
-
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC64)
@@ -27,6 +19,8 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 
 #ifndef __ASSEMBLY__
 
+#include <asm/tlbflush.h>
+
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index fc02d1dee95..61a59271665 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -103,9 +103,6 @@ DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
 
 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
 
-extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
-			    pte_t *ptep, unsigned long pte, int huge);
-
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
 static inline void arch_enter_lazy_mmu_mode(void)