author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-12-01 17:28:35 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2009-12-01 17:28:35 -0500
commit     5a7b4193e564d1611ecf1cd859aed60d5612d78f (patch)
tree       8831669121df3d50845718b848d7c6e4bc51be26 /arch/powerpc
parent     86f9e097f340fd0fbd37afe92bd5453f5a84cbca (diff)
Revert "powerpc/mm: Fix bug in pagetable cache cleanup with CONFIG_PPC_SUBPAGE_PROT"
This reverts commit c045256d146800ea1d741a8e9e377dada6b7e195.
It breaks the build when CONFIG_PPC_SUBPAGE_PROT is not set. I will
commit a fixed version separately.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
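
One mismatch is visible in the first hunk removed below: the !CONFIG_PPC_SUBPAGE_PROT stub for subpage_prot_free() still took a pgd_t * while the real declaration and its caller in destroy_context() used a struct mm_struct *. A minimal sketch of the usual Kconfig stub idiom a fixed version would presumably restore (illustrative only, not the actual follow-up commit):

#ifdef CONFIG_PPC_SUBPAGE_PROT
extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
/* the stubs keep the same signatures as the real functions, so callers
 * such as destroy_context() compile whether or not the option is set */
static inline void subpage_prot_free(struct mm_struct *mm) { }
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */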
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/mmu-hash64.h       35
-rw-r--r--   arch/powerpc/include/asm/pgalloc-64.h        5
-rw-r--r--   arch/powerpc/include/asm/pte-hash64-64k.h   37
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c              6
-rw-r--r--   arch/powerpc/mm/mmu_context_hash64.c         2
-rw-r--r--   arch/powerpc/mm/subpage-prot.c              15
6 files changed, 49 insertions, 51 deletions
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 9d9551840f4a..7514ec2f8540 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -373,38 +373,6 @@ extern void slb_set_size(u16 size);
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-/*
- * For the sub-page protection option, we extend the PGD with one of
- * these. Basically we have a 3-level tree, with the top level being
- * the protptrs array. To optimize speed and memory consumption when
- * only addresses < 4GB are being protected, pointers to the first
- * four pages of sub-page protection words are stored in the low_prot
- * array.
- * Each page of sub-page protection words protects 1GB (4 bytes
- * protects 64k). For the 3-level tree, each page of pointers then
- * protects 8TB.
- */
-struct subpage_prot_table {
-        unsigned long maxaddr;  /* only addresses < this are protected */
-        unsigned int **protptrs[2];
-        unsigned int *low_prot[4];
-};
-
-#define SBP_L1_BITS     (PAGE_SHIFT - 2)
-#define SBP_L2_BITS     (PAGE_SHIFT - 3)
-#define SBP_L1_COUNT    (1 << SBP_L1_BITS)
-#define SBP_L2_COUNT    (1 << SBP_L2_BITS)
-#define SBP_L2_SHIFT    (PAGE_SHIFT + SBP_L1_BITS)
-#define SBP_L3_SHIFT    (SBP_L2_SHIFT + SBP_L2_BITS)
-
-extern void subpage_prot_free(struct mm_struct *mm);
-extern void subpage_prot_init_new_context(struct mm_struct *mm);
-#else
-static inline void subpage_prot_free(pgd_t *pgd) {}
-static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
-
 typedef unsigned long mm_context_id_t;
 
 typedef struct {
@@ -418,9 +386,6 @@ typedef struct {
         u16 sllp;               /* SLB page size encoding */
 #endif
         unsigned long vdso_base;
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-        struct subpage_prot_table spt;
-#endif /* CONFIG_PPC_SUBPAGE_PROT */
 } mm_context_t;
 
 
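The comment removed above fixes the tree's fan-out purely in terms of PAGE_SHIFT. Here is a worked check of the 1GB and 8TB figures, assuming 64K base pages (PAGE_SHIFT == 16), which is the configuration these constants are defined for; the lowercase names are illustrative stand-ins, not the kernel's macros:

/* each u32 (4 bytes) holds the protection bits for one 64K page */
enum {
        sbp_page_shift = 16,                  /* 64K base pages               */
        sbp_l1_bits    = sbp_page_shift - 2,  /* 14: 4-byte words per page    */
        sbp_l2_bits    = sbp_page_shift - 3,  /* 13: 8-byte pointers per page */
};

/* one leaf page of protection words: 2^14 words * 64K each = 2^30 = 1GB */
_Static_assert(1UL << (sbp_l1_bits + sbp_page_shift) == 1UL << 30,
               "1GB per leaf page");

/* one page of pointers to leaf pages: 2^13 * 1GB = 2^43 = 8TB */
_Static_assert(1UL << (sbp_l2_bits + sbp_l1_bits + sbp_page_shift) == 1UL << 43,
               "8TB per pointer page");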
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 605f5c5398d1..5c1cd73dafa8 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -28,6 +28,10 @@
  */
 #define MAX_PGTABLE_INDEX_SIZE  0xf
 
+#ifndef CONFIG_PPC_SUBPAGE_PROT
+static inline void subpage_prot_free(pgd_t *pgd) {}
+#endif
+
 extern struct kmem_cache *pgtable_cache[];
 #define PGT_CACHE(shift) (pgtable_cache[(shift)-1])
 
@@ -38,6 +42,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
+        subpage_prot_free(pgd);
         kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }
 
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index c4490f9c67c4..82b72207c51c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -76,4 +76,41 @@
         remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, \
                         __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
 
+
+#ifdef CONFIG_PPC_SUBPAGE_PROT
+/*
+ * For the sub-page protection option, we extend the PGD with one of
+ * these. Basically we have a 3-level tree, with the top level being
+ * the protptrs array. To optimize speed and memory consumption when
+ * only addresses < 4GB are being protected, pointers to the first
+ * four pages of sub-page protection words are stored in the low_prot
+ * array.
+ * Each page of sub-page protection words protects 1GB (4 bytes
+ * protects 64k). For the 3-level tree, each page of pointers then
+ * protects 8TB.
+ */
+struct subpage_prot_table {
+        unsigned long maxaddr;  /* only addresses < this are protected */
+        unsigned int **protptrs[2];
+        unsigned int *low_prot[4];
+};
+
+#undef PGD_TABLE_SIZE
+#define PGD_TABLE_SIZE          ((sizeof(pgd_t) << PGD_INDEX_SIZE) + \
+                                 sizeof(struct subpage_prot_table))
+
+#define SBP_L1_BITS     (PAGE_SHIFT - 2)
+#define SBP_L2_BITS     (PAGE_SHIFT - 3)
+#define SBP_L1_COUNT    (1 << SBP_L1_BITS)
+#define SBP_L2_COUNT    (1 << SBP_L2_BITS)
+#define SBP_L2_SHIFT    (PAGE_SHIFT + SBP_L1_BITS)
+#define SBP_L3_SHIFT    (SBP_L2_SHIFT + SBP_L2_BITS)
+
+extern void subpage_prot_free(pgd_t *pgd);
+
+static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
+{
+        return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
+}
+#endif /* CONFIG_PPC_SUBPAGE_PROT */
 #endif /* __ASSEMBLY__ */
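With the revert, the protection table again rides in the same allocation as the page directory: PGD_TABLE_SIZE is enlarged by sizeof(struct subpage_prot_table), so pgd_subpage_prot() is plain pointer arithmetic past the PTRS_PER_PGD entries. A small userspace model of that layout (all types and sizes below are hypothetical stand-ins; the kernel allocates the block from a kmem_cache rather than calloc):

#include <stdlib.h>

typedef unsigned long pgd_t;           /* stand-in for the kernel's pgd_t     */
#define PTRS_PER_PGD 4096              /* illustrative value only             */

struct subpage_prot_table {
        unsigned long maxaddr;         /* only addresses < this are protected */
        unsigned int **protptrs[2];
        unsigned int *low_prot[4];
};

/* the directory entries plus the protection table, as one block */
#define PGD_TABLE_SIZE \
        (sizeof(pgd_t) * PTRS_PER_PGD + sizeof(struct subpage_prot_table))

static struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
{
        return (struct subpage_prot_table *)(pgd + PTRS_PER_PGD);
}

int main(void)
{
        pgd_t *pgd = calloc(1, PGD_TABLE_SIZE);
        struct subpage_prot_table *spt;

        if (!pgd)
                return 1;
        spt = pgd_subpage_prot(pgd);   /* lands right after the last pgd entry */
        spt->maxaddr = 0;              /* freshly allocated table is empty     */
        free(pgd);
        return 0;
}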
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 50f867d657df..6810128aba30 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -835,9 +835,9 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
  * Result is 0: full permissions, _PAGE_RW: read-only,
  * _PAGE_USER or _PAGE_USER|_PAGE_RW: no access.
  */
-static int subpage_protection(struct mm_struct *mm, unsigned long ea)
+static int subpage_protection(pgd_t *pgdir, unsigned long ea)
 {
-        struct subpage_prot_table *spt = &mm->context.spt;
+        struct subpage_prot_table *spt = pgd_subpage_prot(pgdir);
         u32 spp = 0;
         u32 **sbpm, *sbpp;
 
@@ -865,7 +865,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
 }
 
 #else /* CONFIG_PPC_SUBPAGE_PROT */
-static inline int subpage_protection(struct mm_struct *mm, unsigned long ea)
+static inline int subpage_protection(pgd_t *pgdir, unsigned long ea)
 {
         return 0;
 }
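The hunk above only shows the head of subpage_protection(); the rest of the walk can be sketched from the table structure and the SBP_* constants (a reconstruction, not the verbatim kernel body; the bounds checks are simplified):

/* look up the protection word for the 64K page containing "ea" */
static u32 subpage_prot_lookup(struct subpage_prot_table *spt, unsigned long ea)
{
        u32 **sbpm, *sbpp;

        if (ea >= spt->maxaddr)
                return 0;                       /* nothing protected this high   */
        if (ea < 0x100000000UL)
                sbpm = spt->low_prot;           /* first 4GB: the low_prot slots */
        else
                sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
        if (sbpm == NULL)
                return 0;
        sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
        if (sbpp == NULL)
                return 0;
        /* one u32 of protection bits per 64K page */
        return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}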
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index b910d37aea1a..b9e4cc2c2057 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -76,7 +76,6 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
  */
         if (slice_mm_new_context(mm))
                 slice_set_user_psize(mm, mmu_virtual_psize);
-        subpage_prot_init_new_context(mm);
         mm->context.id = index;
 
         return 0;
@@ -93,6 +92,5 @@ EXPORT_SYMBOL_GPL(__destroy_context);
 void destroy_context(struct mm_struct *mm)
 {
         __destroy_context(mm->context.id);
-        subpage_prot_free(mm);
         mm->context.id = NO_CONTEXT;
 }
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index a040b81e93bd..4cafc0c33d0a 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -24,9 +24,9 @@
  * Also makes sure that the subpage_prot_table structure is
  * reinitialized for the next user.
  */
-void subpage_prot_free(struct mm_struct *mm)
+void subpage_prot_free(pgd_t *pgd)
 {
-        struct subpage_prot_table *spt = &mm->context.spt;
+        struct subpage_prot_table *spt = pgd_subpage_prot(pgd);
         unsigned long i, j, addr;
         u32 **p;
 
@@ -51,13 +51,6 @@ void subpage_prot_free(struct mm_struct *mm)
         spt->maxaddr = 0;
 }
 
-void subpage_prot_init_new_context(struct mm_struct *mm)
-{
-        struct subpage_prot_table *spt = &mm->context.spt;
-
-        memset(spt, 0, sizeof(*spt));
-}
-
 static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
                              int npages)
 {
@@ -94,7 +87,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
 static void subpage_prot_clear(unsigned long addr, unsigned long len)
 {
         struct mm_struct *mm = current->mm;
-        struct subpage_prot_table *spt = &mm->context.spt;
+        struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
         u32 **spm, *spp;
         int i, nw;
         unsigned long next, limit;
@@ -143,7 +136,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
 {
         struct mm_struct *mm = current->mm;
-        struct subpage_prot_table *spt = &mm->context.spt;
+        struct subpage_prot_table *spt = pgd_subpage_prot(mm->pgd);
         u32 **spm, *spp;
         int i, nw;
         unsigned long next, limit;
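The entry point touched in the last hunk, sys_subpage_prot(), is the powerpc-specific system call behind this code: for each 64K page in [addr, addr+len) it takes one u32 of protection bits from map (4 bytes protect 64K, as the comments above put it). A minimal userspace sketch of invoking it (hypothetical usage; __NR_subpage_prot exists only in the powerpc syscall headers, and an all-zero map corresponds to the "full permissions" value described in hash_utils_64.c):

#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        size_t len = 0x10000;                    /* one 64K page                 */
        uint32_t map = 0;                        /* 0 => full access everywhere  */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        /* powerpc only; other architectures do not define this syscall */
        return syscall(__NR_subpage_prot, (unsigned long)p, len, &map) ? 1 : 0;
}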