diff options
author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2013-11-18 04:28:10 -0500 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2013-12-08 19:40:28 -0500 |
commit | c8c06f5a0dde0fed260c54d550962187f266ed0d (patch) | |
tree | f9fb794ebe4f9ebf0b32a8aabe53c99f275d7cc8 /arch/powerpc/mm | |
parent | 92c08a0d522c7e62c01a63e42597f0c2b02c4245 (diff) |
powerpc/mm: Free up _PAGE_COHERENCE for numa fault use later
Set memory coherence always on hash64 config. If
a platform cannot have memory coherence always set, it
can infer that from _PAGE_NO_CACHE and _PAGE_WRITETHRU
like in lpar. So we don't really need a separate bit
for tracking _PAGE_COHERENCE.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/hash_low_64.S | 15 | ||||
-rw-r--r-- | arch/powerpc/mm/hash_utils_64.c | 7 | ||||
-rw-r--r-- | arch/powerpc/mm/hugepage-hash64.c | 6 | ||||
-rw-r--r-- | arch/powerpc/mm/hugetlbpage-hash64.c | 4 |
4 files changed, 25 insertions, 7 deletions
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index d3cbda62857b..1136d26a95ae 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S | |||
@@ -148,7 +148,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
148 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ | 148 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ |
149 | andc r0,r30,r0 /* r0 = pte & ~r0 */ | 149 | andc r0,r30,r0 /* r0 = pte & ~r0 */ |
150 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ | 150 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ |
151 | ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ | 151 | /* |
152 | * Always add "C" bit for perf. Memory coherence is always enabled | ||
153 | */ | ||
154 | ori r3,r3,HPTE_R_C | HPTE_R_M | ||
152 | 155 | ||
153 | /* We eventually do the icache sync here (maybe inline that | 156 | /* We eventually do the icache sync here (maybe inline that |
154 | * code rather than call a C function...) | 157 | * code rather than call a C function...) |
@@ -457,7 +460,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
457 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ | 460 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ |
458 | andc r0,r3,r0 /* r0 = pte & ~r0 */ | 461 | andc r0,r3,r0 /* r0 = pte & ~r0 */ |
459 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ | 462 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ |
460 | ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ | 463 | /* |
464 | * Always add "C" bit for perf. Memory coherence is always enabled | ||
465 | */ | ||
466 | ori r3,r3,HPTE_R_C | HPTE_R_M | ||
461 | 467 | ||
462 | /* We eventually do the icache sync here (maybe inline that | 468 | /* We eventually do the icache sync here (maybe inline that |
463 | * code rather than call a C function...) | 469 | * code rather than call a C function...) |
@@ -795,7 +801,10 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | |||
795 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ | 801 | and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/ |
796 | andc r0,r30,r0 /* r0 = pte & ~r0 */ | 802 | andc r0,r30,r0 /* r0 = pte & ~r0 */ |
797 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ | 803 | rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */ |
798 | ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */ | 804 | /* |
805 | * Always add "C" bit for perf. Memory coherence is always enabled | ||
806 | */ | ||
807 | ori r3,r3,HPTE_R_C | HPTE_R_M | ||
799 | 808 | ||
800 | /* We eventually do the icache sync here (maybe inline that | 809 | /* We eventually do the icache sync here (maybe inline that |
801 | * code rather than call a C function...) | 810 | * code rather than call a C function...) |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 6176b3cdf579..de6881259aef 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -169,9 +169,10 @@ static unsigned long htab_convert_pte_flags(unsigned long pteflags) | |||
169 | if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && | 169 | if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) && |
170 | (pteflags & _PAGE_DIRTY))) | 170 | (pteflags & _PAGE_DIRTY))) |
171 | rflags |= 1; | 171 | rflags |= 1; |
172 | 172 | /* | |
173 | /* Always add C */ | 173 | * Always add "C" bit for perf. Memory coherence is always enabled |
174 | return rflags | HPTE_R_C; | 174 | */ |
175 | return rflags | HPTE_R_C | HPTE_R_M; | ||
175 | } | 176 | } |
176 | 177 | ||
177 | int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | 178 | int htab_bolt_mapping(unsigned long vstart, unsigned long vend, |
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c index 34de9e0cdc34..826893fcb3a7 100644 --- a/arch/powerpc/mm/hugepage-hash64.c +++ b/arch/powerpc/mm/hugepage-hash64.c | |||
@@ -127,7 +127,11 @@ repeat: | |||
127 | 127 | ||
128 | /* Add in WIMG bits */ | 128 | /* Add in WIMG bits */ |
129 | rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | | 129 | rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | |
130 | _PAGE_COHERENT | _PAGE_GUARDED)); | 130 | _PAGE_GUARDED)); |
131 | /* | ||
132 | * enable the memory coherence always | ||
133 | */ | ||
134 | rflags |= HPTE_R_M; | ||
131 | 135 | ||
132 | /* Insert into the hash table, primary slot */ | 136 | /* Insert into the hash table, primary slot */ |
133 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, | 137 | slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0, |
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c index 0b7fb6761015..a5bcf9301196 100644 --- a/arch/powerpc/mm/hugetlbpage-hash64.c +++ b/arch/powerpc/mm/hugetlbpage-hash64.c | |||
@@ -99,6 +99,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, | |||
99 | /* Add in WIMG bits */ | 99 | /* Add in WIMG bits */ |
100 | rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | | 100 | rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE | |
101 | _PAGE_COHERENT | _PAGE_GUARDED)); | 101 | _PAGE_COHERENT | _PAGE_GUARDED)); |
102 | /* | ||
103 | * enable the memory coherence always | ||
104 | */ | ||
105 | rflags |= HPTE_R_M; | ||
102 | 106 | ||
103 | slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, | 107 | slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0, |
104 | mmu_psize, ssize); | 108 | mmu_psize, ssize); |