aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2007-10-28 21:05:18 -0400
committerPaul Mackerras <paulus@samba.org>2007-10-28 23:34:14 -0400
commitf6ab0b922c3423b88c0e6e3e2c5fc1e58d83055d (patch)
treefd1eedbaed6a5d837f319a6ba9010fbba80560da /arch
parent2a397e82c7db18019e408f953dd58dc1963a328c (diff)
[POWERPC] powerpc: Fix demotion of segments to 4K pages
When demoting a process to use 4K HW pages (instead of 64K), which happens under various circumstances such as doing cache inhibited mappings on machines that do not support 64K CI pages, the assembly hash code calls back into the C function flush_hash_page(). This function prototype was recently changed to accommodate for 1T segments but the assembly call site was not updated, causing applications that do demotion to hang. In addition, when updating the per-CPU PACA for the new sizes, we didn't properly update the slice "map", thus causing the SLB miss code to re-insert segments for the wrong size. This fixes both and adds a warning comment next to the C implementation to try to avoid problems next time someone changes it. Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/mm/hash_low_64.S5
-rw-r--r--arch/powerpc/mm/hash_utils_64.c6
2 files changed, 7 insertions, 4 deletions
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index ad253b959030..e935edd6b72b 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -331,7 +331,7 @@ htab_pte_insert_failure:
331 *****************************************************************************/ 331 *****************************************************************************/
332 332
333/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, 333/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
334 * pte_t *ptep, unsigned long trap, int local) 334 * pte_t *ptep, unsigned long trap, int local, int ssize)
335 */ 335 */
336 336
337/* 337/*
@@ -557,7 +557,8 @@ htab_inval_old_hpte:
557 mr r4,r31 /* PTE.pte */ 557 mr r4,r31 /* PTE.pte */
558 li r5,0 /* PTE.hidx */ 558 li r5,0 /* PTE.hidx */
559 li r6,MMU_PAGE_64K /* psize */ 559 li r6,MMU_PAGE_64K /* psize */
560 ld r7,STK_PARM(r8)(r1) /* local */ 560 ld r7,STK_PARM(r9)(r1) /* ssize */
561 ld r8,STK_PARM(r8)(r1) /* local */
561 bl .flush_hash_page 562 bl .flush_hash_page
562 b htab_insert_pte 563 b htab_insert_pte
563 564
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c78dc912411f..c5a603fdb22d 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -791,8 +791,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
791 } 791 }
792 if (user_region) { 792 if (user_region) {
793 if (psize != get_paca()->context.user_psize) { 793 if (psize != get_paca()->context.user_psize) {
794 get_paca()->context.user_psize = 794 get_paca()->context = mm->context;
795 mm->context.user_psize;
796 slb_flush_and_rebolt(); 795 slb_flush_and_rebolt();
797 } 796 }
798 } else if (get_paca()->vmalloc_sllp != 797 } else if (get_paca()->vmalloc_sllp !=
@@ -885,6 +884,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
885 local_irq_restore(flags); 884 local_irq_restore(flags);
886} 885}
887 886
887/* WARNING: This is called from hash_low_64.S, if you change this prototype,
888 * do not forget to update the assembly call site !
889 */
888void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, 890void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
889 int local) 891 int local)
890{ 892{