Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hash_low_32.S	86
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	4
-rw-r--r--	arch/powerpc/mm/tlb_32.c	1
3 files changed, 73 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index c41d658176ac..7bffb70b9fe2 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -75,7 +75,7 @@ _GLOBAL(hash_page_sync)
  * Returns to the caller if the access is illegal or there is no
  * mapping for the address. Otherwise it places an appropriate PTE
  * in the hash table and returns from the exception.
- * Uses r0, r3 - r8, ctr, lr.
+ * Uses r0, r3 - r8, r10, ctr, lr.
  */
 	.text
 _GLOBAL(hash_page)
@@ -106,9 +106,15 @@ _GLOBAL(hash_page)
 	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:	add	r5,r5,r7		/* convert to phys addr */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
 	lwz	r8,0(r5)		/* get pmd entry */
 	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
+#else
+	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
+	lwzx	r8,r8,r5		/* Get L1 entry */
+	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
+#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -118,7 +124,11 @@ _GLOBAL(hash_page)
 	   to the address following the rfi. */
 	beqlr-
 #endif
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
+#else
+	rlwimi	r8,r4,23,20,28		/* compute pte address */
+#endif
 	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
@@ -127,9 +137,15 @@ _GLOBAL(hash_page)
 	 * because almost always, there won't be a permission violation
 	 * and there won't already be an HPTE, and thus we will have
 	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
+	 *
+	 * If PTE_64BIT is set, the low word is the flags word; use that
+	 * word for locking since it contains all the interesting bits.
 	 */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 retry:
-	lwarx	r6,0,r8			/* get linux-style pte */
+	lwarx	r6,0,r8			/* get linux-style pte, flag word */
 	andc.	r5,r3,r6		/* check access & ~permission */
 #ifdef CONFIG_SMP
 	bne-	hash_page_out		/* return if access not permitted */
@@ -137,6 +153,15 @@ retry:
 	bnelr-
 #endif
 	or	r5,r0,r6		/* set accessed/dirty bits */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	stwcx.	r5,0,r8			/* attempt to update PTE */
 	bne-	retry			/* retry if someone got there first */
 
@@ -203,9 +228,9 @@ _GLOBAL(add_hash_page)
 	 * we can't take a hash table miss (assuming the code is
 	 * covered by a BAT).  -- paulus
 	 */
-	mfmsr	r10
+	mfmsr	r9
 	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
 	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
 	mtmsr	r0
 	SYNC_601
@@ -214,14 +239,14 @@ _GLOBAL(add_hash_page)
 	tophys(r7,0)
 
 #ifdef CONFIG_SMP
-	addis	r9,r7,mmu_hash_lock@ha
-	addi	r9,r9,mmu_hash_lock@l
-10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
+10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
 	cmpi	0,r0,0
 	bne-	11f
-	stwcx.	r8,0,r9
+	stwcx.	r8,0,r6
 	beq+	12f
-11:	lwz	r0,0(r9)
+11:	lwz	r0,0(r6)
 	cmpi	0,r0,0
 	beq	10b
 	b	11b
@@ -234,10 +259,24 @@ _GLOBAL(add_hash_page)
 	 * HPTE, so we just unlock and return.
 	 */
 	mr	r8,r5
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29
+#else
+	rlwimi	r8,r4,23,20,28
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 1:	lwarx	r6,0,r8
 	andi.	r0,r6,_PAGE_HASHPTE
 	bne	9f			/* if HASHPTE already set, done */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	ori	r5,r6,_PAGE_HASHPTE
 	stwcx.	r5,0,r8
 	bne-	1b
@@ -246,13 +285,15 @@ _GLOBAL(add_hash_page)
 
 9:
 #ifdef CONFIG_SMP
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
 	eieio
 	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	stw	r0,0(r6)		/* clear mmu_hash_lock */
 #endif
 
 	/* reenable interrupts and DR */
-	mtmsr	r10
+	mtmsr	r9
 	SYNC_601
 	isync
 
@@ -267,7 +308,8 @@ _GLOBAL(add_hash_page)
  * r5 contains the linux PTE, r6 contains the old value of the
  * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
  * offset to be added to addresses (0 if the MMU is on,
- * -KERNELBASE if it is off).
+ * -KERNELBASE if it is off).  r10 contains the upper half of
+ * the PTE if CONFIG_PTE_64BIT.
  * On SMP, the caller should have the mmu_hash_lock held.
  * We assume that the caller has (or will) set the _PAGE_HASHPTE
  * bit in the linux PTE in memory.  The value passed in r6 should
@@ -313,6 +355,11 @@ _GLOBAL(create_hpte)
 BEGIN_FTR_SECTION
 	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+#ifdef CONFIG_PTE_64BIT
+	/* Put the XPN bits into the PTE */
+	rlwimi	r8,r10,8,20,22
+	rlwimi	r8,r10,2,29,29
+#endif
 
 	/* Construct the high word of the PPC-style PTE (r5) */
 	rlwinm	r5,r3,7,1,24	/* put VSID in 0x7fffff80 bits */
@@ -499,14 +546,18 @@ _GLOBAL(flush_hash_pages)
 	isync
 
 	/* First find a PTE in the range that has _PAGE_HASHPTE set */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,22,20,29
-1:	lwz	r0,0(r5)
+#else
+	rlwimi	r5,r4,23,20,28
+#endif
+1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
 	cmpwi	cr1,r6,1
 	andi.	r0,r0,_PAGE_HASHPTE
 	bne	2f
 	ble	cr1,19f
 	addi	r4,r4,0x1000
-	addi	r5,r5,4
+	addi	r5,r5,PTE_SIZE
 	addi	r6,r6,-1
 	b	1b
 
@@ -545,7 +596,10 @@ _GLOBAL(flush_hash_pages)
 	 * already clear, we're done (for this pte).  If not,
 	 * clear it (atomically) and proceed.  -- paulus.
 	 */
-33:	lwarx	r8,0,r5			/* fetch the pte */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r5,r5,PTE_FLAGS_OFFSET
+#endif
+33:	lwarx	r8,0,r5			/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f		/* done if HASHPTE is already clear */
 	rlwinm	r8,r8,0,31,29	/* clear HASHPTE bit */
@@ -590,7 +644,7 @@ _GLOBAL(flush_hash_patch_B)
 
 8:	ble	cr1,9f			/* if all ptes checked */
 81:	addi	r6,r6,-1
-	addi	r5,r5,4			/* advance to next pte */
+	addi	r5,r5,PTE_SIZE
 	addi	r4,r4,0x1000
 	lwz	r0,0(r5)	/* check next pte */
 	cmpwi	cr1,r6,1
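
Note on the hash_low_32.S hunks above: with CONFIG_PTE_64BIT a Linux PTE becomes 8 bytes, with the flags in the low-order word (PTE_FLAGS_OFFSET bytes into the PTE on big-endian) and the extended physical-address (XPN) bits in the upper word. The lwarx/stwcx. loops therefore point r8 at the flags word and fetch the upper word separately into r10; on SMP the subf/lwzx pair builds an artificial address dependency so the upper-word load is ordered after the lwarx. The following is a minimal userspace sketch of that split, not kernel code; the flag values are illustrative only.

/*
 * Sketch: an 8-byte big-endian PTE split into an upper word (extended
 * physical-address bits) and a low-order flags word, the word the
 * assembly above locks and updates.  Flag values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define PTE_SIZE          8     /* sizeof(pte_t) with CONFIG_PTE_64BIT */
#define PTE_FLAGS_OFFSET  4     /* big-endian: flags word follows the upper word */
#define _PAGE_ACCESSED    0x100 /* illustrative, not the kernel's definition */
#define _PAGE_HASHPTE     0x002 /* illustrative, not the kernel's definition */

int main(void)
{
	/* Hypothetical PTE: upper word 0xf (36-bit physical), flags 0x...043 */
	uint64_t pte = 0x0000000f12345043ULL;

	uint32_t flags = (uint32_t)pte;         /* the word lwarx/stwcx. operate on */
	uint32_t upper = (uint32_t)(pte >> 32); /* what the assembly loads into r10 */

	flags |= _PAGE_ACCESSED | _PAGE_HASHPTE; /* bits the update loop sets */
	pte = ((uint64_t)upper << 32) | flags;

	printf("upper word 0x%08x, flags word 0x%08x\n",
	       (unsigned)upper, (unsigned)flags);
	return 0;
}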
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 2001abdb1912..c31d6d26f0b5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -73,7 +73,7 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
 #endif /* HAVE_TLBCAM */
 
 #ifdef CONFIG_PTE_64BIT
-/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
 #define PGDIR_ORDER	1
 #else
 #define PGDIR_ORDER	0
@@ -288,7 +288,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 }
 
 /*
- * Map in all of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at KERNELBASE.
  */
 void __init mapin_ram(void)
 {
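
A quick sanity check of the PGDIR_ORDER hunk above (my own arithmetic, not from the patch): with 8-byte PTEs a 4 kB page holds only 512 PTEs, so each pgdir entry spans 2 MB instead of 4 MB, and covering a 4 GB virtual space takes 2048 4-byte entries, i.e. an 8 kB, order-1 pgdir.

/*
 * Sketch of why CONFIG_PTE_64BIT implies PGDIR_ORDER 1 on ppc32.
 * Assumes 4 kB pages, a 4 GB virtual space, and 4-byte pgdir entries;
 * illustrative arithmetic only, not code from the kernel.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size   = 4096;	/* bytes per page */
	unsigned long pte_size    = 8;		/* sizeof(pte_t) with CONFIG_PTE_64BIT */
	unsigned long pgd_entsize = 4;		/* a pgdir entry is a 32-bit pointer */

	unsigned long ptes_per_page  = page_size / pte_size;		/* 512 */
	unsigned long span_per_entry = ptes_per_page * page_size;	/* 2 MB per PTE page */
	unsigned long long vspace    = 1ULL << 32;			/* 4 GB virtual space */
	unsigned long pgd_entries    = (unsigned long)(vspace / span_per_entry); /* 2048 */
	unsigned long pgdir_bytes    = pgd_entries * pgd_entsize;	/* 8192 = 8 kB */

	printf("pgdir = %lu bytes -> PGDIR_ORDER %d\n",
	       pgdir_bytes, pgdir_bytes > page_size ? 1 : 0);
	return 0;
}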
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index eb4b512d65fa..f9a47fee3927 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -45,6 +45,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
+EXPORT_SYMBOL(flush_hash_entry);
 
 /*
  * Called by ptep_set_access_flags, must flush on CPUs for which the