Diffstat (limited to 'arch/powerpc/mm'):

 arch/powerpc/mm/dma-noncoherent.c | 20 ++++++++++++++++++++
 arch/powerpc/mm/hash_low_64.S     | 24 ++++++++++++------------
 arch/powerpc/mm/hash_utils_64.c   |  2 +-
 arch/powerpc/mm/mem.c             |  2 +-
 arch/powerpc/mm/numa.c            | 10 +++++-----
 arch/powerpc/mm/tlb_low_64e.S     |  6 +++---
 6 files changed, 42 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 757c0bed9a91..b42f76c4948d 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
 #endif
 }
 EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+	/* This should always be populated, so we don't test every
+	 * level. If that fails, we'll have a nice crash which
+	 * will be as good as a BUG_ON()
+	 */
+	pgd_t *pgd = pgd_offset_k(cpu_addr);
+	pud_t *pud = pud_offset(pgd, cpu_addr);
+	pmd_t *pmd = pmd_offset(pud, cpu_addr);
+	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+	return pte_pfn(*ptep);
+}
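
The new helper resolves a coherent-pool kernel virtual address to its PFN by walking the kernel page tables. For illustration only, here is a hedged sketch of how a dma_mmap_coherent()-style mapper could consume it; the function name and the remap_pfn_range()-based body are assumptions for this sketch, not part of the patch:

/* Sketch (not from this patch): map a non-coherent DMA buffer into
 * userspace, using __dma_get_coherent_pfn() as added above. */
static int example_dma_mmap_coherent(struct vm_area_struct *vma,
				     void *cpu_addr, size_t size)
{
	unsigned long pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);

	if (!pfn)			/* address not backed by a PTE */
		return -ENXIO;
	/* Non-coherent memory must stay uncached in userspace too */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}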
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 3079f6b44cf5..5b7dd4ea02b5 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -192,8 +192,8 @@ htab_insert_pte:
 	rldicr	r3,r0,3,63-3	/* r3 = (hash & mask) << 3 */
 
 	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARM(r4)(r1)	/* Retreive new pp bits */
-	mr	r4,r29			/* Retreive va */
+	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+	mr	r4,r29			/* Retrieve va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -215,8 +215,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	rldicr	r3,r0,3,63-3	/* r0 = (~hash & mask) << 3 */
 
 	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARM(r4)(r1)	/* Retreive new pp bits */
-	mr	r4,r29			/* Retreive va */
+	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+	mr	r4,r29			/* Retrieve va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -495,8 +495,8 @@ htab_special_pfn:
 	rldicr	r3,r0,3,63-3	/* r0 = (hash & mask) << 3 */
 
 	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARM(r4)(r1)	/* Retreive new pp bits */
-	mr	r4,r29			/* Retreive va */
+	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+	mr	r4,r29			/* Retrieve va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -522,8 +522,8 @@ _GLOBAL(htab_call_hpte_insert1)
 	rldicr	r3,r0,3,63-3	/* r0 = (~hash & mask) << 3 */
 
 	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARM(r4)(r1)	/* Retreive new pp bits */
-	mr	r4,r29			/* Retreive va */
+	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+	mr	r4,r29			/* Retrieve va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_4K		/* page size */
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -813,8 +813,8 @@ ht64_insert_pte:
 	rldicr	r3,r0,3,63-3	/* r0 = (hash & mask) << 3 */
 
 	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARM(r4)(r1)	/* Retreive new pp bits */
-	mr	r4,r29			/* Retreive va */
+	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+	mr	r4,r29			/* Retrieve va */
 	li	r7,0			/* !bolted, !secondary */
 	li	r8,MMU_PAGE_64K
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
@@ -836,8 +836,8 @@ _GLOBAL(ht64_call_hpte_insert1)
 	rldicr	r3,r0,3,63-3	/* r0 = (~hash & mask) << 3 */
 
 	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARM(r4)(r1)	/* Retreive new pp bits */
-	mr	r4,r29			/* Retreive va */
+	ld	r6,STK_PARM(r4)(r1)	/* Retrieve new pp bits */
+	mr	r4,r29			/* Retrieve va */
 	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
 	li	r8,MMU_PAGE_64K
 	ld	r9,STK_PARM(r9)(r1)	/* segment size */
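
All six hunks fix the same comment at call sites that marshal arguments for the same indirect call: per the 64-bit ELF ABI, r3 through r9 carry the first seven arguments. A hedged reconstruction of the C prototype behind ppc_md.hpte_insert follows; the parameter names are recalled from machdep.h of this era and should be treated as assumptions, though the register roles come straight from the comments above:

/* Approximate prototype targeted by these call sites:
 *   r3 = hpte_group - (hash & mask) << 3, offset into the hash table
 *   r4 = va         - virtual address
 *   r5 = pa         - physical address (loaded elsewhere in the path)
 *   r6 = rflags     - the "new pp bits" (protection)
 *   r7 = vflags     - 0 or HPTE_V_SECONDARY
 *   r8 = psize      - MMU_PAGE_4K or MMU_PAGE_64K
 *   r9 = ssize      - segment size
 */
long hpte_insert(unsigned long hpte_group, unsigned long va,
		 unsigned long pa, unsigned long rflags,
		 unsigned long vflags, int psize, int ssize);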
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a5991facddce..58a022d0f463 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -753,7 +753,7 @@ void __cpuinit early_init_mmu_secondary(void)
 	mtspr(SPRN_SDR1, _SDR1);
 
 	/* Initialize STAB/SLB. We use a virtual address as it works
-	 * in real mode on pSeries and we want a virutal address on
+	 * in real mode on pSeries and we want a virtual address on
 	 * iSeries anyway
 	 */
 	if (cpu_has_feature(CPU_FTR_SLB))
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a66499650909..57e545b84bf1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -424,7 +424,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 	clear_page(page);
 
 	/*
-	 * We shouldnt have to do this, but some versions of glibc
+	 * We shouldn't have to do this, but some versions of glibc
 	 * require it (ld.so assumes zero filled pages are icache clean)
 	 * - Anton
 	 */
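
The hunk touches only the comment, but for context: the function it sits in pairs the page zeroing with a cache flush so that ld.so's icache-clean assumption holds. A sketch of that shape, where the flush_dcache_page() call is an assumption about the surrounding code rather than part of this diff:

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);
	/* Keep ld.so happy: it assumes zero-filled pages are icache
	 * clean, so push the zeroed lines out of the data cache. */
	flush_dcache_page(pg);	/* assumed flush primitive */
}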
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0dc95c0aa3be..5ec1dad2a19d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -440,11 +440,11 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 }
 
 /*
- * Retreive and validate the ibm,dynamic-memory property of the device tree.
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
  *
  * The layout of the ibm,dynamic-memory property is a number N of memblock
  * list entries followed by N memblock list entries. Each memblock list entry
- * contains information as layed out in the of_drconf_cell struct above.
+ * contains information as laid out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 {
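
The hunk fixes wording only, but the layout the comment describes is easy to misread: one count cell N, then N fixed-size entries. A hedged sketch of a walker over that layout, reusing the read_drconf_cell() helper named in the hunk context above (the field names in the trailing comment are illustrative assumptions):

/* Sketch (not kernel code): iterate ibm,dynamic-memory as described. */
static void example_walk_drconf(const u32 *dm)
{
	u32 i, n = *dm++;	/* first cell: number of entries */

	for (i = 0; i < n; i++) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);	/* advances dm one entry */
		/* ... use e.g. drmem.base_addr, drmem.flags ... */
	}
}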
@@ -468,7 +468,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }
 
 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
  * from the device tree.
  */
 static u64 of_get_lmb_size(struct device_node *memory)
@@ -490,7 +490,7 @@ struct assoc_arrays {
 };
 
 /*
- * Retreive and validate the list of associativity arrays for drconf
+ * Retrieve and validate the list of associativity arrays for drconf
  * memory from the ibm,associativity-lookup-arrays property of the
  * device tree..
  *
@@ -604,7 +604,7 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
  * Returns the size the region should have to enforce the memory limit.
  * This will either be the original value of size, a truncated value,
  * or zero. If the returned value of size is 0 the region should be
- * discarded as it lies wholy above the memory limit.
+ * discarded as it lies wholly above the memory limit.
  */
 static unsigned long __init numa_enforce_memory_limit(unsigned long start,
 						      unsigned long size)
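
The three return values the comment lists (full size, truncated, or zero) amount to a simple clamp of the region against the limit. A hedged sketch of that rule, with the limit passed in as a hypothetical parameter since the kernel's actual body and bound are not shown in this hunk:

/* Sketch of the rule described above: full size, truncated, or 0. */
static unsigned long example_enforce_limit(unsigned long start,
					   unsigned long size,
					   unsigned long limit)
{
	if (start + size <= limit)	/* entirely below the limit */
		return size;
	if (start >= limit)		/* wholly above: discard */
		return 0;
	return limit - start;		/* straddles: truncate */
}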
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 8526bd9d2aa3..af0892209417 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -192,7 +192,7 @@ normal_tlb_miss:
192 or r10,r15,r14 192 or r10,r15,r14
193 193
194BEGIN_MMU_FTR_SECTION 194BEGIN_MMU_FTR_SECTION
195 /* Set the TLB reservation and seach for existing entry. Then load 195 /* Set the TLB reservation and search for existing entry. Then load
196 * the entry. 196 * the entry.
197 */ 197 */
198 PPC_TLBSRX_DOT(0,r16) 198 PPC_TLBSRX_DOT(0,r16)
@@ -425,13 +425,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
 
 virt_page_table_tlb_miss_fault:
 	/* If we fault here, things are a little bit tricky. We need to call
-	 * either data or instruction store fault, and we need to retreive
+	 * either data or instruction store fault, and we need to retrieve
 	 * the original fault address and ESR (for data).
 	 *
 	 * The thing is, we know that in normal circumstances, this is
 	 * always called as a second level tlb miss for SW load or as a first
 	 * level TLB miss for HW load, so we should be able to peek at the
-	 * relevant informations in the first exception frame in the PACA.
+	 * relevant information in the first exception frame in the PACA.
 	 *
 	 * However, we do need to double check that, because we may just hit
 	 * a stray kernel pointer or a userland attack trying to hit those