aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--arch/powerpc/mm/hugetlbpage.c3
-rw-r--r--arch/powerpc/mm/mem.c24
-rw-r--r--arch/powerpc/mm/pgtable_32.c5
3 files changed, 12 insertions, 20 deletions
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 1bb20d841080..8c77c791f87e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1014,7 +1014,6 @@ repeat:
1014 1014
1015 /* Primary is full, try the secondary */ 1015 /* Primary is full, try the secondary */
1016 if (unlikely(slot == -1)) { 1016 if (unlikely(slot == -1)) {
1017 new_pte |= _PAGE_F_SECOND;
1018 hpte_group = ((~hash & htab_hash_mask) * 1017 hpte_group = ((~hash & htab_hash_mask) *
1019 HPTES_PER_GROUP) & ~0x7UL; 1018 HPTES_PER_GROUP) & ~0x7UL;
1020 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 1019 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
@@ -1033,7 +1032,7 @@ repeat:
1033 if (unlikely(slot == -2)) 1032 if (unlikely(slot == -2))
1034 panic("hash_huge_page: pte_insert failed\n"); 1033 panic("hash_huge_page: pte_insert failed\n");
1035 1034
1036 new_pte |= (slot << 12) & _PAGE_F_GIX; 1035 new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
1037 } 1036 }
1038 1037
1039 /* 1038 /*
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d1c0758c5611..77b4637097e9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,10 +61,6 @@ unsigned long memory_limit;
61extern void hash_preload(struct mm_struct *mm, unsigned long ea, 61extern void hash_preload(struct mm_struct *mm, unsigned long ea,
62 unsigned long access, unsigned long trap); 62 unsigned long access, unsigned long trap);
63 63
64/*
65 * This is called by /dev/mem to know if a given address has to
66 * be mapped non-cacheable or not
67 */
68int page_is_ram(unsigned long pfn) 64int page_is_ram(unsigned long pfn)
69{ 65{
70 unsigned long paddr = (pfn << PAGE_SHIFT); 66 unsigned long paddr = (pfn << PAGE_SHIFT);
@@ -490,19 +486,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
490 !cpu_has_feature(CPU_FTR_NOEXECUTE) && 486 !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
491 pfn_valid(pfn)) { 487 pfn_valid(pfn)) {
492 struct page *page = pfn_to_page(pfn); 488 struct page *page = pfn_to_page(pfn);
489#ifdef CONFIG_8xx
490 /* On 8xx, cache control instructions (particularly
491 * "dcbst" from flush_dcache_icache) fault as write
492 * operation if there is an unpopulated TLB entry
493 * for the address in question. To workaround that,
494 * we invalidate the TLB here, thus avoiding dcbst
495 * misbehaviour.
496 */
497 _tlbie(address);
498#endif
493 if (!PageReserved(page) 499 if (!PageReserved(page)
494 && !test_bit(PG_arch_1, &page->flags)) { 500 && !test_bit(PG_arch_1, &page->flags)) {
495 if (vma->vm_mm == current->active_mm) { 501 if (vma->vm_mm == current->active_mm) {
496#ifdef CONFIG_8xx
497 /* On 8xx, cache control instructions (particularly
498 * "dcbst" from flush_dcache_icache) fault as write
499 * operation if there is an unpopulated TLB entry
500 * for the address in question. To workaround that,
501 * we invalidate the TLB here, thus avoiding dcbst
502 * misbehaviour.
503 */
504 _tlbie(address);
505#endif
506 __flush_dcache_icache((void *) address); 502 __flush_dcache_icache((void *) address);
507 } else 503 } else
508 flush_dcache_icache_page(page); 504 flush_dcache_icache_page(page);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 1891dbeeb8e9..bd02272bcb0f 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -294,11 +294,8 @@ void __init mapin_ram(void)
294 } 294 }
295} 295}
296 296
297/* is x a power of 2? */
298#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
299
300/* is x a power of 4? */ 297/* is x a power of 4? */
301/* is_power_of_4 via generic helper */#define is_power_of_4(x) ((x) != 0 && (((x) & (x-1)) == 0) && (ffs(x) & 1)) 298#define is_power_of_4(x) is_power_of_2(x) && (ffs(x) & 1)
302 299
303/* 300/*
304 * Set up a mapping for a block of I/O. 301 * Set up a mapping for a block of I/O.