Diffstat (limited to 'arch/powerpc/mm/hugetlbpage.c')
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c  168
1 file changed, 2 insertions(+), 166 deletions(-)
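Note: the second hunk below removes the hash-MMU huge-page fault path (hash_huge_page_do_lazy_icache() and __hash_page_huge()) together with the includes and SLB-area macros only it needed. As a reading aid, here is a minimal, self-contained sketch of the primary/secondary/evict-and-retry insertion pattern that the removed __hash_page_huge() follows; HPTES_PER_GROUP is reused as a name only, and NGROUPS, try_insert(), insert_with_retry() and the toy table are illustrative stand-ins, not the kernel's data structures or API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins -- not the kernel's hash page table. */
#define HPTES_PER_GROUP 8
#define NGROUPS         16

static int table[NGROUPS * HPTES_PER_GROUP]; /* 0 = empty slot */

/* Try to place 'val' in the 8-slot group starting at 'group'.
 * Returns the slot index, or -1 if the group is full. */
static long try_insert(unsigned long group, int val)
{
	for (unsigned long i = 0; i < HPTES_PER_GROUP; i++) {
		if (table[group + i] == 0) {
			table[group + i] = val;
			return (long)(group + i);
		}
	}
	return -1;
}

/* Mirrors the shape of the removed __hash_page_huge() insertion loop:
 * try the primary group first, then the secondary (~hash) group, and
 * if both are full, evict an entry from a randomly chosen group and
 * retry (the kernel uses mftb() & 0x1 and ppc_md.hpte_remove()). */
static long insert_with_retry(unsigned long hash, int val)
{
	unsigned long mask = NGROUPS - 1;
	long slot;

repeat:
	slot = try_insert((hash & mask) * HPTES_PER_GROUP, val);
	if (slot == -1) {
		slot = try_insert((~hash & mask) * HPTES_PER_GROUP, val);
		if (slot == -1) {
			/* Both groups full: evict one victim and retry. */
			unsigned long victim_group =
				(((rand() & 1) ? hash : ~hash) & mask) * HPTES_PER_GROUP;
			table[victim_group + (rand() % HPTES_PER_GROUP)] = 0;
			goto repeat;
		}
	}
	return slot;
}

int main(void)
{
	for (int i = 1; i <= 40; i++)
		printf("value %d -> slot %ld\n", i,
		       insert_with_retry((unsigned long)i * 2654435761u, i));
	return 0;
}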
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a7161c07886d..1bf065546fa1 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -7,29 +7,17 @@
  * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
  */
 
-#include <linux/init.h>
-#include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/io.h>
 #include <linux/hugetlb.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/sysctl.h>
-#include <asm/mman.h>
+#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/spu.h>
 
 #define PAGE_SHIFT_64K	16
 #define PAGE_SHIFT_16M	24
 #define PAGE_SHIFT_16G	34
 
-#define NUM_LOW_AREAS		(0x100000000UL >> SID_SHIFT)
-#define NUM_HIGH_AREAS		(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 #define MAX_NUMBER_GPAGES	1024
 
 /* Tracks the 16G pages after the device tree is scanned and before the
@@ -502,158 +490,6 @@ unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 	return 1UL << mmu_psize_to_shift(psize);
 }
 
-/*
- * Called by asm hashtable.S for doing lazy icache flush
- */
-static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
-					pte_t pte, int trap, unsigned long sz)
-{
-	struct page *page;
-	int i;
-
-	if (!pfn_valid(pte_pfn(pte)))
-		return rflags;
-
-	page = pte_page(pte);
-
-	/* page is dirty */
-	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
-		if (trap == 0x400) {
-			for (i = 0; i < (sz / PAGE_SIZE); i++)
-				__flush_dcache_icache(page_address(page+i));
-			set_bit(PG_arch_1, &page->flags);
-		} else {
-			rflags |= HPTE_R_N;
-		}
-	}
-	return rflags;
-}
-
-int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
-		     pte_t *ptep, unsigned long trap, int local, int ssize,
-		     unsigned int shift, unsigned int mmu_psize)
-{
-	unsigned long old_pte, new_pte;
-	unsigned long va, rflags, pa, sz;
-	long slot;
-	int err = 1;
-
-	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
-
-	/* Search the Linux page table for a match with va */
-	va = hpt_va(ea, vsid, ssize);
-
-	/*
-	 * Check the user's access rights to the page.  If access should be
-	 * prevented then send the problem up to do_page_fault.
-	 */
-	if (unlikely(access & ~pte_val(*ptep)))
-		goto out;
-	/*
-	 * At this point, we have a pte (old_pte) which can be used to build
-	 * or update an HPTE. There are 2 cases:
-	 *
-	 * 1. There is a valid (present) pte with no associated HPTE (this is
-	 *	the most common case)
-	 * 2. There is a valid (present) pte with an associated HPTE. The
-	 *	current values of the pp bits in the HPTE prevent access
-	 *	because we are doing software DIRTY bit management and the
-	 *	page is currently not DIRTY.
-	 */
-
-
-	do {
-		old_pte = pte_val(*ptep);
-		if (old_pte & _PAGE_BUSY)
-			goto out;
-		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
-	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
-					 old_pte, new_pte));
-
-	rflags = 0x2 | (!(new_pte & _PAGE_RW));
-	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
-	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
-	sz = ((1UL) << shift);
-	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		/* No CPU has hugepages but lacks no execute, so we
-		 * don't need to worry about that case */
-		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
-						       trap, sz);
-
-	/* Check if pte already has an hpte (case 2) */
-	if (unlikely(old_pte & _PAGE_HASHPTE)) {
-		/* There MIGHT be an HPTE for this pte */
-		unsigned long hash, slot;
-
-		hash = hpt_hash(va, shift, ssize);
-		if (old_pte & _PAGE_F_SECOND)
-			hash = ~hash;
-		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-		slot += (old_pte & _PAGE_F_GIX) >> 12;
-
-		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
-					 ssize, local) == -1)
-			old_pte &= ~_PAGE_HPTEFLAGS;
-	}
-
-	if (likely(!(old_pte & _PAGE_HASHPTE))) {
-		unsigned long hash = hpt_hash(va, shift, ssize);
-		unsigned long hpte_group;
-
-		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
-
-repeat:
-		hpte_group = ((hash & htab_hash_mask) *
-			      HPTES_PER_GROUP) & ~0x7UL;
-
-		/* clear HPTE slot informations in new PTE */
-#ifdef CONFIG_PPC_64K_PAGES
-		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
-#else
-		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
-#endif
-		/* Add in WIMG bits */
-		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
-				      _PAGE_COHERENT | _PAGE_GUARDED));
-
-		/* Insert into the hash table, primary slot */
-		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
-					  mmu_psize, ssize);
-
-		/* Primary is full, try the secondary */
-		if (unlikely(slot == -1)) {
-			hpte_group = ((~hash & htab_hash_mask) *
-				      HPTES_PER_GROUP) & ~0x7UL;
-			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
-						  HPTE_V_SECONDARY,
-						  mmu_psize, ssize);
-			if (slot == -1) {
-				if (mftb() & 0x1)
-					hpte_group = ((hash & htab_hash_mask) *
-						      HPTES_PER_GROUP)&~0x7UL;
-
-				ppc_md.hpte_remove(hpte_group);
-				goto repeat;
-			}
-		}
-
-		if (unlikely(slot == -2))
-			panic("hash_huge_page: pte_insert failed\n");
-
-		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
-	}
-
-	/*
-	 * No need to use ldarx/stdcx here
-	 */
-	*ptep = __pte(new_pte & ~_PAGE_BUSY);
-
-	err = 0;
-
- out:
-	return err;
-}
-
 static int __init add_huge_page_size(unsigned long long size)
 {
 	int shift = __ffs(size);