author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2015-11-30 22:36:53 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>			2015-12-13 23:19:14 -0500
commit		e34aa03ca48d0c7982530436ce996f374b65913c (patch)
tree		6d66b8a7067eea95aebabb1d4770d66d0cda6049 /arch/powerpc/include/asm/nohash
parent		26a344aea48c99cfd80d292a470a480e1c2bd5d9 (diff)
powerpc/mm: Move THP headers around
We support THP only with book3s_64 and a 64K page size. Move the
THP details to hash64-64k.h to make that explicit.
Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/include/asm/nohash')
-rw-r--r--	arch/powerpc/include/asm/nohash/64/pgtable.h	253
1 file changed, 5 insertions, 248 deletions
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index c24e03f22655..d635a924d652 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -154,6 +154,11 @@ static inline void pmd_clear(pmd_t *pmdp)
 	*pmdp = __pmd(0);
 }
 
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+	return __pte(pmd_val(pmd));
+}
+
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
 				 || (pmd_val(pmd) & PMD_BAD_BITS))
@@ -389,252 +394,4 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 #endif /* __ASSEMBLY__ */
 
-/*
- * THP pages can't be special, so we reuse the _PAGE_SPECIAL bit.
- */
-#define _PAGE_SPLITTING _PAGE_SPECIAL
-
-/*
- * We need to differentiate between an explicit huge page and a THP huge
- * page, since a THP huge page also needs to track real subpage details.
- */
-#define _PAGE_THP_HUGE _PAGE_4K_PFN
-
-/*
- * Set of bits not changed in pmd_modify.
- */
-#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | \
-			 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
-			 _PAGE_THP_HUGE)
-
-#ifndef __ASSEMBLY__
-/*
- * The linux hugepage PMD now includes the pmd entries followed by the
- * address of the stashed pgtable_t, which holds the hpte bits:
- * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000 ]. We use one byte
- * per HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries,
- * and with 4K HPTEs we need 4096 entries. Both fit in a 4K pgtable_t.
- *
- * The last three bits are intentionally left as zero. These memory
- * locations are also used as normal page PTE pointers, so if any such
- * pointers are left around while we collapse a hugepage, we need to
- * make sure their _PAGE_PRESENT bit is zero when we look at them.
- */
-static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
-{
-	return (hpte_slot_array[index] >> 3) & 0x1;
-}
-
-static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
-					   int index)
-{
-	return hpte_slot_array[index] >> 4;
-}
-
-static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
-					unsigned int index, unsigned int hidx)
-{
-	hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
-}
-
-struct page *realmode_pfn_to_page(unsigned long pfn);
-
-static inline char *get_hpte_slot_array(pmd_t *pmdp)
-{
-	/*
-	 * The hpte hindex is stored in the pgtable whose address is in the
-	 * second half of the PMD.
-	 *
-	 * Order this load with the test for pmd_trans_huge in the caller.
-	 */
-	smp_rmb();
-	return *(char **)(pmdp + PTRS_PER_PMD);
-
-
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-				   pmd_t *pmdp, unsigned long old_pmd);
-extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
-extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
-extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
-extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-		       pmd_t *pmdp, pmd_t pmd);
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-				 pmd_t *pmd);
-/*
- *
- * For core kernel code, by design pmd_trans_huge is never run on any
- * hugetlbfs page. The hugetlbfs page table walking and mangling paths
- * are totally separated from the core VM paths, differentiated by
- * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
- *
- * pmd_trans_huge() is defined as false at build time if
- * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
- * time in that case.
- *
- * For ppc64 we need to differentiate explicit hugepages from THP, because
- * for THP we also track the subpage details at the pmd level. We don't do
- * that for explicit huge pages.
- *
- */
-static inline int pmd_trans_huge(pmd_t pmd)
-{
-	/*
-	 * leaf pte for huge page, bottom two bits != 00
-	 */
-	return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
-}
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-	if (pmd_trans_huge(pmd))
-		return pmd_val(pmd) & _PAGE_SPLITTING;
-	return 0;
-}
-
-extern int has_transparent_hugepage(void);
-#else
-static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
-					  unsigned long addr, pmd_t *pmdp,
-					  unsigned long old_pmd)
-{
-
-	WARN(1, "%s called with THP disabled\n", __func__);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-static inline int pmd_large(pmd_t pmd)
-{
-	/*
-	 * leaf pte for huge page, bottom two bits != 00
-	 */
-	return ((pmd_val(pmd) & 0x3) != 0x0);
-}
-
-static inline pte_t pmd_pte(pmd_t pmd)
-{
-	return __pte(pmd_val(pmd));
-}
-
-static inline pmd_t pte_pmd(pte_t pte)
-{
-	return __pmd(pte_val(pte));
-}
-
-static inline pte_t *pmdp_ptep(pmd_t *pmd)
-{
-	return (pte_t *)pmd;
-}
-
-#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
-#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
-#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
-#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
-#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
-#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-
-#define __HAVE_ARCH_PMD_WRITE
-#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
-
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
-{
-	/* Do nothing, mk_pmd() does this part. */
-	return pmd;
-}
-
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-	return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
-}
-
-static inline pmd_t pmd_mksplitting(pmd_t pmd)
-{
-	return __pmd(pmd_val(pmd) | _PAGE_SPLITTING);
-}
-
-#define __HAVE_ARCH_PMD_SAME
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
-}
-
-#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
-				 unsigned long address, pmd_t *pmdp,
-				 pmd_t entry, int dirty);
-
-extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
-					 unsigned long addr,
-					 pmd_t *pmdp,
-					 unsigned long clr,
-					 unsigned long set);
-
-static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
-					      unsigned long addr, pmd_t *pmdp)
-{
-	unsigned long old;
-
-	if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
-		return 0;
-	old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
-	return ((old & _PAGE_ACCESSED) != 0);
-}
-
-#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-				     unsigned long address, pmd_t *pmdp);
-#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
-				  unsigned long address, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-				     unsigned long addr, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_SET_WRPROTECT
-static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-				      pmd_t *pmdp)
-{
-
-	if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
-		return;
-
-	pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
-}
-
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern void pmdp_splitting_flush(struct vm_area_struct *vma,
-				 unsigned long address, pmd_t *pmdp);
-
-extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
-				 unsigned long address, pmd_t *pmdp);
-#define pmdp_collapse_flush pmdp_collapse_flush
-
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-				       pgtable_t pgtable);
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-			    pmd_t *pmdp);
-
-#define pmd_move_must_withdraw pmd_move_must_withdraw
-struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
-					 struct spinlock *old_pmd_ptl)
-{
-	/*
-	 * Archs like ppc64 use the pgtable to store per-pmd
-	 * specific information, so when we switch the pmd,
-	 * we should also withdraw and deposit the pgtable.
-	 */
-	return true;
-}
-#endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */