author     Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 13:31:36 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-03-06 13:31:36 -0500
commit     8dcd175bc3d50b78413c56d5b17d4bddd77412ef (patch)
tree       2c2fb25759b43f2e73830f07ef3b444d76825280 /include/linux/hugetlb.h
parent     afe6fe7036c6efdcb46cabc64bec9b6e4a005210 (diff)
parent     fff04900ea79915939ef6a3aad78fca6511a3034 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
- a few misc things
- ocfs2 updates
- most of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (159 commits)
tools/testing/selftests/proc/proc-self-syscall.c: remove duplicate include
proc: more robust bulk read test
proc: test /proc/*/maps, smaps, smaps_rollup, statm
proc: use seq_puts() everywhere
proc: read kernel cpu stat pointer once
proc: remove unused argument in proc_pid_lookup()
fs/proc/thread_self.c: code cleanup for proc_setup_thread_self()
fs/proc/self.c: code cleanup for proc_setup_self()
proc: return exit code 4 for skipped tests
mm,mremap: bail out earlier in mremap_to under map pressure
mm/sparse: fix a bad comparison
mm/memory.c: do_fault: avoid usage of stale vm_area_struct
writeback: fix inode cgroup switching comment
mm/huge_memory.c: fix "orig_pud" set but not used
mm/hotplug: fix an imbalance with DEBUG_PAGEALLOC
mm/memcontrol.c: fix bad line in comment
mm/cma.c: cma_declare_contiguous: correct err handling
mm/page_ext.c: fix an imbalance with kmemleak
mm/compaction: pass pgdat to too_many_isolated() instead of zone
mm: remove zone_lru_lock() function, access ->lru_lock directly
...
Diffstat (limited to 'include/linux/hugetlb.h')
-rw-r--r--  include/linux/hugetlb.h | 70
1 file changed, 67 insertions(+), 3 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 087fd5f48c91..ea35263eb76b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -371,6 +371,8 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 					nodemask_t *nmask);
 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address);
+struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
+				     int nid, nodemask_t *nmask);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
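The newly exported alloc_migrate_huge_page() prototype gives migration code a way to request a fresh huge page on (or near) a given node. A minimal caller sketch, assuming a migration path that already holds a source page; the gfp mask and node choice below are illustrative assumptions, not taken from this patch:

	/*
	 * Hypothetical caller: allocate a target huge page for migrating
	 * 'page', preferring the node it currently resides on.
	 * page_hstate() and page_to_nid() are existing kernel helpers.
	 */
	struct page *new_hpage = alloc_migrate_huge_page(page_hstate(page),
							 GFP_HIGHUSER,
							 page_to_nid(page),
							 NULL); /* NULL nodemask: any node */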
@@ -493,17 +495,54 @@ static inline pgoff_t basepage_index(struct page *page)
 extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
-static inline bool hugepage_migration_supported(struct hstate *h)
-{
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#ifndef arch_hugetlb_migration_supported
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
 	if ((huge_page_shift(h) == PMD_SHIFT) ||
-	    (huge_page_shift(h) == PGDIR_SHIFT))
+	    (huge_page_shift(h) == PUD_SHIFT) ||
+	    (huge_page_shift(h) == PGDIR_SHIFT))
 		return true;
 	else
 		return false;
+}
+#endif
 #else
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
 	return false;
+}
 #endif
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+	return arch_hugetlb_migration_supported(h);
+}
+
+/*
+ * Movability check is different as compared to migration check.
+ * It determines whether or not a huge page should be placed on
+ * movable zone or not. Movability of any huge page should be
+ * required only if huge page size is supported for migration.
+ * There wont be any reason for the huge page to be movable if
+ * it is not migratable to start with. Also the size of the huge
+ * page should be large enough to be placed under a movable zone
+ * and still feasible enough to be migratable. Just the presence
+ * in movable zone does not make the migration feasible.
+ *
+ * So even though large huge page sizes like the gigantic ones
+ * are migratable they should not be movable because its not
+ * feasible to migrate them from movable zone.
+ */
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+	if (!hugepage_migration_supported(h))
+		return false;
+
+	if (hstate_is_gigantic(h))
+		return false;
+	return true;
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
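Note the #ifndef arch_hugetlb_migration_supported guard above: the generic page-size check is now only a default, and an architecture can supply its own predicate that this header picks up instead. A sketch of what such an override could look like, assuming it lives in the architecture's <asm/hugetlb.h>; the PMD-only policy shown is a made-up example, not any real architecture's code:

	/*
	 * In <asm/hugetlb.h>: define the macro and the function under the
	 * same name so the #ifndef in <linux/hugetlb.h> skips the generic
	 * definition.
	 */
	#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
	static inline bool arch_hugetlb_migration_supported(struct hstate *h)
	{
		/* e.g. this architecture can only migrate PMD-sized huge pages */
		return huge_page_shift(h) == PMD_SHIFT;
	}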
@@ -543,6 +582,26 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr
 	set_huge_pte_at(mm, addr, ptep, pte);
 }
 #endif
+
+#ifndef huge_ptep_modify_prot_start
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+						unsigned long addr, pte_t *ptep)
+{
+	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+#endif
+
+#ifndef huge_ptep_modify_prot_commit
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+						unsigned long addr, pte_t *ptep,
+						pte_t old_pte, pte_t pte)
+{
+	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+#endif
+
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 #define alloc_huge_page(v, a, r) NULL
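The huge_ptep_modify_prot_start()/huge_ptep_modify_prot_commit() pair added above is a start/commit transaction for changing protection bits on a huge PTE: _start clears the entry and hands back the old value, _commit installs the new one, so hardware walkers never see a half-updated entry in between. The generic fallbacks simply wrap huge_ptep_get_and_clear() and set_huge_pte_at(); an architecture with stricter requirements can override them via the same #ifndef convention as above. A condensed sketch of the intended calling pattern, modeled on a hugetlb_change_protection()-style loop (page table locking and TLB flushing omitted):

	pte_t old_pte, new_pte;

	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
	new_pte = huge_pte_modify(old_pte, newprot);	/* apply new protection bits */
	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);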
@@ -602,6 +661,11 @@ static inline bool hugepage_migration_supported(struct hstate *h)
 	return false;
 }
 
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+	return false;
+}
+
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					   struct mm_struct *mm, pte_t *pte)
 {
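The split between the two predicates matters most at allocation time: hugepage_migration_supported() says whether pages of a given hstate can be migrated at all, while the new hugepage_movable_supported() says whether they may be placed in ZONE_MOVABLE (gigantic pages stay out even though they are migratable in principle, since migrating them out of the movable zone is not practical). A sketch of an allocator-side gfp decision consulting the new predicate; this mirrors htlb_alloc_mask()-style logic but is an illustration, not code from this commit:

	/*
	 * Illustrative only: choose a gfp mask depending on whether pages
	 * of this hstate are allowed to land in ZONE_MOVABLE.
	 */
	static inline gfp_t hugepage_gfp_sketch(struct hstate *h)
	{
		if (hugepage_movable_supported(h))
			return GFP_HIGHUSER_MOVABLE;	/* may use ZONE_MOVABLE */
		return GFP_HIGHUSER;			/* keep out of ZONE_MOVABLE */
	}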