| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-18 22:05:00 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-18 22:05:00 -0500 |
| commit | 86fbf1617a6522379815d34b90fef2aa9068f89d (patch) | |
| tree | 5282028f32ec63b16c81968050b3648a766c13c1 /include | |
| parent | a36c160cbbf35aaabf47916e5ea39c519aa80068 (diff) | |
| parent | 98398c32f6687ee1e1f3ae084effb4b75adb0747 (diff) | |
Merge branch 'akpm' (incoming from Andrew)
Merge patches from Andrew Morton:
"23 fixes and a MAINTAINERS update"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (24 commits)
mm/hugetlb: check for pte NULL pointer in __page_check_address()
fix build with make 3.80
mm/mempolicy: fix !vma in new_vma_page()
MAINTAINERS: add Davidlohr as GPT maintainer
mm/memory-failure.c: recheck PageHuge() after hugetlb page migrate successfully
mm/compaction: respect ignore_skip_hint in update_pageblock_skip
mm/mempolicy: correct putback method for isolate pages if failed
mm: add missing dependency in Kconfig
sh: always link in helper functions extracted from libgcc
mm: page_alloc: exclude unreclaimable allocations from zone fairness policy
mm: numa: defer TLB flush for THP migration as long as possible
mm: numa: guarantee that tlb_flush_pending updates are visible before page table updates
mm: fix TLB flush race between migration, and change_protection_range
mm: numa: avoid unnecessary disruption of NUMA hinting during migration
mm: numa: clear numa hinting information on mprotect
sched: numa: skip inaccessible VMAs
mm: numa: avoid unnecessary work on the failure path
mm: numa: ensure anon_vma is locked to prevent parallel THP splits
mm: numa: do not clear PTE for pte_numa update
mm: numa: do not clear PMD during PTE update scan
...
Diffstat (limited to 'include')
| -rw-r--r-- | include/asm-generic/pgtable.h | 2 |
| -rw-r--r-- | include/linux/migrate.h | 9 |
| -rw-r--r-- | include/linux/mm_types.h | 49 |
| -rw-r--r-- | include/linux/reboot.h | 1 |

4 files changed, 60 insertions(+), 1 deletion(-)
```diff
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28e4d0e..b12079afbd5f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif
 
 #ifndef pte_accessible
-# define pte_accessible(pte) ((void)(pte),1)
+# define pte_accessible(mm, pte) ((void)(pte), 1)
 #endif
 
 #ifndef flush_tlb_fix_spurious_fault
```
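The generic fallback above ignores the new `mm` argument and keeps reporting every pte as accessible. The argument exists so that an architecture which overrides `pte_accessible()` can consult the per-mm `tlb_flush_pending` state introduced further down in this diff. A rough sketch of what such an override could look like, assuming the `mm_tlb_flush_pending()` helper from mm_types.h below (the real architecture-side change lives outside include/ and is not shown in this diffstat):

```c
/*
 * Illustrative sketch only, not part of this diff.  While a batched
 * protection change is in flight, a pte that has already been rewritten
 * may still be live in remote TLBs, so it has to be treated as
 * accessible until the pending flush completes.  A real implementation
 * would additionally narrow the check to PROT_NONE/NUMA protections.
 */
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_present(a))
		return true;

	if (mm_tlb_flush_pending(mm))
		return true;

	return false;
}
```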
```diff
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b58b20d..b7717d74da7f 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -90,10 +90,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
 extern int migrate_misplaced_page(struct page *page,
 			struct vm_area_struct *vma, int node);
 extern bool migrate_ratelimited(int node);
 #else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+	return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
 static inline int migrate_misplaced_page(struct page *page,
 			struct vm_area_struct *vma, int node)
 {
```
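`pmd_trans_migrating()` and `wait_migrate_huge_page()` give the THP NUMA-fault path a way to notice that the huge page it trapped on is already being migrated, and to wait for that migration to finish instead of operating on a page that is about to move. A sketch of the intended caller shape, with `ptl`, `pmdp` and `vma` standing in for the locals of a typical fault handler (the actual user is in mm/huge_memory.c and is not part of this include/ diff):

```c
/* Sketch only: back off and wait if the THP is mid-migration. */
if (unlikely(pmd_trans_migrating(*pmdp))) {
	spin_unlock(ptl);
	wait_migrate_huge_page(vma->anon_vma, pmdp);
	goto out;
}
```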
```diff
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd299418a934..ad0616f2fe2c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -443,6 +443,14 @@ struct mm_struct {
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
 #endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+	/*
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
+	 */
+	bool tlb_flush_pending;
+#endif
 	struct uprobes_state uprobes_state;
 };
 
@@ -459,4 +467,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+
+	/*
+	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * critical section updating the page tables
+	 */
+	smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */
```
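These helpers split the protocol into a write side and a read side: whoever batches pte updates sets `tlb_flush_pending` before touching the page tables and clears it only after the TLB flush, while readers such as an architecture's `pte_accessible()` or the migration code poll it through `mm_tlb_flush_pending()`. A sketch of the expected write-side pairing, modelled loosely on the mprotect/NUMA-hinting paths (the real callers are in mm/ and outside this diffstat; `pages_changed` is a stand-in local):

```c
/* Sketch only: batched protection change with a deferred TLB flush. */
set_tlb_flush_pending(mm);

/* ... walk [start, end) and rewrite ptes under the page table lock ... */

if (pages_changed)
	flush_tlb_range(vma, start, end);	/* make the new protections visible */
clear_tlb_flush_pending(mm);
```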
```diff
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f6f963..9e7db9e73cc1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
  * Architecture-specific implementations of sys_reboot commands.
  */
 
+extern void migrate_to_reboot_cpu(void);
 extern void machine_restart(char *cmd);
 extern void machine_halt(void);
 extern void machine_power_off(void);
```