author     Linus Torvalds <torvalds@linux-foundation.org>   2017-08-10 19:20:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-08-10 19:20:52 -0400
commit     27df704d43274578ca097c8a60f265faaacee7fb (patch)
tree       011ca4ff99b0dc39b64554aceb5c80071b29930b /include/linux
parent     4e082e9ba7cdd7466b1ea81527feb93f4da8c580 (diff)
parent     e86b298bebf7e799e4b7232e9135799f1947552e (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "21 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (21 commits)
  userfaultfd: replace ENOSPC with ESRCH in case mm has gone during copy/zeropage
  zram: rework copy of compressor name in comp_algorithm_store()
  rmap: do not call mmu_notifier_invalidate_page() under ptl
  mm: fix list corruptions on shmem shrinklist
  mm/balloon_compaction.c: don't zero ballooned pages
  MAINTAINERS: copy virtio on balloon_compaction.c
  mm: fix KSM data corruption
  mm: fix MADV_[FREE|DONTNEED] TLB flush miss problem
  mm: make tlb_flush_pending global
  mm: refactor TLB gathering API
  Revert "mm: numa: defer TLB flush for THP migration as long as possible"
  mm: migrate: fix barriers around tlb_flush_pending
  mm: migrate: prevent racy access to tlb_flush_pending
  fault-inject: fix wrong should_fail() decision in task context
  test_kmod: fix small memory leak on filesystem tests
  test_kmod: fix the lock in register_test_dev_kmod()
  test_kmod: fix bug which allows negative values on two config options
  test_kmod: fix spelling mistake: "EMTPY" -> "EMPTY"
  userfaultfd: hugetlbfs: remove superfluous page unlock in VM_SHARED case
  mm: ratelimit PFNs busy info message
  ...
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/mm_types.h   64
1 file changed, 38 insertions, 26 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7f384bb62d8e..3cadee0a3508 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -487,14 +487,12 @@ struct mm_struct {
 	/* numa_scan_seq prevents two threads setting pte_numa */
 	int numa_scan_seq;
 #endif
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
 	/*
 	 * An operation with batched TLB flushing is going on. Anything that
 	 * can move process memory needs to flush the TLB when moving a
 	 * PROT_NONE or PROT_NUMA mapped page.
 	 */
-	bool tlb_flush_pending;
-#endif
+	atomic_t tlb_flush_pending;
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	/* See flush_tlb_batched_pending() */
 	bool tlb_flush_batched;
@@ -522,46 +520,60 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
-#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+struct mmu_gather;
+extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+				unsigned long start, unsigned long end);
+extern void tlb_finish_mmu(struct mmu_gather *tlb,
+				unsigned long start, unsigned long end);
+
 /*
  * Memory barriers to keep this state in sync are graciously provided by
  * the page table locks, outside of which no page table modifications happen.
- * The barriers below prevent the compiler from re-ordering the instructions
- * around the memory barriers that are already present in the code.
+ * The barriers are used to ensure the order between tlb_flush_pending updates,
+ * which happen while the lock is not taken, and the PTE updates, which happen
+ * while the lock is taken, are serialized.
  */
 static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 {
-	barrier();
-	return mm->tlb_flush_pending;
+	return atomic_read(&mm->tlb_flush_pending) > 0;
+}
+
+/*
+ * Returns true if there are two above TLB batching threads in parallel.
+ */
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+	return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+	atomic_set(&mm->tlb_flush_pending, 0);
 }
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
 {
-	mm->tlb_flush_pending = true;
+	atomic_inc(&mm->tlb_flush_pending);
 
 	/*
-	 * Guarantee that the tlb_flush_pending store does not leak into the
+	 * Guarantee that the tlb_flush_pending increase does not leak into the
 	 * critical section updating the page tables
 	 */
 	smp_mb__before_spinlock();
 }
+
 /* Clearing is done after a TLB flush, which also provides a barrier. */
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
-{
-	barrier();
-	mm->tlb_flush_pending = false;
-}
-#else
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
-	return false;
-}
-static inline void set_tlb_flush_pending(struct mm_struct *mm)
-{
-}
-static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 {
+	/*
+	 * Guarantee that the tlb_flush_pending does not not leak into the
+	 * critical section, since we must order the PTE change and changes to
+	 * the pending TLB flush indication. We could have relied on TLB flush
+	 * as a memory barrier, but this behavior is not clearly documented.
+	 */
+	smp_mb__before_atomic();
+	atomic_dec(&mm->tlb_flush_pending);
 }
-#endif
 
 struct vm_fault;
 
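For readers of this diff, the helpers above imply a calling convention that the patch itself does not show. The sketch below is illustrative only and is not part of this commit: example_change_protection() and example_try_to_unmap_one() are invented names, while inc_tlb_flush_pending(), dec_tlb_flush_pending() and mm_tlb_flush_pending() are the helpers introduced above, and flush_tlb_range()/flush_tlb_page() are the usual arch TLB-flush primitives. A batched operation raises the counter before it starts touching PTEs, flushes, then lowers it; code that may move or unmap pages checks the counter under the page table lock and flushes eagerly while such an operation is in flight.

/* Writer side (hypothetical caller, modelled on change_protection()-style code). */
static void example_change_protection(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	inc_tlb_flush_pending(mm);	/* made visible before any PTE is changed */

	/* ... modify PTEs under the page table lock ... */

	flush_tlb_range(vma, start, end);
	dec_tlb_flush_pending(mm);	/* dropped only after the TLB flush is done */
}

/*
 * Reader side (hypothetical): code that unmaps or migrates a page checks,
 * under the page table lock, whether a batched flush is still pending and
 * flushes the page itself instead of relying on the deferred flush.
 */
static void example_try_to_unmap_one(struct vm_area_struct *vma, unsigned long addr)
{
	if (mm_tlb_flush_pending(vma->vm_mm))
		flush_tlb_page(vma, addr);
}

Because the field is now an atomic counter rather than a bool, two such writers can overlap safely, and mm_tlb_flush_nested() (counter greater than 1) lets callers detect that case.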