aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/linux/mm_types.h44
2 files changed, 45 insertions, 1 deletions
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index a59ff51b0166..b58268a5ddd4 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -220,7 +220,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif
 
 #ifndef pte_accessible
-# define pte_accessible(pte)		((void)(pte), 1)
+# define pte_accessible(mm, pte)	((void)(pte), 1)
 #endif
 
 #ifndef flush_tlb_fix_spurious_fault
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4a189ba6b128..49f0ada525a8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -437,6 +437,14 @@ struct mm_struct {
 	 */
 	int first_nid;
 #endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+	/*
+	 * An operation with batched TLB flushing is going on. Anything that
+	 * can move process memory needs to flush the TLB when moving a
+	 * PROT_NONE or PROT_NUMA mapped page.
+	 */
+	bool tlb_flush_pending;
+#endif
 	struct uprobes_state uprobes_state;
 };
 
@@ -457,4 +465,40 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 	return mm->cpu_vm_mask_var;
 }
 
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+	mm->tlb_flush_pending = true;
+	barrier();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+	barrier();
+	mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+	return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
 #endif /* _LINUX_MM_TYPES_H */