diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 22:55:54 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-07-26 22:55:54 -0400 |
| commit | 0e06f5c0deeef0332a5da2ecb8f1fcf3e024d958 (patch) | |
| tree | e0f0af4aadf10c713c5cf1b65356844b3c9b3215 /include/asm-generic | |
| parent | f7816ad0f878dacd5f0120476f9b836ccf8699ea (diff) | |
| parent | 8f19b0c058d93a678a99dd6fec03af2e769943f2 (diff) | |
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
- a few misc bits
- ocfs2
- most(?) of MM
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (125 commits)
thp: fix comments of __pmd_trans_huge_lock()
cgroup: remove unnecessary 0 check from css_from_id()
cgroup: fix idr leak for the first cgroup root
mm: memcontrol: fix documentation for compound parameter
mm: memcontrol: remove BUG_ON in uncharge_list
mm: fix build warnings in <linux/compaction.h>
mm, thp: convert from optimistic swapin collapsing to conservative
mm, thp: fix comment inconsistency for swapin readahead functions
thp: update Documentation/{vm/transhuge,filesystems/proc}.txt
shmem: split huge pages beyond i_size under memory pressure
thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE
khugepaged: add support of collapse for tmpfs/shmem pages
shmem: make shmem_inode_info::lock irq-safe
khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page()
thp: extract khugepaged from mm/huge_memory.c
shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings
shmem: add huge pages support
shmem: get_unmapped_area align huge page
shmem: prepare huge= mount option and sysfs knob
mm, rmap: account shmem thp pages
...
Diffstat (limited to 'include/asm-generic')
| -rw-r--r-- | include/asm-generic/tlb.h | 59 |
1 file changed, 48 insertions, 11 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 9dbb739cafa0..c6d667187608 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
| @@ -107,6 +107,12 @@ struct mmu_gather { | |||
| 107 | struct mmu_gather_batch local; | 107 | struct mmu_gather_batch local; |
| 108 | struct page *__pages[MMU_GATHER_BUNDLE]; | 108 | struct page *__pages[MMU_GATHER_BUNDLE]; |
| 109 | unsigned int batch_count; | 109 | unsigned int batch_count; |
| 110 | /* | ||
| 111 | * __tlb_adjust_range will track the new addr here, | ||
| 112 | * so that we can adjust the range after the flush | ||
| 113 | */ | ||
| 114 | unsigned long addr; | ||
| 115 | int page_size; | ||
| 110 | }; | 116 | }; |
| 111 | 117 | ||
| 112 | #define HAVE_GENERIC_MMU_GATHER | 118 | #define HAVE_GENERIC_MMU_GATHER |
| @@ -115,23 +121,20 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long | |||
| 115 | void tlb_flush_mmu(struct mmu_gather *tlb); | 121 | void tlb_flush_mmu(struct mmu_gather *tlb); |
| 116 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, | 122 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, |
| 117 | unsigned long end); | 123 | unsigned long end); |
| 118 | int __tlb_remove_page(struct mmu_gather *tlb, struct page *page); | 124 | extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, |
| 119 | 125 | int page_size); | |
| 120 | /* tlb_remove_page | ||
| 121 | * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when | ||
| 122 | * required. | ||
| 123 | */ | ||
| 124 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
| 125 | { | ||
| 126 | if (!__tlb_remove_page(tlb, page)) | ||
| 127 | tlb_flush_mmu(tlb); | ||
| 128 | } | ||
| 129 | 126 | ||
| 130 | static inline void __tlb_adjust_range(struct mmu_gather *tlb, | 127 | static inline void __tlb_adjust_range(struct mmu_gather *tlb, |
| 131 | unsigned long address) | 128 | unsigned long address) |
| 132 | { | 129 | { |
| 133 | tlb->start = min(tlb->start, address); | 130 | tlb->start = min(tlb->start, address); |
| 134 | tlb->end = max(tlb->end, address + PAGE_SIZE); | 131 | tlb->end = max(tlb->end, address + PAGE_SIZE); |
| 132 | /* | ||
| 133 | * Track the last address with which we adjusted the range. This | ||
| 134 | * will be used later to adjust again after a mmu_flush due to | ||
| 135 | * failed __tlb_remove_page | ||
| 136 | */ | ||
| 137 | tlb->addr = address; | ||
| 135 | } | 138 | } |
| 136 | 139 | ||
| 137 | static inline void __tlb_reset_range(struct mmu_gather *tlb) | 140 | static inline void __tlb_reset_range(struct mmu_gather *tlb) |
| @@ -144,6 +147,40 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) | |||
| 144 | } | 147 | } |
| 145 | } | 148 | } |
| 146 | 149 | ||
| 150 | static inline void tlb_remove_page_size(struct mmu_gather *tlb, | ||
| 151 | struct page *page, int page_size) | ||
| 152 | { | ||
| 153 | if (__tlb_remove_page_size(tlb, page, page_size)) { | ||
| 154 | tlb_flush_mmu(tlb); | ||
| 155 | tlb->page_size = page_size; | ||
| 156 | __tlb_adjust_range(tlb, tlb->addr); | ||
| 157 | __tlb_remove_page_size(tlb, page, page_size); | ||
| 158 | } | ||
| 159 | } | ||
| 160 | |||
| 161 | static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
| 162 | { | ||
| 163 | return __tlb_remove_page_size(tlb, page, PAGE_SIZE); | ||
| 164 | } | ||
| 165 | |||
| 166 | /* tlb_remove_page | ||
| 167 | * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when | ||
| 168 | * required. | ||
| 169 | */ | ||
| 170 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
| 171 | { | ||
| 172 | return tlb_remove_page_size(tlb, page, PAGE_SIZE); | ||
| 173 | } | ||
| 174 | |||
| 175 | static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) | ||
| 176 | { | ||
| 177 | /* active->nr should be zero when we call this */ | ||
| 178 | VM_BUG_ON_PAGE(tlb->active->nr, page); | ||
| 179 | tlb->page_size = PAGE_SIZE; | ||
| 180 | __tlb_adjust_range(tlb, tlb->addr); | ||
| 181 | return __tlb_remove_page(tlb, page); | ||
| 182 | } | ||
| 183 | |||
| 147 | /* | 184 | /* |
| 148 | * In the case of tlb vma handling, we can optimise these away in the | 185 | * In the case of tlb vma handling, we can optimise these away in the |
| 149 | * case where we're doing a full MM flush. When we're doing a munmap, | 186 | * case where we're doing a full MM flush. When we're doing a munmap, |
