Diffstat (limited to 'mm')
-rw-r--r--  mm/frontswap.c    2
-rw-r--r--  mm/hugetlb.c      2
-rw-r--r--  mm/memcontrol.c  14
-rw-r--r--  mm/memory.c       9
-rw-r--r--  mm/migrate.c     23
-rw-r--r--  mm/page_alloc.c   6
-rw-r--r--  mm/swap_state.c  18
-rw-r--r--  mm/swapfile.c     2
8 files changed, 47 insertions, 29 deletions
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 538367ef1372..1b24bdcb3197 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -319,7 +319,7 @@ void __frontswap_invalidate_area(unsigned type)
                         return;
                 frontswap_ops->invalidate_area(type);
                 atomic_set(&sis->frontswap_pages, 0);
-                memset(sis->frontswap_map, 0, sis->max / sizeof(long));
+                bitmap_zero(sis->frontswap_map, sis->max);
         }
         clear_bit(type, need_init);
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f8feeeca6686..e2bfbf73a551 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2839,7 +2839,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         if (ptep) {
                 entry = huge_ptep_get(ptep);
                 if (unlikely(is_hugetlb_entry_migration(entry))) {
-                        migration_entry_wait(mm, (pmd_t *)ptep, address);
+                        migration_entry_wait_huge(mm, ptep);
                         return 0;
                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                         return VM_FAULT_HWPOISON_LARGE |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 010d6c14129a..194721839cf5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1199,7 +1199,6 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 
                 mz = mem_cgroup_zoneinfo(root, nid, zid);
                 iter = &mz->reclaim_iter[reclaim->priority];
-                last_visited = iter->last_visited;
                 if (prev && reclaim->generation != iter->generation) {
                         iter->last_visited = NULL;
                         goto out_unlock;
@@ -1218,13 +1217,12 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                  * is alive.
                  */
                 dead_count = atomic_read(&root->dead_count);
-                smp_rmb();
-                last_visited = iter->last_visited;
-                if (last_visited) {
-                        if ((dead_count != iter->last_dead_count) ||
-                            !css_tryget(&last_visited->css)) {
+                if (dead_count == iter->last_dead_count) {
+                        smp_rmb();
+                        last_visited = iter->last_visited;
+                        if (last_visited &&
+                            !css_tryget(&last_visited->css))
                                 last_visited = NULL;
-                        }
                 }
         }
 
@@ -3141,8 +3139,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
                         return -ENOMEM;
                 }
 
-                INIT_WORK(&s->memcg_params->destroy,
-                                kmem_cache_destroy_work_func);
                 s->memcg_params->is_root_cache = true;
 
                 /*
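
The mem_cgroup_iter() hunks above only reuse the cached last_visited pointer when the iterator's recorded dead_count still matches the root's, and even then only after css_tryget() succeeds. A minimal userspace sketch of that general shape follows; the types, names, and C11 atomics are illustrative stand-ins, not the memcg iterator's actual code or its exact memory-ordering guarantees.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
        atomic_int refcount;            /* 0 means the object is dead */
};

struct cache_slot {
        struct obj *cached;             /* pointer stashed on an earlier visit */
        int cached_gen;                 /* generation it was stashed under */
};

/* Take a reference only if the object is still alive. */
static bool obj_tryget(struct obj *o)
{
        int c = atomic_load(&o->refcount);

        while (c > 0)
                if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                        return true;
        return false;
}

/*
 * Reuse the cached pointer only when the generation counter it was
 * cached under is unchanged and a reference can actually be taken;
 * otherwise fall back to NULL, as the hunk above does.
 */
static struct obj *cache_lookup(struct cache_slot *slot, atomic_int *gen)
{
        struct obj *o = NULL;

        if (atomic_load(gen) == slot->cached_gen) {
                o = slot->cached;
                if (o && !obj_tryget(o))
                        o = NULL;
        }
        return o;
}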
diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882fbd72..61a262b08e53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -220,7 +220,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
         tlb->start      = -1UL;
         tlb->end        = 0;
         tlb->need_flush = 0;
-        tlb->fast_mode  = (num_possible_cpus() == 1);
         tlb->local.next = NULL;
         tlb->local.nr   = 0;
         tlb->local.max  = ARRAY_SIZE(tlb->__pages);
@@ -244,9 +243,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
         tlb_table_flush(tlb);
 #endif
 
-        if (tlb_fast_mode(tlb))
-                return;
-
         for (batch = &tlb->local; batch; batch = batch->next) {
                 free_pages_and_swap_cache(batch->pages, batch->nr);
                 batch->nr = 0;
@@ -288,11 +284,6 @@ int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 
         VM_BUG_ON(!tlb->need_flush);
 
-        if (tlb_fast_mode(tlb)) {
-                free_page_and_swap_cache(page);
-                return 1; /* avoid calling tlb_flush_mmu() */
-        }
-
         batch = tlb->active;
         batch->pages[batch->nr++] = page;
         if (batch->nr == batch->max) {
diff --git a/mm/migrate.c b/mm/migrate.c
index b1f57501de9c..6f0c24438bba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -200,15 +200,14 @@ static void remove_migration_ptes(struct page *old, struct page *new)
  * get to the page and wait until migration is finished.
  * When we return from this function the fault will be retried.
  */
-void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
-                                unsigned long address)
+static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+                                spinlock_t *ptl)
 {
-        pte_t *ptep, pte;
-        spinlock_t *ptl;
+        pte_t pte;
         swp_entry_t entry;
         struct page *page;
 
-        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+        spin_lock(ptl);
         pte = *ptep;
         if (!is_swap_pte(pte))
                 goto out;
@@ -236,6 +235,20 @@ out:
         pte_unmap_unlock(ptep, ptl);
 }
 
+void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
+                                unsigned long address)
+{
+        spinlock_t *ptl = pte_lockptr(mm, pmd);
+        pte_t *ptep = pte_offset_map(pmd, address);
+        __migration_entry_wait(mm, ptep, ptl);
+}
+
+void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte)
+{
+        spinlock_t *ptl = &(mm)->page_table_lock;
+        __migration_entry_wait(mm, pte, ptl);
+}
+
 #ifdef CONFIG_BLOCK
 /* Returns true if all buffers are successfully locked */
 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 378a15bcd649..c3edb624fccf 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1628,6 +1628,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
         long min = mark;
         long lowmem_reserve = z->lowmem_reserve[classzone_idx];
         int o;
+        long free_cma = 0;
 
         free_pages -= (1 << order) - 1;
         if (alloc_flags & ALLOC_HIGH)
@@ -1637,9 +1638,10 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 #ifdef CONFIG_CMA
         /* If allocation can't use CMA areas don't use free CMA pages */
         if (!(alloc_flags & ALLOC_CMA))
-                free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
+                free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif
-        if (free_pages <= min + lowmem_reserve)
+
+        if (free_pages - free_cma <= min + lowmem_reserve)
         return false;
         for (o = 0; o < order; o++) {
                 /* At the next order, this order's pages become unavailable */
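
As the page_alloc.c hunks above show, the NR_FREE_CMA_PAGES discount now only biases the initial watermark comparison instead of being subtracted from free_pages itself, so the per-order loop that follows still operates on the undiminished count. A simplified userspace sketch of that shape (hypothetical struct and field names, no lowmem_reserve or allocation flags) is:

#include <stdbool.h>

struct zone_sketch {
        long nr_free[11];       /* free pages per order (illustrative) */
        long nr_free_cma;       /* free pages sitting in CMA pageblocks */
};

static bool watermark_ok_sketch(const struct zone_sketch *z, int order,
                                long min, long free_pages, bool can_use_cma)
{
        long free_cma = can_use_cma ? 0 : z->nr_free_cma;
        int o;

        /* Only this comparison discounts the unusable CMA pages... */
        if (free_pages - free_cma <= min)
                return false;

        /* ...so the order-by-order accounting still sees free_pages intact. */
        for (o = 0; o < order; o++) {
                free_pages -= z->nr_free[o] << o;
                min >>= 1;
                if (free_pages <= min)
                        return false;
        }
        return true;
}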
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b3d40dcf3624..f24ab0dff554 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -336,8 +336,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
-               if (err == -EEXIST) {   /* seems racy */
+               if (err == -EEXIST) {
                        radix_tree_preload_end();
+                       /*
+                        * We might race against get_swap_page() and stumble
+                        * across a SWAP_HAS_CACHE swap_map entry whose page
+                        * has not been brought into the swapcache yet, while
+                        * the other end is scheduled away waiting on discard
+                        * I/O completion at scan_swap_map().
+                        *
+                        * In order to avoid turning this transitory state
+                        * into a permanent loop around this -EEXIST case
+                        * if !CONFIG_PREEMPT and the I/O completion happens
+                        * to be waiting on the CPU waitqueue where we are now
+                        * busy looping, we just conditionally invoke the
+                        * scheduler here, if there are some more important
+                        * tasks to run.
+                        */
+                       cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
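
The new comment in the hunk above explains why a bare retry on -EEXIST can busy-loop on a non-preemptible kernel, and why the fix is to yield before going around the loop again. A self-contained userspace sketch of that retry shape (prepare_slot() is a made-up stand-in that fails transiently a few times before succeeding) might look like:

#include <errno.h>
#include <sched.h>
#include <stdio.h>

/* Hypothetical stand-in: reports a transient collision a few times, then succeeds. */
static int prepare_slot(int slot)
{
        static int transient_left = 3;

        (void)slot;
        if (transient_left > 0) {
                transient_left--;
                return -EEXIST;
        }
        return 0;
}

static int prepare_with_retry(int slot)
{
        int err;

        for (;;) {
                err = prepare_slot(slot);
                if (err == -EEXIST) {
                        /* Transient state: let whoever holds it up get the CPU. */
                        sched_yield();
                        continue;
                }
                return err;     /* 0 on success; any other error is final */
        }
}

int main(void)
{
        printf("prepare_with_retry() -> %d\n", prepare_with_retry(0));
        return 0;
}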
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6c340d908b27..746af55b8455 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2116,7 +2116,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
         }
         /* frontswap enabled? set up bit-per-page map for frontswap */
         if (frontswap_enabled)
-                frontswap_map = vzalloc(maxpages / sizeof(long));
+                frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long));
 
         if (p->bdev) {
                 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
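
The frontswap.c and swapfile.c hunks above size and clear frontswap_map as a bitmap of maxpages bits: BITS_TO_LONGS(maxpages) longs for the allocation, and a bit count (not a byte count) passed to bitmap_zero(). A small userspace sketch of that sizing idiom, with BITS_PER_LONG and BITS_TO_LONGS defined locally as stand-ins for the kernel macros:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG           (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nbits)    (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Allocate a zeroed bitmap able to hold nbits bits, rounded up to whole longs. */
static unsigned long *bitmap_alloc_sketch(unsigned long nbits)
{
        return calloc(BITS_TO_LONGS(nbits), sizeof(long));
}

/* Clear every bit; the byte count is derived from the bit count. */
static void bitmap_zero_sketch(unsigned long *map, unsigned long nbits)
{
        memset(map, 0, BITS_TO_LONGS(nbits) * sizeof(long));
}

int main(void)
{
        unsigned long maxpages = 100000;        /* illustrative swap size in pages */
        unsigned long *map = bitmap_alloc_sketch(maxpages);

        if (!map)
                return 1;
        bitmap_zero_sketch(map, maxpages);
        printf("%lu bits backed by %lu bytes\n", maxpages,
               (unsigned long)(BITS_TO_LONGS(maxpages) * sizeof(long)));
        free(map);
        return 0;
}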