Diffstat (limited to 'mm')
 mm/memory_hotplug.c |  3 ---
 mm/migrate.c        |  5 +----
 mm/mlock.c          |  2 +-
 mm/vmalloc.c        | 21 ++++++++++++++++-----
 mm/vmscan.c         |  9 ++++++---
 5 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6837a1014372..b5b2b15085a8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -22,7 +22,6 @@
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/ioport.h>
-#include <linux/cpuset.h>
 #include <linux/delay.h>
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
@@ -498,8 +497,6 @@ int add_memory(int nid, u64 start, u64 size)
 	/* we online node here. we can't roll back from here. */
 	node_set_online(nid);
 
-	cpuset_track_online_nodes();
-
 	if (new_pgdat) {
 		ret = register_one_node(nid);
 		/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 385db89f0c33..1e0d6b237f44 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -522,15 +522,12 @@ static int writeout(struct address_space *mapping, struct page *page)
 	remove_migration_ptes(page, page);
 
 	rc = mapping->a_ops->writepage(page, &wbc);
-	if (rc < 0)
-		/* I/O Error writing */
-		return -EIO;
 
 	if (rc != AOP_WRITEPAGE_ACTIVATE)
 		/* unlocked. Relock */
 		lock_page(page);
 
-	return -EAGAIN;
+	return (rc < 0) ? -EIO : -EAGAIN;
 }
 
 /*
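[note] The reordering matters because ->writepage() drops the page lock itself in every case except an AOP_WRITEPAGE_ACTIVATE return, so the old early "return -EIO" handed an unlocked page back to a caller of writeout() that expects it locked. A condensed view of the corrected control flow (a sketch of the hunk above, not the full function):

	rc = mapping->a_ops->writepage(page, &wbc);	/* unlocks unless ACTIVATE */

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		lock_page(page);	/* restore the caller's locked-page invariant */

	return (rc < 0) ? -EIO : -EAGAIN;	/* report the error only after re-locking */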
diff --git a/mm/mlock.c b/mm/mlock.c
index a6da2aee940a..1ada366570cb 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -162,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	unsigned long addr = start;
 	struct page *pages[16]; /* 16 gives a reasonable batch */
 	int nr_pages = (end - start) / PAGE_SIZE;
-	int ret;
+	int ret = 0;
 	int gup_flags = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
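[note] Initializing ret guards the case where the batched loop body never runs, presumably when nr_pages computes to zero, so the old uninitialized ret was returned as the result. A minimal standalone sketch of the pattern, with hypothetical names:

	/* Hypothetical stand-in for the batched get_user_pages() loop. */
	static long mlock_pages_sketch(unsigned long start, unsigned long end)
	{
		int nr_pages = (end - start) / 4096;
		int ret = 0;	/* the fix: a defined result for empty ranges */

		while (nr_pages > 0) {
			ret = nr_pages < 16 ? nr_pages : 16;	/* one batch */
			nr_pages -= ret;
		}
		return ret;	/* was indeterminate here when start == end */
	}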
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ba6b0f5f7fac..30f826d484f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -324,14 +324,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
@@ -362,7 +362,7 @@ retry:
 		goto found;
 	}
 
-	while (addr + size >= first->va_start && addr + size <= vend) {
+	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
 		n = rb_next(&first->rb_node);
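[note] Two related correctness fixes in this file so far. Moving addr = ALIGN(vstart, align) below the retry: label means a retry after purging lazy areas restarts the search from vstart rather than from a stale cursor. The ">=" to ">" change fixes an off-by-one for half-open ranges: a candidate block [addr, addr + size) that ends exactly at first->va_start does not collide with [va_start, va_end), so the old test needlessly skipped an exactly-fitting hole. A standalone statement of the overlap test being assumed:

	/* Half-open interval overlap: [a0, a1) and [b0, b1) collide iff
	 * a0 < b1 && a1 > b0. With ">=" on the second term, blocks that
	 * merely abut were treated as colliding.
	 */
	static int overlaps(unsigned long a0, unsigned long a1,
			    unsigned long b0, unsigned long b1)
	{
		return a0 < b1 && a1 > b0;
	}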
@@ -522,13 +522,24 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
@@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
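[note] The split lets the lazy-free path avoid blocking: judging by the new function's comment ("Don't bother if somebody is already purging"), the sync=0 variant bails out when a purge is already in flight, while explicit purge_vmap_area_lazy() callers pass sync=1 and wait. A userspace analogue of the assumed lock-vs-trylock behaviour behind __purge_vmap_area_lazy()'s third argument:

	#include <pthread.h>

	static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;

	static void purge_sketch(int sync)
	{
		if (sync)
			pthread_mutex_lock(&purge_lock);	/* purge_vmap_area_lazy() */
		else if (pthread_mutex_trylock(&purge_lock))
			return;	/* try_ variant: already purging, don't wait */

		/* ... walk the lazy list and free the areas ... */

		pthread_mutex_unlock(&purge_lock);
	}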
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c141b3e78071..7ea1440b53db 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
 			switch (try_to_munlock(page)) {
 			case SWAP_FAIL:		/* shouldn't happen */
 			case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+			may_enter_fs = 1;
 		}
 #endif /* CONFIG_SWAP */
 
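[note] Both changes gate swap-out on what the allocation context allows: allocating swap for an anon page starts I/O, so reclaimers whose gfp_mask lacks __GFP_IO must keep the page; and once the page is in the swap cache its writeback goes to the swap device rather than through a filesystem, so may_enter_fs can be raised afterwards. A condensed view of the combined logic (the try_to_munlock() handling between the two hunks is elided):

	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!(sc->gfp_mask & __GFP_IO))
			goto keep_locked;	/* caller cannot tolerate I/O */
		if (!add_to_swap(page, GFP_ATOMIC))
			goto activate_locked;
		may_enter_fs = 1;	/* swap I/O won't recurse into a filesystem */
	}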
@@ -1386,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	file_prio = 200 - sc->swappiness;
 
 	/*
-	 *                  anon       recent_rotated[0]
-	 * %anon = 100 * ----------- / ----------------- * IO cost
-	 *           anon + file         rotate_sum
+	 * The amount of pressure on anon vs file pages is inversely
+	 * proportional to the fraction of recently scanned pages on
+	 * each list that were recently referenced and in active use.
	 */
 	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
 	ap /= zone->recent_rotated[0] + 1;
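[note] For scale, a worked example of the anon side under assumed counter values (taking anon_prio = sc->swappiness, symmetric to the file_prio line shown above): with the default swappiness of 60, anon_prio = 60. If zone->recent_scanned[0] is 1000 and 500 of those pages were found referenced (recent_rotated[0] = 500), then ap = 61 * 1001 / 501 = 121; with only 100 rotated, ap = 61 * 1001 / 101 = 604. Heavier rotation, meaning pages still in active use, thus cuts reclaim pressure on that list roughly in proportion.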