Diffstat (limited to 'mm')

 mm/memory.c         | 15 +++++++++++++++
 mm/memory_hotplug.c |  3 ---
 mm/migrate.c        |  5 +----
 mm/mlock.c          |  2 +-
 mm/vmalloc.c        | 21 ++++++++++++++++-----
 mm/vmscan.c         | 44 ++++++--------------------------------------
 6 files changed, 39 insertions(+), 51 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 164951c47305..fc031d68327e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3049,3 +3049,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	}
 	up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+	might_sleep();
+	/*
+	 * it would be nicer only to annotate paths which are not under
+	 * pagefault_disable, however that requires a larger audit and
+	 * providing helpers like get_user_atomic.
+	 */
+	if (!in_atomic() && current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
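The new annotation is meant to sit at the top of any helper that may touch user memory, so lockdep can report an mmap_sem inversion even on runs where no fault actually occurs. A minimal sketch of such a caller (the wrapper name is hypothetical and not part of this patch; __copy_from_user() is the usual arch primitive):

	/* Hypothetical wrapper, for illustration only. */
	static inline unsigned long
	copy_from_user_checked(void *to, const void __user *from, unsigned long n)
	{
		might_fault();	/* may sleep and take mmap_sem for reading */
		return __copy_from_user(to, from, n);
	}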
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6837a1014372..b5b2b15085a8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -22,7 +22,6 @@
 #include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/ioport.h>
-#include <linux/cpuset.h>
 #include <linux/delay.h>
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
@@ -498,8 +497,6 @@ int add_memory(int nid, u64 start, u64 size)
 	/* we online node here. we can't roll back from here. */
 	node_set_online(nid);
 
-	cpuset_track_online_nodes();
-
 	if (new_pgdat) {
 		ret = register_one_node(nid);
 		/*
diff --git a/mm/migrate.c b/mm/migrate.c
index 385db89f0c33..1e0d6b237f44 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -522,15 +522,12 @@ static int writeout(struct address_space *mapping, struct page *page)
 	remove_migration_ptes(page, page);
 
 	rc = mapping->a_ops->writepage(page, &wbc);
-	if (rc < 0)
-		/* I/O Error writing */
-		return -EIO;
 
 	if (rc != AOP_WRITEPAGE_ACTIVATE)
 		/* unlocked. Relock */
 		lock_page(page);
 
-	return -EAGAIN;
+	return (rc < 0) ? -EIO : -EAGAIN;
 }
 
 /*
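Besides folding the error into the final return, this rewrite fixes a locking imbalance: ->writepage() unlocks the page unless it returns AOP_WRITEPAGE_ACTIVATE, and the old early "return -EIO" skipped the relock, handing an unlocked page back to a caller that expects it locked. Condensed, the fixed flow is (comments mine, not verbatim kernel code):

	rc = mapping->a_ops->writepage(page, &wbc);
	if (rc != AOP_WRITEPAGE_ACTIVATE)
		lock_page(page);	/* restore the page-locked invariant */
	return (rc < 0) ? -EIO : -EAGAIN;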
diff --git a/mm/mlock.c b/mm/mlock.c
index a6da2aee940a..1ada366570cb 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -162,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	unsigned long addr = start;
 	struct page *pages[16];		/* 16 gives a reasonable batch */
 	int nr_pages = (end - start) / PAGE_SIZE;
-	int ret;
+	int ret = 0;
 	int gup_flags = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ba6b0f5f7fac..30f826d484f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -324,14 +324,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
@@ -362,7 +362,7 @@ retry:
 			goto found;
 	}
 
-	while (addr + size >= first->va_start && addr + size <= vend) {
+	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
 		n = rb_next(&first->rb_node);
@@ -522,13 +522,24 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
@@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
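try_purge_vmap_area_lazy() leans on the sync argument that __purge_vmap_area_lazy() already takes: sync == 0 turns the purge into a best-effort attempt, so the lazy-free path never blocks behind a purge that is already in flight. A sketch of how that flag is plausibly handled inside the purge routine (the purge_lock name is an assumption; it does not appear in the hunks above):

	static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					   int sync, int force_flush)
	{
		static DEFINE_SPINLOCK(purge_lock);	/* assumed serializer */

		if (!sync && !force_flush) {
			if (!spin_trylock(&purge_lock))
				return;		/* somebody else is purging */
		} else
			spin_lock(&purge_lock);

		/* ... drain the lazy list, flush TLBs, free the areas ... */

		spin_unlock(&purge_lock);
	}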
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3b5860294bb6..7ea1440b53db 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -623,6 +623,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * Try to allocate it some swap space here.
 		 */
 		if (PageAnon(page) && !PageSwapCache(page)) {
+			if (!(sc->gfp_mask & __GFP_IO))
+				goto keep_locked;
 			switch (try_to_munlock(page)) {
 			case SWAP_FAIL:		/* shouldn't happen */
 			case SWAP_AGAIN:
@@ -634,6 +636,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			}
 			if (!add_to_swap(page, GFP_ATOMIC))
 				goto activate_locked;
+			may_enter_fs = 1;
 		}
 #endif /* CONFIG_SWAP */
 
@@ -1386,9 +1389,9 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	file_prio = 200 - sc->swappiness;
 
 	/*
-	 *                  anon       recent_rotated[0]
-	 * %anon = 100 * ----------- / ----------------- * IO cost
-	 *            anon + file       rotate_sum
+	 * The amount of pressure on anon vs file pages is inversely
+	 * proportional to the fraction of recently scanned pages on
+	 * each list that were recently referenced and in active use.
	 */
 	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
 	ap /= zone->recent_rotated[0] + 1;
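To make the weighting concrete, a worked example under default settings (swappiness = 60, so anon_prio = 60 and file_prio = 140): a zone that recently scanned 1000 anon pages and rotated 100 of them back gets ap = (60 + 1) * (1000 + 1) / (100 + 1) = 604, while a file list with the same scan count but 500 rotations gets fp = (140 + 1) * (1000 + 1) / (500 + 1) = 281 — the list whose pages keep getting reused is scanned less. These raw weights are then normalized into per-list scan percentages further down in get_scan_ratio().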
@@ -2368,39 +2371,6 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
-static void show_page_path(struct page *page)
-{
-	char buf[256];
-	if (page_is_file_cache(page)) {
-		struct address_space *mapping = page->mapping;
-		struct dentry *dentry;
-		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-		spin_lock(&mapping->i_mmap_lock);
-		dentry = d_find_alias(mapping->host);
-		printk(KERN_INFO "rescued: %s %lu\n",
-		       dentry_path(dentry, buf, 256), pgoff);
-		spin_unlock(&mapping->i_mmap_lock);
-	} else {
-#if defined(CONFIG_MM_OWNER) && defined(CONFIG_MMU)
-		struct anon_vma *anon_vma;
-		struct vm_area_struct *vma;
-
-		anon_vma = page_lock_anon_vma(page);
-		if (!anon_vma)
-			return;
-
-		list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-			printk(KERN_INFO "rescued: anon %s\n",
-			       vma->vm_mm->owner->comm);
-			break;
-		}
-		page_unlock_anon_vma(anon_vma);
-#endif
-	}
-}
-
-
 /**
  * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
  * @page: page to check evictability and move to appropriate lru list
@@ -2421,8 +2391,6 @@ retry:
 	if (page_evictable(page, NULL)) {
 		enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
 
-		show_page_path(page);
-
 		__dec_zone_state(zone, NR_UNEVICTABLE);
 		list_move(&page->lru, &zone->lru[l].list);
 		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
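Taken together, the two shrink_page_list() additions gate swap allocation on the caller being allowed to start I/O at all, and then widen what the scanner may do once the page has swap backing. Condensed (comments mine, not from the patch):

	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!(sc->gfp_mask & __GFP_IO))
			goto keep_locked;	/* no I/O allowed: a swap slot
						 * could not be written anyway */
		/* ... try_to_munlock() handling as in the hunk above ... */
		if (!add_to_swap(page, GFP_ATOMIC))
			goto activate_locked;
		may_enter_fs = 1;		/* swap writeout may re-enter
						 * filesystem code */
	}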