Diffstat (limited to 'mm')
| -rw-r--r-- | mm/backing-dev.c | 74 |
| -rw-r--r-- | mm/dmapool.c | 2 |
| -rw-r--r-- | mm/filemap.c | 73 |
| -rw-r--r-- | mm/highmem.c | 66 |
| -rw-r--r-- | mm/hugetlb.c | 241 |
| -rw-r--r-- | mm/internal.h | 2 |
| -rw-r--r-- | mm/ksm.c | 7 |
| -rw-r--r-- | mm/maccess.c | 2 |
| -rw-r--r-- | mm/memcontrol.c | 488 |
| -rw-r--r-- | mm/memory-failure.c | 184 |
| -rw-r--r-- | mm/memory.c | 35 |
| -rw-r--r-- | mm/memory_hotplug.c | 79 |
| -rw-r--r-- | mm/mempolicy.c | 20 |
| -rw-r--r-- | mm/migrate.c | 249 |
| -rw-r--r-- | mm/mmap.c | 2 |
| -rw-r--r-- | mm/mprotect.c | 2 |
| -rw-r--r-- | mm/mremap.c | 4 |
| -rw-r--r-- | mm/nommu.c | 52 |
| -rw-r--r-- | mm/oom_kill.c | 33 |
| -rw-r--r-- | mm/page-writeback.c | 31 |
| -rw-r--r-- | mm/page_alloc.c | 132 |
| -rw-r--r-- | mm/page_isolation.c | 3 |
| -rw-r--r-- | mm/pagewalk.c | 5 |
| -rw-r--r-- | mm/rmap.c | 37 |
| -rw-r--r-- | mm/shmem.c | 17 |
| -rw-r--r-- | mm/slab.c | 2 |
| -rw-r--r-- | mm/slub.c | 7 |
| -rw-r--r-- | mm/swap.c | 1 |
| -rw-r--r-- | mm/swapfile.c | 49 |
| -rw-r--r-- | mm/vmalloc.c | 84 |
| -rw-r--r-- | mm/vmscan.c | 218 |
| -rw-r--r-- | mm/vmstat.c | 44 |
32 files changed, 1602 insertions, 643 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 65d420499a61..027100d30227 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
| @@ -74,11 +74,11 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) | |||
| 74 | 74 | ||
| 75 | nr_wb = nr_dirty = nr_io = nr_more_io = 0; | 75 | nr_wb = nr_dirty = nr_io = nr_more_io = 0; |
| 76 | spin_lock(&inode_lock); | 76 | spin_lock(&inode_lock); |
| 77 | list_for_each_entry(inode, &wb->b_dirty, i_list) | 77 | list_for_each_entry(inode, &wb->b_dirty, i_wb_list) |
| 78 | nr_dirty++; | 78 | nr_dirty++; |
| 79 | list_for_each_entry(inode, &wb->b_io, i_list) | 79 | list_for_each_entry(inode, &wb->b_io, i_wb_list) |
| 80 | nr_io++; | 80 | nr_io++; |
| 81 | list_for_each_entry(inode, &wb->b_more_io, i_list) | 81 | list_for_each_entry(inode, &wb->b_more_io, i_wb_list) |
| 82 | nr_more_io++; | 82 | nr_more_io++; |
| 83 | spin_unlock(&inode_lock); | 83 | spin_unlock(&inode_lock); |
| 84 | 84 | ||
| @@ -362,7 +362,7 @@ static int bdi_forker_thread(void *ptr) | |||
| 362 | { | 362 | { |
| 363 | struct bdi_writeback *me = ptr; | 363 | struct bdi_writeback *me = ptr; |
| 364 | 364 | ||
| 365 | current->flags |= PF_FLUSHER | PF_SWAPWRITE; | 365 | current->flags |= PF_SWAPWRITE; |
| 366 | set_freezable(); | 366 | set_freezable(); |
| 367 | 367 | ||
| 368 | /* | 368 | /* |
| @@ -729,6 +729,7 @@ static wait_queue_head_t congestion_wqh[2] = { | |||
| 729 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), | 729 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), |
| 730 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) | 730 | __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) |
| 731 | }; | 731 | }; |
| 732 | static atomic_t nr_bdi_congested[2]; | ||
| 732 | 733 | ||
| 733 | void clear_bdi_congested(struct backing_dev_info *bdi, int sync) | 734 | void clear_bdi_congested(struct backing_dev_info *bdi, int sync) |
| 734 | { | 735 | { |
| @@ -736,7 +737,8 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int sync) | |||
| 736 | wait_queue_head_t *wqh = &congestion_wqh[sync]; | 737 | wait_queue_head_t *wqh = &congestion_wqh[sync]; |
| 737 | 738 | ||
| 738 | bit = sync ? BDI_sync_congested : BDI_async_congested; | 739 | bit = sync ? BDI_sync_congested : BDI_async_congested; |
| 739 | clear_bit(bit, &bdi->state); | 740 | if (test_and_clear_bit(bit, &bdi->state)) |
| 741 | atomic_dec(&nr_bdi_congested[sync]); | ||
| 740 | smp_mb__after_clear_bit(); | 742 | smp_mb__after_clear_bit(); |
| 741 | if (waitqueue_active(wqh)) | 743 | if (waitqueue_active(wqh)) |
| 742 | wake_up(wqh); | 744 | wake_up(wqh); |
| @@ -748,7 +750,8 @@ void set_bdi_congested(struct backing_dev_info *bdi, int sync) | |||
| 748 | enum bdi_state bit; | 750 | enum bdi_state bit; |
| 749 | 751 | ||
| 750 | bit = sync ? BDI_sync_congested : BDI_async_congested; | 752 | bit = sync ? BDI_sync_congested : BDI_async_congested; |
| 751 | set_bit(bit, &bdi->state); | 753 | if (!test_and_set_bit(bit, &bdi->state)) |
| 754 | atomic_inc(&nr_bdi_congested[sync]); | ||
| 752 | } | 755 | } |
| 753 | EXPORT_SYMBOL(set_bdi_congested); | 756 | EXPORT_SYMBOL(set_bdi_congested); |
| 754 | 757 | ||
| @@ -764,13 +767,72 @@ EXPORT_SYMBOL(set_bdi_congested); | |||
| 764 | long congestion_wait(int sync, long timeout) | 767 | long congestion_wait(int sync, long timeout) |
| 765 | { | 768 | { |
| 766 | long ret; | 769 | long ret; |
| 770 | unsigned long start = jiffies; | ||
| 767 | DEFINE_WAIT(wait); | 771 | DEFINE_WAIT(wait); |
| 768 | wait_queue_head_t *wqh = &congestion_wqh[sync]; | 772 | wait_queue_head_t *wqh = &congestion_wqh[sync]; |
| 769 | 773 | ||
| 770 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); | 774 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); |
| 771 | ret = io_schedule_timeout(timeout); | 775 | ret = io_schedule_timeout(timeout); |
| 772 | finish_wait(wqh, &wait); | 776 | finish_wait(wqh, &wait); |
| 777 | |||
| 778 | trace_writeback_congestion_wait(jiffies_to_usecs(timeout), | ||
| 779 | jiffies_to_usecs(jiffies - start)); | ||
| 780 | |||
| 773 | return ret; | 781 | return ret; |
| 774 | } | 782 | } |
| 775 | EXPORT_SYMBOL(congestion_wait); | 783 | EXPORT_SYMBOL(congestion_wait); |
| 776 | 784 | ||
| 785 | /** | ||
| 786 | * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes | ||
| 787 | * @zone: A zone to check if it is heavily congested | ||
| 788 | * @sync: SYNC or ASYNC IO | ||
| 789 | * @timeout: timeout in jiffies | ||
| 790 | * | ||
| 791 | * In the event of a congested backing_dev (any backing_dev) and the given | ||
| 792 | * @zone has experienced recent congestion, this waits for up to @timeout | ||
| 793 | * jiffies for either a BDI to exit congestion of the given @sync queue | ||
| 794 | * or a write to complete. | ||
| 795 | * | ||
| 796 | * In the absence of zone congestion, cond_resched() is called to yield | ||
| 797 | * the processor if necessary but otherwise does not sleep. | ||
| 798 | * | ||
| 799 | * The return value is 0 if the sleep is for the full timeout. Otherwise, | ||
| 800 | * it is the number of jiffies that were still remaining when the function | ||
| 801 | * returned. return_value == timeout implies the function did not sleep. | ||
| 802 | */ | ||
| 803 | long wait_iff_congested(struct zone *zone, int sync, long timeout) | ||
| 804 | { | ||
| 805 | long ret; | ||
| 806 | unsigned long start = jiffies; | ||
| 807 | DEFINE_WAIT(wait); | ||
| 808 | wait_queue_head_t *wqh = &congestion_wqh[sync]; | ||
| 809 | |||
| 810 | /* | ||
| 811 | * If there is no congestion, or heavy congestion is not being | ||
| 812 | * encountered in the current zone, yield if necessary instead | ||
| 813 | * of sleeping on the congestion queue | ||
| 814 | */ | ||
| 815 | if (atomic_read(&nr_bdi_congested[sync]) == 0 || | ||
| 816 | !zone_is_reclaim_congested(zone)) { | ||
| 817 | cond_resched(); | ||
| 818 | |||
| 819 | /* In case we scheduled, work out time remaining */ | ||
| 820 | ret = timeout - (jiffies - start); | ||
| 821 | if (ret < 0) | ||
| 822 | ret = 0; | ||
| 823 | |||
| 824 | goto out; | ||
| 825 | } | ||
| 826 | |||
| 827 | /* Sleep until uncongested or a write happens */ | ||
| 828 | prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); | ||
| 829 | ret = io_schedule_timeout(timeout); | ||
| 830 | finish_wait(wqh, &wait); | ||
| 831 | |||
| 832 | out: | ||
| 833 | trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout), | ||
| 834 | jiffies_to_usecs(jiffies - start)); | ||
| 835 | |||
| 836 | return ret; | ||
| 837 | } | ||
| 838 | EXPORT_SYMBOL(wait_iff_congested); | ||
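A usage note on the new helper (not part of the diff): wait_iff_congested() is meant to replace unconditional congestion_wait() calls in reclaim, sleeping only when some BDI is congested and the given zone has recently been flagged as congested. A minimal sketch, assuming a reclaim path that already has a struct zone pointer and uses the existing BLK_RW_ASYNC selector:

        /* Illustrative sketch only, not code from this patch. */
        static void throttle_reclaim_example(struct zone *zone)
        {
                /*
                 * Sleeps for up to HZ/10 only if a backing device is congested
                 * and this zone recently hit reclaim congestion; otherwise it
                 * just calls cond_resched() and returns almost immediately.
                 */
                wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
        }

Compared with congestion_wait(BLK_RW_ASYNC, HZ/10), this avoids stalling when no device is actually congested, which is the behaviour the tracepoints above are meant to measure.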
diff --git a/mm/dmapool.c b/mm/dmapool.c
index 3df063706f53..4df2de77e069 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
| @@ -311,6 +311,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, | |||
| 311 | size_t offset; | 311 | size_t offset; |
| 312 | void *retval; | 312 | void *retval; |
| 313 | 313 | ||
| 314 | might_sleep_if(mem_flags & __GFP_WAIT); | ||
| 315 | |||
| 314 | spin_lock_irqsave(&pool->lock, flags); | 316 | spin_lock_irqsave(&pool->lock, flags); |
| 315 | restart: | 317 | restart: |
| 316 | list_for_each_entry(page, &pool->page_list, page_list) { | 318 | list_for_each_entry(page, &pool->page_list, page_list) { |
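The added might_sleep_if(mem_flags & __GFP_WAIT) is purely a debugging aid: with sleep-in-atomic checking enabled it warns when dma_pool_alloc() is called from a context that must not sleep while the gfp mask allows blocking. A hedged sketch of the kind of caller it is intended to catch (the lock, pool and function name are invented for illustration):

        /* Sketch of a buggy caller that the new annotation would flag. */
        static void *broken_alloc(struct dma_pool *pool, spinlock_t *lock,
                                  dma_addr_t *dma)
        {
                void *buf;

                spin_lock(lock);                              /* atomic context */
                buf = dma_pool_alloc(pool, GFP_KERNEL, dma);  /* GFP_KERNEL allows sleeping */
                spin_unlock(lock);
                return buf;
        }

A caller that really is atomic should pass GFP_ATOMIC instead, which does not set __GFP_WAIT and keeps the annotation quiet.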
diff --git a/mm/filemap.c b/mm/filemap.c
index 3d4df44e4221..ea89840fc65f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
| @@ -612,6 +612,19 @@ void __lock_page_nosync(struct page *page) | |||
| 612 | TASK_UNINTERRUPTIBLE); | 612 | TASK_UNINTERRUPTIBLE); |
| 613 | } | 613 | } |
| 614 | 614 | ||
| 615 | int __lock_page_or_retry(struct page *page, struct mm_struct *mm, | ||
| 616 | unsigned int flags) | ||
| 617 | { | ||
| 618 | if (!(flags & FAULT_FLAG_ALLOW_RETRY)) { | ||
| 619 | __lock_page(page); | ||
| 620 | return 1; | ||
| 621 | } else { | ||
| 622 | up_read(&mm->mmap_sem); | ||
| 623 | wait_on_page_locked(page); | ||
| 624 | return 0; | ||
| 625 | } | ||
| 626 | } | ||
| 627 | |||
| 615 | /** | 628 | /** |
| 616 | * find_get_page - find and get a page reference | 629 | * find_get_page - find and get a page reference |
| 617 | * @mapping: the address_space to search | 630 | * @mapping: the address_space to search |
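__lock_page_or_retry() is the slow path behind a lock_page_or_retry() wrapper (presumably a trylock_page() fast path added in pagemap.h by the same series): with FAULT_FLAG_ALLOW_RETRY set, the fault handler releases mmap_sem, waits for the current page lock holder, and asks its caller to repeat the fault rather than blocking with the semaphore held. The calling convention, as filemap_fault() uses it later in this diff:

        /* Returns 1 with the page locked, or 0 with mmap_sem already dropped. */
        if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
                page_cache_release(page);
                return ret | VM_FAULT_RETRY;    /* arch fault code re-runs the fault */
        }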
| @@ -631,7 +644,9 @@ repeat: | |||
| 631 | pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); | 644 | pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); |
| 632 | if (pagep) { | 645 | if (pagep) { |
| 633 | page = radix_tree_deref_slot(pagep); | 646 | page = radix_tree_deref_slot(pagep); |
| 634 | if (unlikely(!page || page == RADIX_TREE_RETRY)) | 647 | if (unlikely(!page)) |
| 648 | goto out; | ||
| 649 | if (radix_tree_deref_retry(page)) | ||
| 635 | goto repeat; | 650 | goto repeat; |
| 636 | 651 | ||
| 637 | if (!page_cache_get_speculative(page)) | 652 | if (!page_cache_get_speculative(page)) |
| @@ -647,6 +662,7 @@ repeat: | |||
| 647 | goto repeat; | 662 | goto repeat; |
| 648 | } | 663 | } |
| 649 | } | 664 | } |
| 665 | out: | ||
| 650 | rcu_read_unlock(); | 666 | rcu_read_unlock(); |
| 651 | 667 | ||
| 652 | return page; | 668 | return page; |
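radix_tree_deref_retry() replaces the open-coded comparisons against RADIX_TREE_RETRY in the lookup paths below; an RCU lookup can observe a slot that temporarily holds an indirect entry while the tree is being reorganised, and such entries must be re-read rather than treated as pages. A condensed sketch of the lockless pattern these hunks converge on (illustrative; the real code also re-validates page->mapping and page->index after taking the reference):

        rcu_read_lock();
repeat:
        page = radix_tree_deref_slot(pagep);
        if (unlikely(!page))
                goto out;                       /* empty slot */
        if (radix_tree_deref_retry(page))
                goto repeat;                    /* transient entry, look again */
        if (!page_cache_get_speculative(page))
                goto repeat;                    /* page was freed, look again */
out:
        rcu_read_unlock();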
| @@ -764,12 +780,11 @@ repeat: | |||
| 764 | page = radix_tree_deref_slot((void **)pages[i]); | 780 | page = radix_tree_deref_slot((void **)pages[i]); |
| 765 | if (unlikely(!page)) | 781 | if (unlikely(!page)) |
| 766 | continue; | 782 | continue; |
| 767 | /* | 783 | if (radix_tree_deref_retry(page)) { |
| 768 | * this can only trigger if nr_found == 1, making livelock | 784 | if (ret) |
| 769 | * a non issue. | 785 | start = pages[ret-1]->index; |
| 770 | */ | ||
| 771 | if (unlikely(page == RADIX_TREE_RETRY)) | ||
| 772 | goto restart; | 786 | goto restart; |
| 787 | } | ||
| 773 | 788 | ||
| 774 | if (!page_cache_get_speculative(page)) | 789 | if (!page_cache_get_speculative(page)) |
| 775 | goto repeat; | 790 | goto repeat; |
| @@ -817,11 +832,7 @@ repeat: | |||
| 817 | page = radix_tree_deref_slot((void **)pages[i]); | 832 | page = radix_tree_deref_slot((void **)pages[i]); |
| 818 | if (unlikely(!page)) | 833 | if (unlikely(!page)) |
| 819 | continue; | 834 | continue; |
| 820 | /* | 835 | if (radix_tree_deref_retry(page)) |
| 821 | * this can only trigger if nr_found == 1, making livelock | ||
| 822 | * a non issue. | ||
| 823 | */ | ||
| 824 | if (unlikely(page == RADIX_TREE_RETRY)) | ||
| 825 | goto restart; | 836 | goto restart; |
| 826 | 837 | ||
| 827 | if (page->mapping == NULL || page->index != index) | 838 | if (page->mapping == NULL || page->index != index) |
| @@ -874,11 +885,7 @@ repeat: | |||
| 874 | page = radix_tree_deref_slot((void **)pages[i]); | 885 | page = radix_tree_deref_slot((void **)pages[i]); |
| 875 | if (unlikely(!page)) | 886 | if (unlikely(!page)) |
| 876 | continue; | 887 | continue; |
| 877 | /* | 888 | if (radix_tree_deref_retry(page)) |
| 878 | * this can only trigger if nr_found == 1, making livelock | ||
| 879 | * a non issue. | ||
| 880 | */ | ||
| 881 | if (unlikely(page == RADIX_TREE_RETRY)) | ||
| 882 | goto restart; | 889 | goto restart; |
| 883 | 890 | ||
| 884 | if (!page_cache_get_speculative(page)) | 891 | if (!page_cache_get_speculative(page)) |
| @@ -1016,6 +1023,9 @@ find_page: | |||
| 1016 | goto page_not_up_to_date; | 1023 | goto page_not_up_to_date; |
| 1017 | if (!trylock_page(page)) | 1024 | if (!trylock_page(page)) |
| 1018 | goto page_not_up_to_date; | 1025 | goto page_not_up_to_date; |
| 1026 | /* Did it get truncated before we got the lock? */ | ||
| 1027 | if (!page->mapping) | ||
| 1028 | goto page_not_up_to_date_locked; | ||
| 1019 | if (!mapping->a_ops->is_partially_uptodate(page, | 1029 | if (!mapping->a_ops->is_partially_uptodate(page, |
| 1020 | desc, offset)) | 1030 | desc, offset)) |
| 1021 | goto page_not_up_to_date_locked; | 1031 | goto page_not_up_to_date_locked; |
| @@ -1539,25 +1549,30 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
| 1539 | * waiting for the lock. | 1549 | * waiting for the lock. |
| 1540 | */ | 1550 | */ |
| 1541 | do_async_mmap_readahead(vma, ra, file, page, offset); | 1551 | do_async_mmap_readahead(vma, ra, file, page, offset); |
| 1542 | lock_page(page); | ||
| 1543 | |||
| 1544 | /* Did it get truncated? */ | ||
| 1545 | if (unlikely(page->mapping != mapping)) { | ||
| 1546 | unlock_page(page); | ||
| 1547 | put_page(page); | ||
| 1548 | goto no_cached_page; | ||
| 1549 | } | ||
| 1550 | } else { | 1552 | } else { |
| 1551 | /* No page in the page cache at all */ | 1553 | /* No page in the page cache at all */ |
| 1552 | do_sync_mmap_readahead(vma, ra, file, offset); | 1554 | do_sync_mmap_readahead(vma, ra, file, offset); |
| 1553 | count_vm_event(PGMAJFAULT); | 1555 | count_vm_event(PGMAJFAULT); |
| 1554 | ret = VM_FAULT_MAJOR; | 1556 | ret = VM_FAULT_MAJOR; |
| 1555 | retry_find: | 1557 | retry_find: |
| 1556 | page = find_lock_page(mapping, offset); | 1558 | page = find_get_page(mapping, offset); |
| 1557 | if (!page) | 1559 | if (!page) |
| 1558 | goto no_cached_page; | 1560 | goto no_cached_page; |
| 1559 | } | 1561 | } |
| 1560 | 1562 | ||
| 1563 | if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { | ||
| 1564 | page_cache_release(page); | ||
| 1565 | return ret | VM_FAULT_RETRY; | ||
| 1566 | } | ||
| 1567 | |||
| 1568 | /* Did it get truncated? */ | ||
| 1569 | if (unlikely(page->mapping != mapping)) { | ||
| 1570 | unlock_page(page); | ||
| 1571 | put_page(page); | ||
| 1572 | goto retry_find; | ||
| 1573 | } | ||
| 1574 | VM_BUG_ON(page->index != offset); | ||
| 1575 | |||
| 1561 | /* | 1576 | /* |
| 1562 | * We have a locked page in the page cache, now we need to check | 1577 | * We have a locked page in the page cache, now we need to check |
| 1563 | * that it's up-to-date. If not, it is going to be due to an error. | 1578 | * that it's up-to-date. If not, it is going to be due to an error. |
| @@ -2177,12 +2192,12 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, | |||
| 2177 | } | 2192 | } |
| 2178 | 2193 | ||
| 2179 | if (written > 0) { | 2194 | if (written > 0) { |
| 2180 | loff_t end = pos + written; | 2195 | pos += written; |
| 2181 | if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { | 2196 | if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { |
| 2182 | i_size_write(inode, end); | 2197 | i_size_write(inode, pos); |
| 2183 | mark_inode_dirty(inode); | 2198 | mark_inode_dirty(inode); |
| 2184 | } | 2199 | } |
| 2185 | *ppos = end; | 2200 | *ppos = pos; |
| 2186 | } | 2201 | } |
| 2187 | out: | 2202 | out: |
| 2188 | return written; | 2203 | return written; |
diff --git a/mm/highmem.c b/mm/highmem.c
index 7a0aa1be4993..693394daa2ed 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
| @@ -29,6 +29,11 @@ | |||
| 29 | #include <linux/kgdb.h> | 29 | #include <linux/kgdb.h> |
| 30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
| 31 | 31 | ||
| 32 | |||
| 33 | #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) | ||
| 34 | DEFINE_PER_CPU(int, __kmap_atomic_idx); | ||
| 35 | #endif | ||
| 36 | |||
| 32 | /* | 37 | /* |
| 33 | * Virtual_count is not a pure "count". | 38 | * Virtual_count is not a pure "count". |
| 34 | * 0 means that it is not mapped, and has not been mapped | 39 | * 0 means that it is not mapped, and has not been mapped |
| @@ -42,6 +47,9 @@ | |||
| 42 | unsigned long totalhigh_pages __read_mostly; | 47 | unsigned long totalhigh_pages __read_mostly; |
| 43 | EXPORT_SYMBOL(totalhigh_pages); | 48 | EXPORT_SYMBOL(totalhigh_pages); |
| 44 | 49 | ||
| 50 | |||
| 51 | EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); | ||
| 52 | |||
| 45 | unsigned int nr_free_highpages (void) | 53 | unsigned int nr_free_highpages (void) |
| 46 | { | 54 | { |
| 47 | pg_data_t *pgdat; | 55 | pg_data_t *pgdat; |
| @@ -422,61 +430,3 @@ void __init page_address_init(void) | |||
| 422 | } | 430 | } |
| 423 | 431 | ||
| 424 | #endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */ | 432 | #endif /* defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL) */ |
| 425 | |||
| 426 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
| 427 | |||
| 428 | void debug_kmap_atomic(enum km_type type) | ||
| 429 | { | ||
| 430 | static int warn_count = 10; | ||
| 431 | |||
| 432 | if (unlikely(warn_count < 0)) | ||
| 433 | return; | ||
| 434 | |||
| 435 | if (unlikely(in_interrupt())) { | ||
| 436 | if (in_nmi()) { | ||
| 437 | if (type != KM_NMI && type != KM_NMI_PTE) { | ||
| 438 | WARN_ON(1); | ||
| 439 | warn_count--; | ||
| 440 | } | ||
| 441 | } else if (in_irq()) { | ||
| 442 | if (type != KM_IRQ0 && type != KM_IRQ1 && | ||
| 443 | type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ && | ||
| 444 | type != KM_BOUNCE_READ && type != KM_IRQ_PTE) { | ||
| 445 | WARN_ON(1); | ||
| 446 | warn_count--; | ||
| 447 | } | ||
| 448 | } else if (!irqs_disabled()) { /* softirq */ | ||
| 449 | if (type != KM_IRQ0 && type != KM_IRQ1 && | ||
| 450 | type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 && | ||
| 451 | type != KM_SKB_SUNRPC_DATA && | ||
| 452 | type != KM_SKB_DATA_SOFTIRQ && | ||
| 453 | type != KM_BOUNCE_READ) { | ||
| 454 | WARN_ON(1); | ||
| 455 | warn_count--; | ||
| 456 | } | ||
| 457 | } | ||
| 458 | } | ||
| 459 | |||
| 460 | if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ || | ||
| 461 | type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ || | ||
| 462 | type == KM_IRQ_PTE || type == KM_NMI || | ||
| 463 | type == KM_NMI_PTE ) { | ||
| 464 | if (!irqs_disabled()) { | ||
| 465 | WARN_ON(1); | ||
| 466 | warn_count--; | ||
| 467 | } | ||
| 468 | } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) { | ||
| 469 | if (irq_count() == 0 && !irqs_disabled()) { | ||
| 470 | WARN_ON(1); | ||
| 471 | warn_count--; | ||
| 472 | } | ||
| 473 | } | ||
| 474 | #ifdef CONFIG_KGDB_KDB | ||
| 475 | if (unlikely(type == KM_KDB && atomic_read(&kgdb_active) == -1)) { | ||
| 476 | WARN_ON(1); | ||
| 477 | warn_count--; | ||
| 478 | } | ||
| 479 | #endif /* CONFIG_KGDB_KDB */ | ||
| 480 | } | ||
| 481 | |||
| 482 | #endif | ||
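The new per-CPU __kmap_atomic_idx replaces the caller-supplied KM_* slot constants, which is why the km_type-based debug_kmap_atomic() checks above can be removed wholesale. One plausible shape for the push/pop helpers built on this counter (a sketch under the assumption that the real helpers live in <linux/highmem.h> in the same series; names and debug checks may differ):

        static inline int kmap_atomic_idx_push(void)
        {
                return __get_cpu_var(__kmap_atomic_idx)++;      /* claim next stack slot */
        }

        static inline void kmap_atomic_idx_pop(void)
        {
                __get_cpu_var(__kmap_atomic_idx)--;             /* release the slot */
        }

kmap_atomic() can then derive the fixmap slot from smp_processor_id() plus the pushed index, so nesting is tracked automatically instead of being encoded in KM_USER0/KM_IRQ0-style constants.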
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c03273807182..85855240933d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
| @@ -423,14 +423,14 @@ static void clear_huge_page(struct page *page, | |||
| 423 | } | 423 | } |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | static void copy_gigantic_page(struct page *dst, struct page *src, | 426 | static void copy_user_gigantic_page(struct page *dst, struct page *src, |
| 427 | unsigned long addr, struct vm_area_struct *vma) | 427 | unsigned long addr, struct vm_area_struct *vma) |
| 428 | { | 428 | { |
| 429 | int i; | 429 | int i; |
| 430 | struct hstate *h = hstate_vma(vma); | 430 | struct hstate *h = hstate_vma(vma); |
| 431 | struct page *dst_base = dst; | 431 | struct page *dst_base = dst; |
| 432 | struct page *src_base = src; | 432 | struct page *src_base = src; |
| 433 | might_sleep(); | 433 | |
| 434 | for (i = 0; i < pages_per_huge_page(h); ) { | 434 | for (i = 0; i < pages_per_huge_page(h); ) { |
| 435 | cond_resched(); | 435 | cond_resched(); |
| 436 | copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); | 436 | copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); |
| @@ -440,14 +440,15 @@ static void copy_gigantic_page(struct page *dst, struct page *src, | |||
| 440 | src = mem_map_next(src, src_base, i); | 440 | src = mem_map_next(src, src_base, i); |
| 441 | } | 441 | } |
| 442 | } | 442 | } |
| 443 | static void copy_huge_page(struct page *dst, struct page *src, | 443 | |
| 444 | static void copy_user_huge_page(struct page *dst, struct page *src, | ||
| 444 | unsigned long addr, struct vm_area_struct *vma) | 445 | unsigned long addr, struct vm_area_struct *vma) |
| 445 | { | 446 | { |
| 446 | int i; | 447 | int i; |
| 447 | struct hstate *h = hstate_vma(vma); | 448 | struct hstate *h = hstate_vma(vma); |
| 448 | 449 | ||
| 449 | if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { | 450 | if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { |
| 450 | copy_gigantic_page(dst, src, addr, vma); | 451 | copy_user_gigantic_page(dst, src, addr, vma); |
| 451 | return; | 452 | return; |
| 452 | } | 453 | } |
| 453 | 454 | ||
| @@ -458,6 +459,40 @@ static void copy_huge_page(struct page *dst, struct page *src, | |||
| 458 | } | 459 | } |
| 459 | } | 460 | } |
| 460 | 461 | ||
| 462 | static void copy_gigantic_page(struct page *dst, struct page *src) | ||
| 463 | { | ||
| 464 | int i; | ||
| 465 | struct hstate *h = page_hstate(src); | ||
| 466 | struct page *dst_base = dst; | ||
| 467 | struct page *src_base = src; | ||
| 468 | |||
| 469 | for (i = 0; i < pages_per_huge_page(h); ) { | ||
| 470 | cond_resched(); | ||
| 471 | copy_highpage(dst, src); | ||
| 472 | |||
| 473 | i++; | ||
| 474 | dst = mem_map_next(dst, dst_base, i); | ||
| 475 | src = mem_map_next(src, src_base, i); | ||
| 476 | } | ||
| 477 | } | ||
| 478 | |||
| 479 | void copy_huge_page(struct page *dst, struct page *src) | ||
| 480 | { | ||
| 481 | int i; | ||
| 482 | struct hstate *h = page_hstate(src); | ||
| 483 | |||
| 484 | if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) { | ||
| 485 | copy_gigantic_page(dst, src); | ||
| 486 | return; | ||
| 487 | } | ||
| 488 | |||
| 489 | might_sleep(); | ||
| 490 | for (i = 0; i < pages_per_huge_page(h); i++) { | ||
| 491 | cond_resched(); | ||
| 492 | copy_highpage(dst + i, src + i); | ||
| 493 | } | ||
| 494 | } | ||
| 495 | |||
| 461 | static void enqueue_huge_page(struct hstate *h, struct page *page) | 496 | static void enqueue_huge_page(struct hstate *h, struct page *page) |
| 462 | { | 497 | { |
| 463 | int nid = page_to_nid(page); | 498 | int nid = page_to_nid(page); |
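The new vma-less copy_huge_page(dst, src), as opposed to copy_user_huge_page() which still needs the faulting address for architectures with virtually indexed caches, is exported for callers that copy a huge page outside of a user fault; hugepage migration is the expected user. A hedged sketch of such a call site (the wrapper name is illustrative):

        /* Sketch: migration copies the old contents into a fresh huge page. */
        static void copy_for_hugepage_migration(struct page *new_hpage,
                                                struct page *old_hpage)
        {
                /* Falls back to copy_gigantic_page() above MAX_ORDER_NR_PAGES. */
                copy_huge_page(new_hpage, old_hpage);
        }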
| @@ -466,11 +501,24 @@ static void enqueue_huge_page(struct hstate *h, struct page *page) | |||
| 466 | h->free_huge_pages_node[nid]++; | 501 | h->free_huge_pages_node[nid]++; |
| 467 | } | 502 | } |
| 468 | 503 | ||
| 504 | static struct page *dequeue_huge_page_node(struct hstate *h, int nid) | ||
| 505 | { | ||
| 506 | struct page *page; | ||
| 507 | |||
| 508 | if (list_empty(&h->hugepage_freelists[nid])) | ||
| 509 | return NULL; | ||
| 510 | page = list_entry(h->hugepage_freelists[nid].next, struct page, lru); | ||
| 511 | list_del(&page->lru); | ||
| 512 | set_page_refcounted(page); | ||
| 513 | h->free_huge_pages--; | ||
| 514 | h->free_huge_pages_node[nid]--; | ||
| 515 | return page; | ||
| 516 | } | ||
| 517 | |||
| 469 | static struct page *dequeue_huge_page_vma(struct hstate *h, | 518 | static struct page *dequeue_huge_page_vma(struct hstate *h, |
| 470 | struct vm_area_struct *vma, | 519 | struct vm_area_struct *vma, |
| 471 | unsigned long address, int avoid_reserve) | 520 | unsigned long address, int avoid_reserve) |
| 472 | { | 521 | { |
| 473 | int nid; | ||
| 474 | struct page *page = NULL; | 522 | struct page *page = NULL; |
| 475 | struct mempolicy *mpol; | 523 | struct mempolicy *mpol; |
| 476 | nodemask_t *nodemask; | 524 | nodemask_t *nodemask; |
| @@ -496,19 +544,13 @@ static struct page *dequeue_huge_page_vma(struct hstate *h, | |||
| 496 | 544 | ||
| 497 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 545 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
| 498 | MAX_NR_ZONES - 1, nodemask) { | 546 | MAX_NR_ZONES - 1, nodemask) { |
| 499 | nid = zone_to_nid(zone); | 547 | if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) { |
| 500 | if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) && | 548 | page = dequeue_huge_page_node(h, zone_to_nid(zone)); |
| 501 | !list_empty(&h->hugepage_freelists[nid])) { | 549 | if (page) { |
| 502 | page = list_entry(h->hugepage_freelists[nid].next, | 550 | if (!avoid_reserve) |
| 503 | struct page, lru); | 551 | decrement_hugepage_resv_vma(h, vma); |
| 504 | list_del(&page->lru); | 552 | break; |
| 505 | h->free_huge_pages--; | 553 | } |
| 506 | h->free_huge_pages_node[nid]--; | ||
| 507 | |||
| 508 | if (!avoid_reserve) | ||
| 509 | decrement_hugepage_resv_vma(h, vma); | ||
| 510 | |||
| 511 | break; | ||
| 512 | } | 554 | } |
| 513 | } | 555 | } |
| 514 | err: | 556 | err: |
| @@ -770,11 +812,10 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, | |||
| 770 | return ret; | 812 | return ret; |
| 771 | } | 813 | } |
| 772 | 814 | ||
| 773 | static struct page *alloc_buddy_huge_page(struct hstate *h, | 815 | static struct page *alloc_buddy_huge_page(struct hstate *h, int nid) |
| 774 | struct vm_area_struct *vma, unsigned long address) | ||
| 775 | { | 816 | { |
| 776 | struct page *page; | 817 | struct page *page; |
| 777 | unsigned int nid; | 818 | unsigned int r_nid; |
| 778 | 819 | ||
| 779 | if (h->order >= MAX_ORDER) | 820 | if (h->order >= MAX_ORDER) |
| 780 | return NULL; | 821 | return NULL; |
| @@ -812,9 +853,14 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, | |||
| 812 | } | 853 | } |
| 813 | spin_unlock(&hugetlb_lock); | 854 | spin_unlock(&hugetlb_lock); |
| 814 | 855 | ||
| 815 | page = alloc_pages(htlb_alloc_mask|__GFP_COMP| | 856 | if (nid == NUMA_NO_NODE) |
| 816 | __GFP_REPEAT|__GFP_NOWARN, | 857 | page = alloc_pages(htlb_alloc_mask|__GFP_COMP| |
| 817 | huge_page_order(h)); | 858 | __GFP_REPEAT|__GFP_NOWARN, |
| 859 | huge_page_order(h)); | ||
| 860 | else | ||
| 861 | page = alloc_pages_exact_node(nid, | ||
| 862 | htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| | ||
| 863 | __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h)); | ||
| 818 | 864 | ||
| 819 | if (page && arch_prepare_hugepage(page)) { | 865 | if (page && arch_prepare_hugepage(page)) { |
| 820 | __free_pages(page, huge_page_order(h)); | 866 | __free_pages(page, huge_page_order(h)); |
| @@ -823,19 +869,13 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, | |||
| 823 | 869 | ||
| 824 | spin_lock(&hugetlb_lock); | 870 | spin_lock(&hugetlb_lock); |
| 825 | if (page) { | 871 | if (page) { |
| 826 | /* | 872 | r_nid = page_to_nid(page); |
| 827 | * This page is now managed by the hugetlb allocator and has | ||
| 828 | * no users -- drop the buddy allocator's reference. | ||
| 829 | */ | ||
| 830 | put_page_testzero(page); | ||
| 831 | VM_BUG_ON(page_count(page)); | ||
| 832 | nid = page_to_nid(page); | ||
| 833 | set_compound_page_dtor(page, free_huge_page); | 873 | set_compound_page_dtor(page, free_huge_page); |
| 834 | /* | 874 | /* |
| 835 | * We incremented the global counters already | 875 | * We incremented the global counters already |
| 836 | */ | 876 | */ |
| 837 | h->nr_huge_pages_node[nid]++; | 877 | h->nr_huge_pages_node[r_nid]++; |
| 838 | h->surplus_huge_pages_node[nid]++; | 878 | h->surplus_huge_pages_node[r_nid]++; |
| 839 | __count_vm_event(HTLB_BUDDY_PGALLOC); | 879 | __count_vm_event(HTLB_BUDDY_PGALLOC); |
| 840 | } else { | 880 | } else { |
| 841 | h->nr_huge_pages--; | 881 | h->nr_huge_pages--; |
| @@ -848,6 +888,25 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, | |||
| 848 | } | 888 | } |
| 849 | 889 | ||
| 850 | /* | 890 | /* |
| 891 | * This allocation function is useful in the context where vma is irrelevant. | ||
| 892 | * E.g. soft-offlining uses this function because it only cares about the | ||
| 893 | * physical address of the error page. | ||
| 894 | */ | ||
| 895 | struct page *alloc_huge_page_node(struct hstate *h, int nid) | ||
| 896 | { | ||
| 897 | struct page *page; | ||
| 898 | |||
| 899 | spin_lock(&hugetlb_lock); | ||
| 900 | page = dequeue_huge_page_node(h, nid); | ||
| 901 | spin_unlock(&hugetlb_lock); | ||
| 902 | |||
| 903 | if (!page) | ||
| 904 | page = alloc_buddy_huge_page(h, nid); | ||
| 905 | |||
| 906 | return page; | ||
| 907 | } | ||
| 908 | |||
| 909 | /* | ||
| 851 | * Increase the hugetlb pool such that it can accommodate a reservation | 910 | * Increase the hugetlb pool such that it can accommodate a reservation
| 852 | * of size 'delta'. | 911 | * of size 'delta'. |
| 853 | */ | 912 | */ |
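alloc_huge_page_node() first tries the free list of the requested node and only then falls back to alloc_buddy_huge_page() with __GFP_THISNODE. A hedged sketch of how the soft-offline path mentioned in the comment might use it as its target-page allocator (the wrapper name and its caller are illustrative, not taken from this diff):

        /* Sketch: allocate the migration target on the poisoned page's node. */
        static struct page *new_hugepage_for_soft_offline(struct page *hpage)
        {
                int nid = page_to_nid(hpage);

                return alloc_huge_page_node(page_hstate(hpage), nid);
        }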
| @@ -871,17 +930,14 @@ static int gather_surplus_pages(struct hstate *h, int delta) | |||
| 871 | retry: | 930 | retry: |
| 872 | spin_unlock(&hugetlb_lock); | 931 | spin_unlock(&hugetlb_lock); |
| 873 | for (i = 0; i < needed; i++) { | 932 | for (i = 0; i < needed; i++) { |
| 874 | page = alloc_buddy_huge_page(h, NULL, 0); | 933 | page = alloc_buddy_huge_page(h, NUMA_NO_NODE); |
| 875 | if (!page) { | 934 | if (!page) |
| 876 | /* | 935 | /* |
| 877 | * We were not able to allocate enough pages to | 936 | * We were not able to allocate enough pages to |
| 878 | * satisfy the entire reservation so we free what | 937 | * satisfy the entire reservation so we free what |
| 879 | * we've allocated so far. | 938 | * we've allocated so far. |
| 880 | */ | 939 | */ |
| 881 | spin_lock(&hugetlb_lock); | ||
| 882 | needed = 0; | ||
| 883 | goto free; | 940 | goto free; |
| 884 | } | ||
| 885 | 941 | ||
| 886 | list_add(&page->lru, &surplus_list); | 942 | list_add(&page->lru, &surplus_list); |
| 887 | } | 943 | } |
| @@ -908,31 +964,31 @@ retry: | |||
| 908 | needed += allocated; | 964 | needed += allocated; |
| 909 | h->resv_huge_pages += delta; | 965 | h->resv_huge_pages += delta; |
| 910 | ret = 0; | 966 | ret = 0; |
| 911 | free: | 967 | |
| 968 | spin_unlock(&hugetlb_lock); | ||
| 912 | /* Free the needed pages to the hugetlb pool */ | 969 | /* Free the needed pages to the hugetlb pool */ |
| 913 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { | 970 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { |
| 914 | if ((--needed) < 0) | 971 | if ((--needed) < 0) |
| 915 | break; | 972 | break; |
| 916 | list_del(&page->lru); | 973 | list_del(&page->lru); |
| 974 | /* | ||
| 975 | * This page is now managed by the hugetlb allocator and has | ||
| 976 | * no users -- drop the buddy allocator's reference. | ||
| 977 | */ | ||
| 978 | put_page_testzero(page); | ||
| 979 | VM_BUG_ON(page_count(page)); | ||
| 917 | enqueue_huge_page(h, page); | 980 | enqueue_huge_page(h, page); |
| 918 | } | 981 | } |
| 919 | 982 | ||
| 920 | /* Free unnecessary surplus pages to the buddy allocator */ | 983 | /* Free unnecessary surplus pages to the buddy allocator */ |
| 984 | free: | ||
| 921 | if (!list_empty(&surplus_list)) { | 985 | if (!list_empty(&surplus_list)) { |
| 922 | spin_unlock(&hugetlb_lock); | ||
| 923 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { | 986 | list_for_each_entry_safe(page, tmp, &surplus_list, lru) { |
| 924 | list_del(&page->lru); | 987 | list_del(&page->lru); |
| 925 | /* | 988 | put_page(page); |
| 926 | * The page has a reference count of zero already, so | ||
| 927 | * call free_huge_page directly instead of using | ||
| 928 | * put_page. This must be done with hugetlb_lock | ||
| 929 | * unlocked which is safe because free_huge_page takes | ||
| 930 | * hugetlb_lock before deciding how to free the page. | ||
| 931 | */ | ||
| 932 | free_huge_page(page); | ||
| 933 | } | 989 | } |
| 934 | spin_lock(&hugetlb_lock); | ||
| 935 | } | 990 | } |
| 991 | spin_lock(&hugetlb_lock); | ||
| 936 | 992 | ||
| 937 | return ret; | 993 | return ret; |
| 938 | } | 994 | } |
| @@ -1052,14 +1108,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, | |||
| 1052 | spin_unlock(&hugetlb_lock); | 1108 | spin_unlock(&hugetlb_lock); |
| 1053 | 1109 | ||
| 1054 | if (!page) { | 1110 | if (!page) { |
| 1055 | page = alloc_buddy_huge_page(h, vma, addr); | 1111 | page = alloc_buddy_huge_page(h, NUMA_NO_NODE); |
| 1056 | if (!page) { | 1112 | if (!page) { |
| 1057 | hugetlb_put_quota(inode->i_mapping, chg); | 1113 | hugetlb_put_quota(inode->i_mapping, chg); |
| 1058 | return ERR_PTR(-VM_FAULT_SIGBUS); | 1114 | return ERR_PTR(-VM_FAULT_SIGBUS); |
| 1059 | } | 1115 | } |
| 1060 | } | 1116 | } |
| 1061 | 1117 | ||
| 1062 | set_page_refcounted(page); | ||
| 1063 | set_page_private(page, (unsigned long) mapping); | 1118 | set_page_private(page, (unsigned long) mapping); |
| 1064 | 1119 | ||
| 1065 | vma_commit_reservation(h, vma, addr); | 1120 | vma_commit_reservation(h, vma, addr); |
| @@ -2153,6 +2208,19 @@ nomem: | |||
| 2153 | return -ENOMEM; | 2208 | return -ENOMEM; |
| 2154 | } | 2209 | } |
| 2155 | 2210 | ||
| 2211 | static int is_hugetlb_entry_migration(pte_t pte) | ||
| 2212 | { | ||
| 2213 | swp_entry_t swp; | ||
| 2214 | |||
| 2215 | if (huge_pte_none(pte) || pte_present(pte)) | ||
| 2216 | return 0; | ||
| 2217 | swp = pte_to_swp_entry(pte); | ||
| 2218 | if (non_swap_entry(swp) && is_migration_entry(swp)) { | ||
| 2219 | return 1; | ||
| 2220 | } else | ||
| 2221 | return 0; | ||
| 2222 | } | ||
| 2223 | |||
| 2156 | static int is_hugetlb_entry_hwpoisoned(pte_t pte) | 2224 | static int is_hugetlb_entry_hwpoisoned(pte_t pte) |
| 2157 | { | 2225 | { |
| 2158 | swp_entry_t swp; | 2226 | swp_entry_t swp; |
| @@ -2380,10 +2448,13 @@ retry_avoidcopy: | |||
| 2380 | * When the original hugepage is shared one, it does not have | 2448 | * When the original hugepage is shared one, it does not have |
| 2381 | * anon_vma prepared. | 2449 | * anon_vma prepared. |
| 2382 | */ | 2450 | */ |
| 2383 | if (unlikely(anon_vma_prepare(vma))) | 2451 | if (unlikely(anon_vma_prepare(vma))) { |
| 2452 | /* Caller expects lock to be held */ | ||
| 2453 | spin_lock(&mm->page_table_lock); | ||
| 2384 | return VM_FAULT_OOM; | 2454 | return VM_FAULT_OOM; |
| 2455 | } | ||
| 2385 | 2456 | ||
| 2386 | copy_huge_page(new_page, old_page, address, vma); | 2457 | copy_user_huge_page(new_page, old_page, address, vma); |
| 2387 | __SetPageUptodate(new_page); | 2458 | __SetPageUptodate(new_page); |
| 2388 | 2459 | ||
| 2389 | /* | 2460 | /* |
| @@ -2515,22 +2586,20 @@ retry: | |||
| 2515 | hugepage_add_new_anon_rmap(page, vma, address); | 2586 | hugepage_add_new_anon_rmap(page, vma, address); |
| 2516 | } | 2587 | } |
| 2517 | } else { | 2588 | } else { |
| 2589 | /* | ||
| 2590 | * If a memory error occurs between mmap() and fault, some processes | ||
| 2591 | * don't have a hwpoisoned swap entry for the errored virtual address. | ||
| 2592 | * So we need to block hugepage fault by PG_hwpoison bit check. | ||
| 2593 | */ | ||
| 2594 | if (unlikely(PageHWPoison(page))) { | ||
| 2595 | ret = VM_FAULT_HWPOISON | | ||
| 2596 | VM_FAULT_SET_HINDEX(h - hstates); | ||
| 2597 | goto backout_unlocked; | ||
| 2598 | } | ||
| 2518 | page_dup_rmap(page); | 2599 | page_dup_rmap(page); |
| 2519 | } | 2600 | } |
| 2520 | 2601 | ||
| 2521 | /* | 2602 | /* |
| 2522 | * Since memory error handler replaces pte into hwpoison swap entry | ||
| 2523 | * at the time of error handling, a process which reserved but not have | ||
| 2524 | * the mapping to the error hugepage does not have hwpoison swap entry. | ||
| 2525 | * So we need to block accesses from such a process by checking | ||
| 2526 | * PG_hwpoison bit here. | ||
| 2527 | */ | ||
| 2528 | if (unlikely(PageHWPoison(page))) { | ||
| 2529 | ret = VM_FAULT_HWPOISON; | ||
| 2530 | goto backout_unlocked; | ||
| 2531 | } | ||
| 2532 | |||
| 2533 | /* | ||
| 2534 | * If we are going to COW a private mapping later, we examine the | 2603 | * If we are going to COW a private mapping later, we examine the |
| 2535 | * pending reservations for this page now. This will ensure that | 2604 | * pending reservations for this page now. This will ensure that |
| 2536 | * any allocations necessary to record that reservation occur outside | 2605 | * any allocations necessary to record that reservation occur outside |
| @@ -2587,8 +2656,12 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2587 | ptep = huge_pte_offset(mm, address); | 2656 | ptep = huge_pte_offset(mm, address); |
| 2588 | if (ptep) { | 2657 | if (ptep) { |
| 2589 | entry = huge_ptep_get(ptep); | 2658 | entry = huge_ptep_get(ptep); |
| 2590 | if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) | 2659 | if (unlikely(is_hugetlb_entry_migration(entry))) { |
| 2591 | return VM_FAULT_HWPOISON; | 2660 | migration_entry_wait(mm, (pmd_t *)ptep, address); |
| 2661 | return 0; | ||
| 2662 | } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) | ||
| 2663 | return VM_FAULT_HWPOISON_LARGE | | ||
| 2664 | VM_FAULT_SET_HINDEX(h - hstates); | ||
| 2592 | } | 2665 | } |
| 2593 | 2666 | ||
| 2594 | ptep = huge_pte_alloc(mm, address, huge_page_size(h)); | 2667 | ptep = huge_pte_alloc(mm, address, huge_page_size(h)); |
| @@ -2665,7 +2738,8 @@ out_page_table_lock: | |||
| 2665 | unlock_page(pagecache_page); | 2738 | unlock_page(pagecache_page); |
| 2666 | put_page(pagecache_page); | 2739 | put_page(pagecache_page); |
| 2667 | } | 2740 | } |
| 2668 | unlock_page(page); | 2741 | if (page != pagecache_page) |
| 2742 | unlock_page(page); | ||
| 2669 | 2743 | ||
| 2670 | out_mutex: | 2744 | out_mutex: |
| 2671 | mutex_unlock(&hugetlb_instantiation_mutex); | 2745 | mutex_unlock(&hugetlb_instantiation_mutex); |
| @@ -2878,18 +2952,41 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) | |||
| 2878 | hugetlb_acct_memory(h, -(chg - freed)); | 2952 | hugetlb_acct_memory(h, -(chg - freed)); |
| 2879 | } | 2953 | } |
| 2880 | 2954 | ||
| 2955 | #ifdef CONFIG_MEMORY_FAILURE | ||
| 2956 | |||
| 2957 | /* Should be called in hugetlb_lock */ | ||
| 2958 | static int is_hugepage_on_freelist(struct page *hpage) | ||
| 2959 | { | ||
| 2960 | struct page *page; | ||
| 2961 | struct page *tmp; | ||
| 2962 | struct hstate *h = page_hstate(hpage); | ||
| 2963 | int nid = page_to_nid(hpage); | ||
| 2964 | |||
| 2965 | list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru) | ||
| 2966 | if (page == hpage) | ||
| 2967 | return 1; | ||
| 2968 | return 0; | ||
| 2969 | } | ||
| 2970 | |||
| 2881 | /* | 2971 | /* |
| 2882 | * This function is called from memory failure code. | 2972 | * This function is called from memory failure code. |
| 2883 | * Assume the caller holds page lock of the head page. | 2973 | * Assume the caller holds page lock of the head page. |
| 2884 | */ | 2974 | */ |
| 2885 | void __isolate_hwpoisoned_huge_page(struct page *hpage) | 2975 | int dequeue_hwpoisoned_huge_page(struct page *hpage) |
| 2886 | { | 2976 | { |
| 2887 | struct hstate *h = page_hstate(hpage); | 2977 | struct hstate *h = page_hstate(hpage); |
| 2888 | int nid = page_to_nid(hpage); | 2978 | int nid = page_to_nid(hpage); |
| 2979 | int ret = -EBUSY; | ||
| 2889 | 2980 | ||
| 2890 | spin_lock(&hugetlb_lock); | 2981 | spin_lock(&hugetlb_lock); |
| 2891 | list_del(&hpage->lru); | 2982 | if (is_hugepage_on_freelist(hpage)) { |
| 2892 | h->free_huge_pages--; | 2983 | list_del(&hpage->lru); |
| 2893 | h->free_huge_pages_node[nid]--; | 2984 | set_page_refcounted(hpage); |
| 2985 | h->free_huge_pages--; | ||
| 2986 | h->free_huge_pages_node[nid]--; | ||
| 2987 | ret = 0; | ||
| 2988 | } | ||
| 2894 | spin_unlock(&hugetlb_lock); | 2989 | spin_unlock(&hugetlb_lock); |
| 2990 | return ret; | ||
| 2895 | } | 2991 | } |
| 2992 | #endif | ||
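dequeue_hwpoisoned_huge_page() now reports whether the hugepage really was sitting on a free list, so the memory-failure code can distinguish "isolated from the allocator" from "already in use". A hedged sketch of the caller side (the messages are illustrative; the real accounting in mm/memory-failure.c may differ):

        if (dequeue_hwpoisoned_huge_page(hpage) == 0)
                pr_info("huge page %#lx removed from free list\n",
                        page_to_pfn(hpage));
        else
                pr_info("huge page %#lx was not free, handling it as in-use\n",
                        page_to_pfn(hpage));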
diff --git a/mm/internal.h b/mm/internal.h
index 6a697bb97fc5..dedb0aff673f 100644
--- a/mm/internal.h
+++ b/mm/internal.h
| @@ -62,7 +62,7 @@ extern bool is_free_buddy_page(struct page *page); | |||
| 62 | */ | 62 | */ |
| 63 | static inline unsigned long page_order(struct page *page) | 63 | static inline unsigned long page_order(struct page *page) |
| 64 | { | 64 | { |
| 65 | VM_BUG_ON(!PageBuddy(page)); | 65 | /* PageBuddy() must be checked by the caller */ |
| 66 | return page_private(page); | 66 | return page_private(page); |
| 67 | } | 67 | } |
| 68 | 68 | ||
diff --git a/mm/ksm.c b/mm/ksm.c
| @@ -1724,8 +1724,13 @@ static int ksm_memory_callback(struct notifier_block *self, | |||
| 1724 | /* | 1724 | /* |
| 1725 | * Keep it very simple for now: just lock out ksmd and | 1725 | * Keep it very simple for now: just lock out ksmd and |
| 1726 | * MADV_UNMERGEABLE while any memory is going offline. | 1726 | * MADV_UNMERGEABLE while any memory is going offline. |
| 1727 | * mutex_lock_nested() is necessary because lockdep was alarmed | ||
| 1728 | * that here we take ksm_thread_mutex inside notifier chain | ||
| 1729 | * mutex, and later take notifier chain mutex inside | ||
| 1730 | * ksm_thread_mutex to unlock it. But that's safe because both | ||
| 1731 | * are inside mem_hotplug_mutex. | ||
| 1727 | */ | 1732 | */ |
| 1728 | mutex_lock(&ksm_thread_mutex); | 1733 | mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING); |
| 1729 | break; | 1734 | break; |
| 1730 | 1735 | ||
| 1731 | case MEM_OFFLINE: | 1736 | case MEM_OFFLINE: |
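mutex_lock_nested() acquires the same mutex as mutex_lock() but files the acquisition under a separate lockdep subclass, so the ksm_thread_mutex-inside-notifier-mutex ordering described in the comment is not reported as a potential deadlock. A minimal sketch of the annotation (SINGLE_DEPTH_NESTING is the stock subclass constant from <linux/lockdep.h>):

        mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING);
        /* ... memory hotplug proceeds with ksmd and MADV_UNMERGEABLE locked out ... */
        mutex_unlock(&ksm_thread_mutex);

Only the lock-acquisition annotation changes; the corresponding unlock when the offline operation finishes is not touched by this patch.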
diff --git a/mm/maccess.c b/mm/maccess.c
index 4e348dbaecd7..e2b6f5634e0d 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
| @@ -1,9 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Access kernel memory without faulting. | 2 | * Access kernel memory without faulting. |
| 3 | */ | 3 | */ |
| 4 | #include <linux/uaccess.h> | ||
| 5 | #include <linux/module.h> | 4 | #include <linux/module.h> |
| 6 | #include <linux/mm.h> | 5 | #include <linux/mm.h> |
| 6 | #include <linux/uaccess.h> | ||
| 7 | 7 | ||
| 8 | /** | 8 | /** |
| 9 | * probe_kernel_read(): safely attempt to read from a location | 9 | * probe_kernel_read(): safely attempt to read from a location |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9be3cf8a5da4..7a22b4129211 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
| @@ -61,7 +61,14 @@ struct mem_cgroup *root_mem_cgroup __read_mostly; | |||
| 61 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 61 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
| 62 | /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ | 62 | /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ |
| 63 | int do_swap_account __read_mostly; | 63 | int do_swap_account __read_mostly; |
| 64 | static int really_do_swap_account __initdata = 1; /* for remember boot option*/ | 64 | |
| 65 | /* for remember boot option*/ | ||
| 66 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED | ||
| 67 | static int really_do_swap_account __initdata = 1; | ||
| 68 | #else | ||
| 69 | static int really_do_swap_account __initdata = 0; | ||
| 70 | #endif | ||
| 71 | |||
| 65 | #else | 72 | #else |
| 66 | #define do_swap_account (0) | 73 | #define do_swap_account (0) |
| 67 | #endif | 74 | #endif |
| @@ -89,7 +96,10 @@ enum mem_cgroup_stat_index { | |||
| 89 | MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ | 96 | MEM_CGROUP_STAT_PGPGIN_COUNT, /* # of pages paged in */ |
| 90 | MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ | 97 | MEM_CGROUP_STAT_PGPGOUT_COUNT, /* # of pages paged out */ |
| 91 | MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ | 98 | MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ |
| 92 | MEM_CGROUP_EVENTS, /* incremented at every pagein/pageout */ | 99 | MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */ |
| 100 | /* incremented at every pagein/pageout */ | ||
| 101 | MEM_CGROUP_EVENTS = MEM_CGROUP_STAT_DATA, | ||
| 102 | MEM_CGROUP_ON_MOVE, /* someone is moving account between groups */ | ||
| 93 | 103 | ||
| 94 | MEM_CGROUP_STAT_NSTATS, | 104 | MEM_CGROUP_STAT_NSTATS, |
| 95 | }; | 105 | }; |
| @@ -254,6 +264,12 @@ struct mem_cgroup { | |||
| 254 | * percpu counter. | 264 | * percpu counter. |
| 255 | */ | 265 | */ |
| 256 | struct mem_cgroup_stat_cpu *stat; | 266 | struct mem_cgroup_stat_cpu *stat; |
| 267 | /* | ||
| 268 | * used when a cpu is offlined or other synchronizations | ||
| 269 | * See mem_cgroup_read_stat(). | ||
| 270 | */ | ||
| 271 | struct mem_cgroup_stat_cpu nocpu_base; | ||
| 272 | spinlock_t pcp_counter_lock; | ||
| 257 | }; | 273 | }; |
| 258 | 274 | ||
| 259 | /* Stuffs for move charges at task migration. */ | 275 | /* Stuffs for move charges at task migration. */ |
| @@ -269,13 +285,14 @@ enum move_type { | |||
| 269 | 285 | ||
| 270 | /* "mc" and its members are protected by cgroup_mutex */ | 286 | /* "mc" and its members are protected by cgroup_mutex */ |
| 271 | static struct move_charge_struct { | 287 | static struct move_charge_struct { |
| 272 | spinlock_t lock; /* for from, to, moving_task */ | 288 | spinlock_t lock; /* for from, to */ |
| 273 | struct mem_cgroup *from; | 289 | struct mem_cgroup *from; |
| 274 | struct mem_cgroup *to; | 290 | struct mem_cgroup *to; |
| 275 | unsigned long precharge; | 291 | unsigned long precharge; |
| 276 | unsigned long moved_charge; | 292 | unsigned long moved_charge; |
| 277 | unsigned long moved_swap; | 293 | unsigned long moved_swap; |
| 278 | struct task_struct *moving_task; /* a task moving charges */ | 294 | struct task_struct *moving_task; /* a task moving charges */ |
| 295 | struct mm_struct *mm; | ||
| 279 | wait_queue_head_t waitq; /* a waitq for other context */ | 296 | wait_queue_head_t waitq; /* a waitq for other context */ |
| 280 | } mc = { | 297 | } mc = { |
| 281 | .lock = __SPIN_LOCK_UNLOCKED(mc.lock), | 298 | .lock = __SPIN_LOCK_UNLOCKED(mc.lock), |
| @@ -530,14 +547,40 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) | |||
| 530 | return mz; | 547 | return mz; |
| 531 | } | 548 | } |
| 532 | 549 | ||
| 550 | /* | ||
| 551 | * Implementation Note: reading percpu statistics for memcg. | ||
| 552 | * | ||
| 553 | * Both vmstat[] and percpu_counter have a threshold and do periodic | ||
| 554 | * synchronization to implement a "quick" read. There is a trade-off between | ||
| 555 | * reading cost and precision of value. Then, we may have a chance to implement | ||
| 556 | * a periodic synchronization of the counter in memcg's counter. | ||
| 557 | * | ||
| 558 | * But this _read() function is used for user interface now. The user accounts | ||
| 559 | * memory usage by memory cgroup and he _always_ requires exact value because | ||
| 560 | * he accounts memory. Even if we provide quick-and-fuzzy read, we always | ||
| 561 | * have to visit all online cpus and make sum. So, for now, unnecessary | ||
| 562 | * synchronization is not implemented. (just implemented for cpu hotplug) | ||
| 563 | * | ||
| 564 | * If there are kernel internal actions which can make use of some not-exact | ||
| 565 | * value, and reading all cpu value can be performance bottleneck in some | ||
| 566 | * common workload, threshold and synchronization as in vmstat[] should be | ||
| 567 | * implemented. | ||
| 568 | */ | ||
| 533 | static s64 mem_cgroup_read_stat(struct mem_cgroup *mem, | 569 | static s64 mem_cgroup_read_stat(struct mem_cgroup *mem, |
| 534 | enum mem_cgroup_stat_index idx) | 570 | enum mem_cgroup_stat_index idx) |
| 535 | { | 571 | { |
| 536 | int cpu; | 572 | int cpu; |
| 537 | s64 val = 0; | 573 | s64 val = 0; |
| 538 | 574 | ||
| 539 | for_each_possible_cpu(cpu) | 575 | get_online_cpus(); |
| 576 | for_each_online_cpu(cpu) | ||
| 540 | val += per_cpu(mem->stat->count[idx], cpu); | 577 | val += per_cpu(mem->stat->count[idx], cpu); |
| 578 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 579 | spin_lock(&mem->pcp_counter_lock); | ||
| 580 | val += mem->nocpu_base.count[idx]; | ||
| 581 | spin_unlock(&mem->pcp_counter_lock); | ||
| 582 | #endif | ||
| 583 | put_online_cpus(); | ||
| 541 | return val; | 584 | return val; |
| 542 | } | 585 | } |
| 543 | 586 | ||
| @@ -659,40 +702,83 @@ static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) | |||
| 659 | return mem; | 702 | return mem; |
| 660 | } | 703 | } |
| 661 | 704 | ||
| 662 | /* | 705 | /* The caller has to guarantee "mem" exists before calling this */ |
| 663 | * Call callback function against all cgroup under hierarchy tree. | 706 | static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem) |
| 664 | */ | ||
| 665 | static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data, | ||
| 666 | int (*func)(struct mem_cgroup *, void *)) | ||
| 667 | { | 707 | { |
| 668 | int found, ret, nextid; | ||
| 669 | struct cgroup_subsys_state *css; | 708 | struct cgroup_subsys_state *css; |
| 670 | struct mem_cgroup *mem; | 709 | int found; |
| 671 | |||
| 672 | if (!root->use_hierarchy) | ||
| 673 | return (*func)(root, data); | ||
| 674 | 710 | ||
| 675 | nextid = 1; | 711 | if (!mem) /* ROOT cgroup has the smallest ID */ |
| 676 | do { | 712 | return root_mem_cgroup; /*css_put/get against root is ignored*/ |
| 677 | ret = 0; | 713 | if (!mem->use_hierarchy) { |
| 714 | if (css_tryget(&mem->css)) | ||
| 715 | return mem; | ||
| 716 | return NULL; | ||
| 717 | } | ||
| 718 | rcu_read_lock(); | ||
| 719 | /* | ||
| 720 | * searching a memory cgroup which has the smallest ID under given | ||
| 721 | * ROOT cgroup. (ID >= 1) | ||
| 722 | */ | ||
| 723 | css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found); | ||
| 724 | if (css && css_tryget(css)) | ||
| 725 | mem = container_of(css, struct mem_cgroup, css); | ||
| 726 | else | ||
| 678 | mem = NULL; | 727 | mem = NULL; |
| 728 | rcu_read_unlock(); | ||
| 729 | return mem; | ||
| 730 | } | ||
| 731 | |||
| 732 | static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter, | ||
| 733 | struct mem_cgroup *root, | ||
| 734 | bool cond) | ||
| 735 | { | ||
| 736 | int nextid = css_id(&iter->css) + 1; | ||
| 737 | int found; | ||
| 738 | int hierarchy_used; | ||
| 739 | struct cgroup_subsys_state *css; | ||
| 740 | |||
| 741 | hierarchy_used = iter->use_hierarchy; | ||
| 742 | |||
| 743 | css_put(&iter->css); | ||
| 744 | /* If no ROOT, walk all, ignore hierarchy */ | ||
| 745 | if (!cond || (root && !hierarchy_used)) | ||
| 746 | return NULL; | ||
| 747 | |||
| 748 | if (!root) | ||
| 749 | root = root_mem_cgroup; | ||
| 679 | 750 | ||
| 751 | do { | ||
| 752 | iter = NULL; | ||
| 680 | rcu_read_lock(); | 753 | rcu_read_lock(); |
| 681 | css = css_get_next(&mem_cgroup_subsys, nextid, &root->css, | 754 | |
| 682 | &found); | 755 | css = css_get_next(&mem_cgroup_subsys, nextid, |
| 756 | &root->css, &found); | ||
| 683 | if (css && css_tryget(css)) | 757 | if (css && css_tryget(css)) |
| 684 | mem = container_of(css, struct mem_cgroup, css); | 758 | iter = container_of(css, struct mem_cgroup, css); |
| 685 | rcu_read_unlock(); | 759 | rcu_read_unlock(); |
| 686 | 760 | /* If css is NULL, no more cgroups will be found */ | |
| 687 | if (mem) { | ||
| 688 | ret = (*func)(mem, data); | ||
| 689 | css_put(&mem->css); | ||
| 690 | } | ||
| 691 | nextid = found + 1; | 761 | nextid = found + 1; |
| 692 | } while (!ret && css); | 762 | } while (css && !iter); |
| 693 | 763 | ||
| 694 | return ret; | 764 | return iter; |
| 695 | } | 765 | } |
| 766 | /* | ||
| 767 | * for_each_mem_cgroup_tree() for visiting all cgroups under a tree. Please | ||
| 768 | * be careful that breaking out of the loop is not allowed. We hold a reference count. | ||
| 769 | * Instead, modify "cond" to be false and "continue" to exit the loop. | ||
| 770 | */ | ||
| 771 | #define for_each_mem_cgroup_tree_cond(iter, root, cond) \ | ||
| 772 | for (iter = mem_cgroup_start_loop(root);\ | ||
| 773 | iter != NULL;\ | ||
| 774 | iter = mem_cgroup_get_next(iter, root, cond)) | ||
| 775 | |||
| 776 | #define for_each_mem_cgroup_tree(iter, root) \ | ||
| 777 | for_each_mem_cgroup_tree_cond(iter, root, true) | ||
| 778 | |||
| 779 | #define for_each_mem_cgroup_all(iter) \ | ||
| 780 | for_each_mem_cgroup_tree_cond(iter, NULL, true) | ||
| 781 | |||
| 696 | 782 | ||
| 697 | static inline bool mem_cgroup_is_root(struct mem_cgroup *mem) | 783 | static inline bool mem_cgroup_is_root(struct mem_cgroup *mem) |
| 698 | { | 784 | { |
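The new iterator takes a css reference on each visited memcg and drops it in mem_cgroup_get_next(), which is why a bare "break" out of the loop is not allowed. As an example of the resulting style, counting a hierarchy's members becomes a plain loop; this is the same shape the patch applies to mem_cgroup_count_children() further down:

        /* Illustrative only; mirrors mem_cgroup_count_children() in this patch. */
        static int count_hierarchy_members(struct mem_cgroup *mem)
        {
                struct mem_cgroup *iter;
                int num = 0;

                for_each_mem_cgroup_tree(iter, mem)
                        num++;          /* visits mem and every descendant */
                return num;
        }

To stop early, use for_each_mem_cgroup_tree_cond() and make its "cond" expression false while letting the loop run once more, so the reference held on the current iterator is still released.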
| @@ -1051,7 +1137,52 @@ static unsigned int get_swappiness(struct mem_cgroup *memcg) | |||
| 1051 | return swappiness; | 1137 | return swappiness; |
| 1052 | } | 1138 | } |
| 1053 | 1139 | ||
| 1054 | /* A routine for testing mem is not under move_account */ | 1140 | static void mem_cgroup_start_move(struct mem_cgroup *mem) |
| 1141 | { | ||
| 1142 | int cpu; | ||
| 1143 | |||
| 1144 | get_online_cpus(); | ||
| 1145 | spin_lock(&mem->pcp_counter_lock); | ||
| 1146 | for_each_online_cpu(cpu) | ||
| 1147 | per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1; | ||
| 1148 | mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1; | ||
| 1149 | spin_unlock(&mem->pcp_counter_lock); | ||
| 1150 | put_online_cpus(); | ||
| 1151 | |||
| 1152 | synchronize_rcu(); | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | static void mem_cgroup_end_move(struct mem_cgroup *mem) | ||
| 1156 | { | ||
| 1157 | int cpu; | ||
| 1158 | |||
| 1159 | if (!mem) | ||
| 1160 | return; | ||
| 1161 | get_online_cpus(); | ||
| 1162 | spin_lock(&mem->pcp_counter_lock); | ||
| 1163 | for_each_online_cpu(cpu) | ||
| 1164 | per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1; | ||
| 1165 | mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1; | ||
| 1166 | spin_unlock(&mem->pcp_counter_lock); | ||
| 1167 | put_online_cpus(); | ||
| 1168 | } | ||
| 1169 | /* | ||
| 1170 | * 2 routines for checking "mem" is under move_account() or not. | ||
| 1171 | * | ||
| 1172 | * mem_cgroup_stealed() - checking a cgroup is mc.from or not. This is used | ||
| 1173 | * for avoiding race in accounting. If true, | ||
| 1174 | * pc->mem_cgroup may be overwritten. | ||
| 1175 | * | ||
| 1176 | * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or | ||
| 1177 | * under hierarchy of moving cgroups. This is for | ||
| 1178 | * waiting at high memory pressure caused by "move". | ||
| 1179 | */ | ||
| 1180 | |||
| 1181 | static bool mem_cgroup_stealed(struct mem_cgroup *mem) | ||
| 1182 | { | ||
| 1183 | VM_BUG_ON(!rcu_read_lock_held()); | ||
| 1184 | return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0; | ||
| 1185 | } | ||
| 1055 | 1186 | ||
| 1056 | static bool mem_cgroup_under_move(struct mem_cgroup *mem) | 1187 | static bool mem_cgroup_under_move(struct mem_cgroup *mem) |
| 1057 | { | 1188 | { |
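mem_cgroup_start_move()/mem_cgroup_end_move() raise and lower the per-cpu MEM_CGROUP_ON_MOVE counters around charge moving, and the synchronize_rcu() in start_move ensures that every statistics updater running under rcu_read_lock() observes the raised counter before the move actually rewrites pc->mem_cgroup. A hedged sketch of the updater-side pairing that the race notes at the end of this file describe (pc and the statistic update itself are assumed from the surrounding memcontrol.c code):

        bool need_unlock = false;
        struct mem_cgroup *mem;

        rcu_read_lock();
        mem = pc->mem_cgroup;
        if (unlikely(mem_cgroup_stealed(mem))) {
                /* A move may rewrite pc->mem_cgroup: pin it with the lock. */
                lock_page_cgroup(pc);
                need_unlock = true;
                mem = pc->mem_cgroup;           /* re-read under the lock */
        }
        /* ... update the per-memcg statistic for "mem" ... */
        if (need_unlock)
                unlock_page_cgroup(pc);
        rcu_read_unlock();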
| @@ -1092,13 +1223,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem) | |||
| 1092 | return false; | 1223 | return false; |
| 1093 | } | 1224 | } |
| 1094 | 1225 | ||
| 1095 | static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data) | ||
| 1096 | { | ||
| 1097 | int *val = data; | ||
| 1098 | (*val)++; | ||
| 1099 | return 0; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | /** | 1226 | /** |
| 1103 | * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. | 1227 | * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode. |
| 1104 | * @memcg: The memory cgroup that went over limit | 1228 | * @memcg: The memory cgroup that went over limit |
| @@ -1173,7 +1297,10 @@ done: | |||
| 1173 | static int mem_cgroup_count_children(struct mem_cgroup *mem) | 1297 | static int mem_cgroup_count_children(struct mem_cgroup *mem) |
| 1174 | { | 1298 | { |
| 1175 | int num = 0; | 1299 | int num = 0; |
| 1176 | mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb); | 1300 | struct mem_cgroup *iter; |
| 1301 | |||
| 1302 | for_each_mem_cgroup_tree(iter, mem) | ||
| 1303 | num++; | ||
| 1177 | return num; | 1304 | return num; |
| 1178 | } | 1305 | } |
| 1179 | 1306 | ||
| @@ -1322,49 +1449,39 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
| 1322 | return total; | 1449 | return total; |
| 1323 | } | 1450 | } |
| 1324 | 1451 | ||
| 1325 | static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data) | ||
| 1326 | { | ||
| 1327 | int *val = (int *)data; | ||
| 1328 | int x; | ||
| 1329 | /* | ||
| 1330 | * Logically, we can stop scanning immediately when we find | ||
| 1331 | * a memcg is already locked. But condidering unlock ops and | ||
| 1332 | * creation/removal of memcg, scan-all is simple operation. | ||
| 1333 | */ | ||
| 1334 | x = atomic_inc_return(&mem->oom_lock); | ||
| 1335 | *val = max(x, *val); | ||
| 1336 | return 0; | ||
| 1337 | } | ||
| 1338 | /* | 1452 | /* |
| 1339 | * Check OOM-Killer is already running under our hierarchy. | 1453 | * Check OOM-Killer is already running under our hierarchy. |
| 1340 | * If someone is running, return false. | 1454 | * If someone is running, return false. |
| 1341 | */ | 1455 | */ |
| 1342 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) | 1456 | static bool mem_cgroup_oom_lock(struct mem_cgroup *mem) |
| 1343 | { | 1457 | { |
| 1344 | int lock_count = 0; | 1458 | int x, lock_count = 0; |
| 1459 | struct mem_cgroup *iter; | ||
| 1345 | 1460 | ||
| 1346 | mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb); | 1461 | for_each_mem_cgroup_tree(iter, mem) { |
| 1462 | x = atomic_inc_return(&iter->oom_lock); | ||
| 1463 | lock_count = max(x, lock_count); | ||
| 1464 | } | ||
| 1347 | 1465 | ||
| 1348 | if (lock_count == 1) | 1466 | if (lock_count == 1) |
| 1349 | return true; | 1467 | return true; |
| 1350 | return false; | 1468 | return false; |
| 1351 | } | 1469 | } |
| 1352 | 1470 | ||
| 1353 | static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data) | 1471 | static int mem_cgroup_oom_unlock(struct mem_cgroup *mem) |
| 1354 | { | 1472 | { |
| 1473 | struct mem_cgroup *iter; | ||
| 1474 | |||
| 1355 | /* | 1475 | /* |
| 1356 | * When a new child is created while the hierarchy is under oom, | 1476 | * When a new child is created while the hierarchy is under oom, |
| 1357 | * mem_cgroup_oom_lock() may not be called. We have to use | 1477 | * mem_cgroup_oom_lock() may not be called. We have to use |
| 1358 | * atomic_add_unless() here. | 1478 | * atomic_add_unless() here. |
| 1359 | */ | 1479 | */ |
| 1360 | atomic_add_unless(&mem->oom_lock, -1, 0); | 1480 | for_each_mem_cgroup_tree(iter, mem) |
| 1481 | atomic_add_unless(&iter->oom_lock, -1, 0); | ||
| 1361 | return 0; | 1482 | return 0; |
| 1362 | } | 1483 | } |
| 1363 | 1484 | ||
| 1364 | static void mem_cgroup_oom_unlock(struct mem_cgroup *mem) | ||
| 1365 | { | ||
| 1366 | mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb); | ||
| 1367 | } | ||
| 1368 | 1485 | ||
| 1369 | static DEFINE_MUTEX(memcg_oom_mutex); | 1486 | static DEFINE_MUTEX(memcg_oom_mutex); |
| 1370 | static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); | 1487 | static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq); |
| @@ -1462,34 +1579,73 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask) | |||
| 1462 | /* | 1579 | /* |
| 1463 | * Currently used to update mapped file statistics, but the routine can be | 1580 | * Currently used to update mapped file statistics, but the routine can be |
| 1464 | * generalized to update other statistics as well. | 1581 | * generalized to update other statistics as well. |
| 1582 | * | ||
| 1583 | * Notes: Race condition | ||
| 1584 | * | ||
| 1585 | * We usually use page_cgroup_lock() for accessing page_cgroup member but | ||
| 1586 | * it tends to be costly. But under some conditions, we don't need | ||
| 1587 | * to do so _always_. | ||
| 1588 | * | ||
| 1589 | * Considering "charge", lock_page_cgroup() is not required because all | ||
| 1590 | * file-stat operations happen after a page is attached to the radix-tree. There | ||
| 1591 | * is no race with "charge". | ||
| 1592 | * | ||
| 1593 | * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup | ||
| 1594 | * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even | ||
| 1595 | * if there is a race with "uncharge". The statistics themselves are properly handled | ||
| 1596 | * by flags. | ||
| 1597 | * | ||
| 1598 | * Considering "move", this is the only case where we see a race. To make the race | ||
| 1599 | * window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect whether | ||
| 1600 | * a race is possible. If it is, we take a lock. | ||
| 1465 | */ | 1601 | */ |
| 1466 | void mem_cgroup_update_file_mapped(struct page *page, int val) | 1602 | |
| 1603 | static void mem_cgroup_update_file_stat(struct page *page, int idx, int val) | ||
| 1467 | { | 1604 | { |
| 1468 | struct mem_cgroup *mem; | 1605 | struct mem_cgroup *mem; |
| 1469 | struct page_cgroup *pc; | 1606 | struct page_cgroup *pc = lookup_page_cgroup(page); |
| 1607 | bool need_unlock = false; | ||
| 1470 | 1608 | ||
| 1471 | pc = lookup_page_cgroup(page); | ||
| 1472 | if (unlikely(!pc)) | 1609 | if (unlikely(!pc)) |
| 1473 | return; | 1610 | return; |
| 1474 | 1611 | ||
| 1475 | lock_page_cgroup(pc); | 1612 | rcu_read_lock(); |
| 1476 | mem = pc->mem_cgroup; | 1613 | mem = pc->mem_cgroup; |
| 1477 | if (!mem || !PageCgroupUsed(pc)) | 1614 | if (unlikely(!mem || !PageCgroupUsed(pc))) |
| 1478 | goto done; | 1615 | goto out; |
| 1616 | /* pc->mem_cgroup is unstable ? */ | ||
| 1617 | if (unlikely(mem_cgroup_stealed(mem))) { | ||
| 1618 | /* take a lock to access pc->mem_cgroup safely */ | ||
| 1619 | lock_page_cgroup(pc); | ||
| 1620 | need_unlock = true; | ||
| 1621 | mem = pc->mem_cgroup; | ||
| 1622 | if (!mem || !PageCgroupUsed(pc)) | ||
| 1623 | goto out; | ||
| 1624 | } | ||
| 1479 | 1625 | ||
| 1480 | /* | 1626 | this_cpu_add(mem->stat->count[idx], val); |
| 1481 | * Preemption is already disabled. We can use __this_cpu_xxx | 1627 | |
| 1482 | */ | 1628 | switch (idx) { |
| 1483 | if (val > 0) { | 1629 | case MEM_CGROUP_STAT_FILE_MAPPED: |
| 1484 | __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); | 1630 | if (val > 0) |
| 1485 | SetPageCgroupFileMapped(pc); | 1631 | SetPageCgroupFileMapped(pc); |
| 1486 | } else { | 1632 | else if (!page_mapped(page)) |
| 1487 | __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]); | 1633 | ClearPageCgroupFileMapped(pc); |
| 1488 | ClearPageCgroupFileMapped(pc); | 1634 | break; |
| 1635 | default: | ||
| 1636 | BUG(); | ||
| 1489 | } | 1637 | } |
| 1490 | 1638 | ||
| 1491 | done: | 1639 | out: |
| 1492 | unlock_page_cgroup(pc); | 1640 | if (unlikely(need_unlock)) |
| 1641 | unlock_page_cgroup(pc); | ||
| 1642 | rcu_read_unlock(); | ||
| 1643 | return; | ||
| 1644 | } | ||
| 1645 | |||
| 1646 | void mem_cgroup_update_file_mapped(struct page *page, int val) | ||
| 1647 | { | ||
| 1648 | mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val); | ||
| 1493 | } | 1649 | } |
| 1494 | 1650 | ||
| 1495 | /* | 1651 | /* |
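The point of the lockless fast path above is that mem_cgroup_update_file_mapped() sits on the rmap hot path. Roughly (paraphrased from mm/rmap.c of this kernel), it is driven from the 0 <-> 1 mapcount transitions:

	void page_add_file_rmap(struct page *page)
	{
		if (atomic_inc_and_test(&page->_mapcount)) {
			__inc_zone_page_state(page, NR_FILE_MAPPED);
			mem_cgroup_update_file_mapped(page, 1);	/* first mapping of the page */
		}
	}

Taking lock_page_cgroup() unconditionally here would cost on every first map and last unmap of a file page; the MEM_CGROUP_ON_MOVE check confines the lock to the rare move_account() window.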
| @@ -1605,15 +1761,55 @@ static void drain_all_stock_sync(void) | |||
| 1605 | atomic_dec(&memcg_drain_count); | 1761 | atomic_dec(&memcg_drain_count); |
| 1606 | } | 1762 | } |
| 1607 | 1763 | ||
| 1608 | static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb, | 1764 | /* |
| 1765 | * This function drains the percpu counter values of a dead CPU and | ||
| 1766 | * folds them into nocpu_base. Note that this function can be preempted. | ||
| 1767 | */ | ||
| 1768 | static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu) | ||
| 1769 | { | ||
| 1770 | int i; | ||
| 1771 | |||
| 1772 | spin_lock(&mem->pcp_counter_lock); | ||
| 1773 | for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { | ||
| 1774 | s64 x = per_cpu(mem->stat->count[i], cpu); | ||
| 1775 | |||
| 1776 | per_cpu(mem->stat->count[i], cpu) = 0; | ||
| 1777 | mem->nocpu_base.count[i] += x; | ||
| 1778 | } | ||
| 1779 | /* need to clear ON_MOVE value, works as a kind of lock. */ | ||
| 1780 | per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0; | ||
| 1781 | spin_unlock(&mem->pcp_counter_lock); | ||
| 1782 | } | ||
| 1783 | |||
| 1784 | static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu) | ||
| 1785 | { | ||
| 1786 | int idx = MEM_CGROUP_ON_MOVE; | ||
| 1787 | |||
| 1788 | spin_lock(&mem->pcp_counter_lock); | ||
| 1789 | per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx]; | ||
| 1790 | spin_unlock(&mem->pcp_counter_lock); | ||
| 1791 | } | ||
| 1792 | |||
| 1793 | static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb, | ||
| 1609 | unsigned long action, | 1794 | unsigned long action, |
| 1610 | void *hcpu) | 1795 | void *hcpu) |
| 1611 | { | 1796 | { |
| 1612 | int cpu = (unsigned long)hcpu; | 1797 | int cpu = (unsigned long)hcpu; |
| 1613 | struct memcg_stock_pcp *stock; | 1798 | struct memcg_stock_pcp *stock; |
| 1799 | struct mem_cgroup *iter; | ||
| 1614 | 1800 | ||
| 1615 | if (action != CPU_DEAD) | 1801 | if ((action == CPU_ONLINE)) { |
| 1802 | for_each_mem_cgroup_all(iter) | ||
| 1803 | synchronize_mem_cgroup_on_move(iter, cpu); | ||
| 1616 | return NOTIFY_OK; | 1804 | return NOTIFY_OK; |
| 1805 | } | ||
| 1806 | |||
| 1807 | if ((action != CPU_DEAD) && (action != CPU_DEAD_FROZEN)) | ||
| 1808 | return NOTIFY_OK; | ||
| 1809 | |||
| 1810 | for_each_mem_cgroup_all(iter) | ||
| 1811 | mem_cgroup_drain_pcp_counter(iter, cpu); | ||
| 1812 | |||
| 1617 | stock = &per_cpu(memcg_stock, cpu); | 1813 | stock = &per_cpu(memcg_stock, cpu); |
| 1618 | drain_stock(stock); | 1814 | drain_stock(stock); |
| 1619 | return NOTIFY_OK; | 1815 | return NOTIFY_OK; |
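The drain only makes sense together with the read side: whatever a dead CPU left behind in nocpu_base has to be folded back in when the counters are summed. A sketch of that reader (simplified; the real mem_cgroup_read_stat() in this series may differ in detail):

	static s64 mem_cgroup_read_stat_sketch(struct mem_cgroup *mem,
					       enum mem_cgroup_stat_index idx)
	{
		int cpu;
		s64 val = 0;

		get_online_cpus();
		for_each_online_cpu(cpu)
			val += per_cpu(mem->stat->count[idx], cpu);
	#ifdef CONFIG_HOTPLUG_CPU
		spin_lock(&mem->pcp_counter_lock);
		val += mem->nocpu_base.count[idx];	/* contribution of dead CPUs */
		spin_unlock(&mem->pcp_counter_lock);
	#endif
		put_online_cpus();
		return val;
	}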
| @@ -1964,7 +2160,7 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc, | |||
| 1964 | { | 2160 | { |
| 1965 | VM_BUG_ON(from == to); | 2161 | VM_BUG_ON(from == to); |
| 1966 | VM_BUG_ON(PageLRU(pc->page)); | 2162 | VM_BUG_ON(PageLRU(pc->page)); |
| 1967 | VM_BUG_ON(!PageCgroupLocked(pc)); | 2163 | VM_BUG_ON(!page_is_cgroup_locked(pc)); |
| 1968 | VM_BUG_ON(!PageCgroupUsed(pc)); | 2164 | VM_BUG_ON(!PageCgroupUsed(pc)); |
| 1969 | VM_BUG_ON(pc->mem_cgroup != from); | 2165 | VM_BUG_ON(pc->mem_cgroup != from); |
| 1970 | 2166 | ||
| @@ -3038,6 +3234,7 @@ move_account: | |||
| 3038 | lru_add_drain_all(); | 3234 | lru_add_drain_all(); |
| 3039 | drain_all_stock_sync(); | 3235 | drain_all_stock_sync(); |
| 3040 | ret = 0; | 3236 | ret = 0; |
| 3237 | mem_cgroup_start_move(mem); | ||
| 3041 | for_each_node_state(node, N_HIGH_MEMORY) { | 3238 | for_each_node_state(node, N_HIGH_MEMORY) { |
| 3042 | for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { | 3239 | for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) { |
| 3043 | enum lru_list l; | 3240 | enum lru_list l; |
| @@ -3051,6 +3248,7 @@ move_account: | |||
| 3051 | if (ret) | 3248 | if (ret) |
| 3052 | break; | 3249 | break; |
| 3053 | } | 3250 | } |
| 3251 | mem_cgroup_end_move(mem); | ||
| 3054 | memcg_oom_recover(mem); | 3252 | memcg_oom_recover(mem); |
| 3055 | /* it seems parent cgroup doesn't have enough mem */ | 3253 | /* it seems parent cgroup doesn't have enough mem */ |
| 3056 | if (ret == -ENOMEM) | 3254 | if (ret == -ENOMEM) |
| @@ -3137,33 +3335,25 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft, | |||
| 3137 | return retval; | 3335 | return retval; |
| 3138 | } | 3336 | } |
| 3139 | 3337 | ||
| 3140 | struct mem_cgroup_idx_data { | ||
| 3141 | s64 val; | ||
| 3142 | enum mem_cgroup_stat_index idx; | ||
| 3143 | }; | ||
| 3144 | 3338 | ||
| 3145 | static int | 3339 | static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem, |
| 3146 | mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data) | 3340 | enum mem_cgroup_stat_index idx) |
| 3147 | { | 3341 | { |
| 3148 | struct mem_cgroup_idx_data *d = data; | 3342 | struct mem_cgroup *iter; |
| 3149 | d->val += mem_cgroup_read_stat(mem, d->idx); | 3343 | s64 val = 0; |
| 3150 | return 0; | ||
| 3151 | } | ||
| 3152 | 3344 | ||
| 3153 | static void | 3345 | /* each per-cpu value can be negative, so use s64 */ |
| 3154 | mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem, | 3346 | for_each_mem_cgroup_tree(iter, mem) |
| 3155 | enum mem_cgroup_stat_index idx, s64 *val) | 3347 | val += mem_cgroup_read_stat(iter, idx); |
| 3156 | { | 3348 | |
| 3157 | struct mem_cgroup_idx_data d; | 3349 | if (val < 0) /* race ? */ |
| 3158 | d.idx = idx; | 3350 | val = 0; |
| 3159 | d.val = 0; | 3351 | return val; |
| 3160 | mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat); | ||
| 3161 | *val = d.val; | ||
| 3162 | } | 3352 | } |
| 3163 | 3353 | ||
| 3164 | static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) | 3354 | static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) |
| 3165 | { | 3355 | { |
| 3166 | u64 idx_val, val; | 3356 | u64 val; |
| 3167 | 3357 | ||
| 3168 | if (!mem_cgroup_is_root(mem)) { | 3358 | if (!mem_cgroup_is_root(mem)) { |
| 3169 | if (!swap) | 3359 | if (!swap) |
| @@ -3172,16 +3362,12 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap) | |||
| 3172 | return res_counter_read_u64(&mem->memsw, RES_USAGE); | 3362 | return res_counter_read_u64(&mem->memsw, RES_USAGE); |
| 3173 | } | 3363 | } |
| 3174 | 3364 | ||
| 3175 | mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val); | 3365 | val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE); |
| 3176 | val = idx_val; | 3366 | val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS); |
| 3177 | mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val); | ||
| 3178 | val += idx_val; | ||
| 3179 | 3367 | ||
| 3180 | if (swap) { | 3368 | if (swap) |
| 3181 | mem_cgroup_get_recursive_idx_stat(mem, | 3369 | val += mem_cgroup_get_recursive_idx_stat(mem, |
| 3182 | MEM_CGROUP_STAT_SWAPOUT, &idx_val); | 3370 | MEM_CGROUP_STAT_SWAPOUT); |
| 3183 | val += idx_val; | ||
| 3184 | } | ||
| 3185 | 3371 | ||
| 3186 | return val << PAGE_SHIFT; | 3372 | return val << PAGE_SHIFT; |
| 3187 | } | 3373 | } |
| @@ -3389,9 +3575,9 @@ struct { | |||
| 3389 | }; | 3575 | }; |
| 3390 | 3576 | ||
| 3391 | 3577 | ||
| 3392 | static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data) | 3578 | static void |
| 3579 | mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | ||
| 3393 | { | 3580 | { |
| 3394 | struct mcs_total_stat *s = data; | ||
| 3395 | s64 val; | 3581 | s64 val; |
| 3396 | 3582 | ||
| 3397 | /* per cpu stat */ | 3583 | /* per cpu stat */ |
| @@ -3421,13 +3607,15 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data) | |||
| 3421 | s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; | 3607 | s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE; |
| 3422 | val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE); | 3608 | val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE); |
| 3423 | s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; | 3609 | s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE; |
| 3424 | return 0; | ||
| 3425 | } | 3610 | } |
| 3426 | 3611 | ||
| 3427 | static void | 3612 | static void |
| 3428 | mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | 3613 | mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) |
| 3429 | { | 3614 | { |
| 3430 | mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat); | 3615 | struct mem_cgroup *iter; |
| 3616 | |||
| 3617 | for_each_mem_cgroup_tree(iter, mem) | ||
| 3618 | mem_cgroup_get_local_stat(iter, s); | ||
| 3431 | } | 3619 | } |
| 3432 | 3620 | ||
| 3433 | static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, | 3621 | static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, |
| @@ -3604,7 +3792,7 @@ static int compare_thresholds(const void *a, const void *b) | |||
| 3604 | return _a->threshold - _b->threshold; | 3792 | return _a->threshold - _b->threshold; |
| 3605 | } | 3793 | } |
| 3606 | 3794 | ||
| 3607 | static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data) | 3795 | static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem) |
| 3608 | { | 3796 | { |
| 3609 | struct mem_cgroup_eventfd_list *ev; | 3797 | struct mem_cgroup_eventfd_list *ev; |
| 3610 | 3798 | ||
| @@ -3615,7 +3803,10 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data) | |||
| 3615 | 3803 | ||
| 3616 | static void mem_cgroup_oom_notify(struct mem_cgroup *mem) | 3804 | static void mem_cgroup_oom_notify(struct mem_cgroup *mem) |
| 3617 | { | 3805 | { |
| 3618 | mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb); | 3806 | struct mem_cgroup *iter; |
| 3807 | |||
| 3808 | for_each_mem_cgroup_tree(iter, mem) | ||
| 3809 | mem_cgroup_oom_notify_cb(iter); | ||
| 3619 | } | 3810 | } |
| 3620 | 3811 | ||
| 3621 | static int mem_cgroup_usage_register_event(struct cgroup *cgrp, | 3812 | static int mem_cgroup_usage_register_event(struct cgroup *cgrp, |
| @@ -4025,14 +4216,17 @@ static struct mem_cgroup *mem_cgroup_alloc(void) | |||
| 4025 | 4216 | ||
| 4026 | memset(mem, 0, size); | 4217 | memset(mem, 0, size); |
| 4027 | mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); | 4218 | mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu); |
| 4028 | if (!mem->stat) { | 4219 | if (!mem->stat) |
| 4029 | if (size < PAGE_SIZE) | 4220 | goto out_free; |
| 4030 | kfree(mem); | 4221 | spin_lock_init(&mem->pcp_counter_lock); |
| 4031 | else | ||
| 4032 | vfree(mem); | ||
| 4033 | mem = NULL; | ||
| 4034 | } | ||
| 4035 | return mem; | 4222 | return mem; |
| 4223 | |||
| 4224 | out_free: | ||
| 4225 | if (size < PAGE_SIZE) | ||
| 4226 | kfree(mem); | ||
| 4227 | else | ||
| 4228 | vfree(mem); | ||
| 4229 | return NULL; | ||
| 4036 | } | 4230 | } |
| 4037 | 4231 | ||
| 4038 | /* | 4232 | /* |
| @@ -4158,7 +4352,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
| 4158 | &per_cpu(memcg_stock, cpu); | 4352 | &per_cpu(memcg_stock, cpu); |
| 4159 | INIT_WORK(&stock->work, drain_local_stock); | 4353 | INIT_WORK(&stock->work, drain_local_stock); |
| 4160 | } | 4354 | } |
| 4161 | hotcpu_notifier(memcg_stock_cpu_callback, 0); | 4355 | hotcpu_notifier(memcg_cpu_hotplug_callback, 0); |
| 4162 | } else { | 4356 | } else { |
| 4163 | parent = mem_cgroup_from_cont(cont->parent); | 4357 | parent = mem_cgroup_from_cont(cont->parent); |
| 4164 | mem->use_hierarchy = parent->use_hierarchy; | 4358 | mem->use_hierarchy = parent->use_hierarchy; |
| @@ -4445,7 +4639,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | |||
| 4445 | unsigned long precharge; | 4639 | unsigned long precharge; |
| 4446 | struct vm_area_struct *vma; | 4640 | struct vm_area_struct *vma; |
| 4447 | 4641 | ||
| 4448 | down_read(&mm->mmap_sem); | 4642 | /* We already hold the mmap_sem */ |
| 4449 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 4643 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
| 4450 | struct mm_walk mem_cgroup_count_precharge_walk = { | 4644 | struct mm_walk mem_cgroup_count_precharge_walk = { |
| 4451 | .pmd_entry = mem_cgroup_count_precharge_pte_range, | 4645 | .pmd_entry = mem_cgroup_count_precharge_pte_range, |
| @@ -4457,7 +4651,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | |||
| 4457 | walk_page_range(vma->vm_start, vma->vm_end, | 4651 | walk_page_range(vma->vm_start, vma->vm_end, |
| 4458 | &mem_cgroup_count_precharge_walk); | 4652 | &mem_cgroup_count_precharge_walk); |
| 4459 | } | 4653 | } |
| 4460 | up_read(&mm->mmap_sem); | ||
| 4461 | 4654 | ||
| 4462 | precharge = mc.precharge; | 4655 | precharge = mc.precharge; |
| 4463 | mc.precharge = 0; | 4656 | mc.precharge = 0; |
| @@ -4508,11 +4701,17 @@ static void mem_cgroup_clear_mc(void) | |||
| 4508 | 4701 | ||
| 4509 | mc.moved_swap = 0; | 4702 | mc.moved_swap = 0; |
| 4510 | } | 4703 | } |
| 4704 | if (mc.mm) { | ||
| 4705 | up_read(&mc.mm->mmap_sem); | ||
| 4706 | mmput(mc.mm); | ||
| 4707 | } | ||
| 4511 | spin_lock(&mc.lock); | 4708 | spin_lock(&mc.lock); |
| 4512 | mc.from = NULL; | 4709 | mc.from = NULL; |
| 4513 | mc.to = NULL; | 4710 | mc.to = NULL; |
| 4514 | mc.moving_task = NULL; | ||
| 4515 | spin_unlock(&mc.lock); | 4711 | spin_unlock(&mc.lock); |
| 4712 | mc.moving_task = NULL; | ||
| 4713 | mc.mm = NULL; | ||
| 4714 | mem_cgroup_end_move(from); | ||
| 4516 | memcg_oom_recover(from); | 4715 | memcg_oom_recover(from); |
| 4517 | memcg_oom_recover(to); | 4716 | memcg_oom_recover(to); |
| 4518 | wake_up_all(&mc.waitq); | 4717 | wake_up_all(&mc.waitq); |
| @@ -4537,26 +4736,38 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
| 4537 | return 0; | 4736 | return 0; |
| 4538 | /* We move charges only when we move a owner of the mm */ | 4737 | /* We move charges only when we move a owner of the mm */ |
| 4539 | if (mm->owner == p) { | 4738 | if (mm->owner == p) { |
| 4739 | /* | ||
| 4740 | * We do all the move-charge work under one mmap_sem to | ||
| 4741 | * avoid deadlock with down_write(&mmap_sem) | ||
| 4742 | * -> try_charge() -> if (mc.moving_task) -> sleep. | ||
| 4743 | */ | ||
| 4744 | down_read(&mm->mmap_sem); | ||
| 4745 | |||
| 4540 | VM_BUG_ON(mc.from); | 4746 | VM_BUG_ON(mc.from); |
| 4541 | VM_BUG_ON(mc.to); | 4747 | VM_BUG_ON(mc.to); |
| 4542 | VM_BUG_ON(mc.precharge); | 4748 | VM_BUG_ON(mc.precharge); |
| 4543 | VM_BUG_ON(mc.moved_charge); | 4749 | VM_BUG_ON(mc.moved_charge); |
| 4544 | VM_BUG_ON(mc.moved_swap); | 4750 | VM_BUG_ON(mc.moved_swap); |
| 4545 | VM_BUG_ON(mc.moving_task); | 4751 | VM_BUG_ON(mc.moving_task); |
| 4752 | VM_BUG_ON(mc.mm); | ||
| 4753 | |||
| 4754 | mem_cgroup_start_move(from); | ||
| 4546 | spin_lock(&mc.lock); | 4755 | spin_lock(&mc.lock); |
| 4547 | mc.from = from; | 4756 | mc.from = from; |
| 4548 | mc.to = mem; | 4757 | mc.to = mem; |
| 4549 | mc.precharge = 0; | 4758 | mc.precharge = 0; |
| 4550 | mc.moved_charge = 0; | 4759 | mc.moved_charge = 0; |
| 4551 | mc.moved_swap = 0; | 4760 | mc.moved_swap = 0; |
| 4552 | mc.moving_task = current; | ||
| 4553 | spin_unlock(&mc.lock); | 4761 | spin_unlock(&mc.lock); |
| 4762 | mc.moving_task = current; | ||
| 4763 | mc.mm = mm; | ||
| 4554 | 4764 | ||
| 4555 | ret = mem_cgroup_precharge_mc(mm); | 4765 | ret = mem_cgroup_precharge_mc(mm); |
| 4556 | if (ret) | 4766 | if (ret) |
| 4557 | mem_cgroup_clear_mc(); | 4767 | mem_cgroup_clear_mc(); |
| 4558 | } | 4768 | /* We call up_read() and mmput() in clear_mc(). */ |
| 4559 | mmput(mm); | 4769 | } else |
| 4770 | mmput(mm); | ||
| 4560 | } | 4771 | } |
| 4561 | return ret; | 4772 | return ret; |
| 4562 | } | 4773 | } |
| @@ -4644,7 +4855,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) | |||
| 4644 | struct vm_area_struct *vma; | 4855 | struct vm_area_struct *vma; |
| 4645 | 4856 | ||
| 4646 | lru_add_drain_all(); | 4857 | lru_add_drain_all(); |
| 4647 | down_read(&mm->mmap_sem); | 4858 | /* We already hold the mmap_sem */ |
| 4648 | for (vma = mm->mmap; vma; vma = vma->vm_next) { | 4859 | for (vma = mm->mmap; vma; vma = vma->vm_next) { |
| 4649 | int ret; | 4860 | int ret; |
| 4650 | struct mm_walk mem_cgroup_move_charge_walk = { | 4861 | struct mm_walk mem_cgroup_move_charge_walk = { |
| @@ -4663,7 +4874,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) | |||
| 4663 | */ | 4874 | */ |
| 4664 | break; | 4875 | break; |
| 4665 | } | 4876 | } |
| 4666 | up_read(&mm->mmap_sem); | ||
| 4667 | } | 4877 | } |
| 4668 | 4878 | ||
| 4669 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 4879 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, |
| @@ -4672,17 +4882,11 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, | |||
| 4672 | struct task_struct *p, | 4882 | struct task_struct *p, |
| 4673 | bool threadgroup) | 4883 | bool threadgroup) |
| 4674 | { | 4884 | { |
| 4675 | struct mm_struct *mm; | 4885 | if (!mc.mm) |
| 4676 | |||
| 4677 | if (!mc.to) | ||
| 4678 | /* no need to move charge */ | 4886 | /* no need to move charge */ |
| 4679 | return; | 4887 | return; |
| 4680 | 4888 | ||
| 4681 | mm = get_task_mm(p); | 4889 | mem_cgroup_move_charge(mc.mm); |
| 4682 | if (mm) { | ||
| 4683 | mem_cgroup_move_charge(mm); | ||
| 4684 | mmput(mm); | ||
| 4685 | } | ||
| 4686 | mem_cgroup_clear_mc(); | 4890 | mem_cgroup_clear_mc(); |
| 4687 | } | 4891 | } |
| 4688 | #else /* !CONFIG_MMU */ | 4892 | #else /* !CONFIG_MMU */ |
| @@ -4723,10 +4927,20 @@ struct cgroup_subsys mem_cgroup_subsys = { | |||
| 4723 | }; | 4927 | }; |
| 4724 | 4928 | ||
| 4725 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 4929 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
| 4930 | static int __init enable_swap_account(char *s) | ||
| 4931 | { | ||
| 4932 | /* consider enabled if no parameter or 1 is given */ | ||
| 4933 | if (!s || !strcmp(s, "1")) | ||
| 4934 | really_do_swap_account = 1; | ||
| 4935 | else if (!strcmp(s, "0")) | ||
| 4936 | really_do_swap_account = 0; | ||
| 4937 | return 1; | ||
| 4938 | } | ||
| 4939 | __setup("swapaccount", enable_swap_account); | ||
| 4726 | 4940 | ||
| 4727 | static int __init disable_swap_account(char *s) | 4941 | static int __init disable_swap_account(char *s) |
| 4728 | { | 4942 | { |
| 4729 | really_do_swap_account = 0; | 4943 | enable_swap_account("0"); |
| 4730 | return 1; | 4944 | return 1; |
| 4731 | } | 4945 | } |
| 4732 | __setup("noswapaccount", disable_swap_account); | 4946 | __setup("noswapaccount", disable_swap_account); |
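With the hunk above, swap accounting gets a proper boolean boot parameter: booting with swapaccount=1 (or just swapaccount) enables it, swapaccount=0 disables it, and the old noswapaccount option is kept as an alias for swapaccount=0. For example, on the kernel command line:

	swapaccount=0		(equivalent to the older noswapaccount)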
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 757f6b0accfe..46ab2c044b0e 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
| @@ -7,21 +7,26 @@ | |||
| 7 | * Free Software Foundation. | 7 | * Free Software Foundation. |
| 8 | * | 8 | * |
| 9 | * High level machine check handler. Handles pages reported by the | 9 | * High level machine check handler. Handles pages reported by the |
| 10 | * hardware as being corrupted usually due to a 2bit ECC memory or cache | 10 | * hardware as being corrupted usually due to a multi-bit ECC memory or cache |
| 11 | * failure. | 11 | * failure. |
| 12 | * | ||
| 13 | * In addition there is a "soft offline" entry point that allows stopping the | ||
| 14 | * use of suspect but not-yet-corrupted pages without killing anything. | ||
| 12 | * | 15 | * |
| 13 | * Handles page cache pages in various states. The tricky part | 16 | * Handles page cache pages in various states. The tricky part |
| 14 | * here is that we can access any page asynchronous to other VM | 17 | * here is that we can access any page asynchronously with respect to |
| 15 | * users, because memory failures could happen anytime and anywhere, | 18 | * other VM users, because memory failures could happen anytime and |
| 16 | * possibly violating some of their assumptions. This is why this code | 19 | * anywhere. This could violate some of their assumptions. This is why |
| 17 | * has to be extremely careful. Generally it tries to use normal locking | 20 | * this code has to be extremely careful. Generally it tries to use |
| 18 | * rules, as in get the standard locks, even if that means the | 21 | * normal locking rules, as in get the standard locks, even if that means |
| 19 | * error handling takes potentially a long time. | 22 | * the error handling takes potentially a long time. |
| 20 | * | 23 | * |
| 21 | * The operation to map back from RMAP chains to processes has to walk | 24 | * There are several operations here with exponential complexity because |
| 22 | * the complete process list and has non linear complexity with the number | 25 | * of unsuitable VM data structures. For example the operation to map back |
| 23 | * mappings. In short it can be quite slow. But since memory corruptions | 26 | * from RMAP chains to processes has to walk the complete process list and |
| 24 | * are rare we hope to get away with this. | 27 | * has non linear complexity with the number. But since memory corruptions |
| 28 | * are rare we hope to get away with this. This avoids impacting the core | ||
| 29 | * VM. | ||
| 25 | */ | 30 | */ |
| 26 | 31 | ||
| 27 | /* | 32 | /* |
| @@ -30,7 +35,6 @@ | |||
| 30 | * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages | 35 | * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages |
| 31 | * - pass bad pages to kdump next kernel | 36 | * - pass bad pages to kdump next kernel |
| 32 | */ | 37 | */ |
| 33 | #define DEBUG 1 /* remove me in 2.6.34 */ | ||
| 34 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
| 35 | #include <linux/mm.h> | 39 | #include <linux/mm.h> |
| 36 | #include <linux/page-flags.h> | 40 | #include <linux/page-flags.h> |
| @@ -47,6 +51,7 @@ | |||
| 47 | #include <linux/slab.h> | 51 | #include <linux/slab.h> |
| 48 | #include <linux/swapops.h> | 52 | #include <linux/swapops.h> |
| 49 | #include <linux/hugetlb.h> | 53 | #include <linux/hugetlb.h> |
| 54 | #include <linux/memory_hotplug.h> | ||
| 50 | #include "internal.h" | 55 | #include "internal.h" |
| 51 | 56 | ||
| 52 | int sysctl_memory_failure_early_kill __read_mostly = 0; | 57 | int sysctl_memory_failure_early_kill __read_mostly = 0; |
| @@ -78,7 +83,7 @@ static int hwpoison_filter_dev(struct page *p) | |||
| 78 | return 0; | 83 | return 0; |
| 79 | 84 | ||
| 80 | /* | 85 | /* |
| 81 | * page_mapping() does not accept slab page | 86 | * page_mapping() does not accept slab pages. |
| 82 | */ | 87 | */ |
| 83 | if (PageSlab(p)) | 88 | if (PageSlab(p)) |
| 84 | return -EINVAL; | 89 | return -EINVAL; |
| @@ -268,7 +273,7 @@ struct to_kill { | |||
| 268 | struct list_head nd; | 273 | struct list_head nd; |
| 269 | struct task_struct *tsk; | 274 | struct task_struct *tsk; |
| 270 | unsigned long addr; | 275 | unsigned long addr; |
| 271 | unsigned addr_valid:1; | 276 | char addr_valid; |
| 272 | }; | 277 | }; |
| 273 | 278 | ||
| 274 | /* | 279 | /* |
| @@ -309,7 +314,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p, | |||
| 309 | * a SIGKILL because the error is not contained anymore. | 314 | * a SIGKILL because the error is not contained anymore. |
| 310 | */ | 315 | */ |
| 311 | if (tk->addr == -EFAULT) { | 316 | if (tk->addr == -EFAULT) { |
| 312 | pr_debug("MCE: Unable to find user space address %lx in %s\n", | 317 | pr_info("MCE: Unable to find user space address %lx in %s\n", |
| 313 | page_to_pfn(p), tsk->comm); | 318 | page_to_pfn(p), tsk->comm); |
| 314 | tk->addr_valid = 0; | 319 | tk->addr_valid = 0; |
| 315 | } | 320 | } |
| @@ -577,7 +582,7 @@ static int me_pagecache_clean(struct page *p, unsigned long pfn) | |||
| 577 | pfn, err); | 582 | pfn, err); |
| 578 | } else if (page_has_private(p) && | 583 | } else if (page_has_private(p) && |
| 579 | !try_to_release_page(p, GFP_NOIO)) { | 584 | !try_to_release_page(p, GFP_NOIO)) { |
| 580 | pr_debug("MCE %#lx: failed to release buffers\n", pfn); | 585 | pr_info("MCE %#lx: failed to release buffers\n", pfn); |
| 581 | } else { | 586 | } else { |
| 582 | ret = RECOVERED; | 587 | ret = RECOVERED; |
| 583 | } | 588 | } |
| @@ -693,11 +698,10 @@ static int me_swapcache_clean(struct page *p, unsigned long pfn) | |||
| 693 | * Issues: | 698 | * Issues: |
| 694 | * - Error on hugepage is contained in hugepage unit (not in raw page unit.) | 699 | * - Error on hugepage is contained in hugepage unit (not in raw page unit.) |
| 695 | * To narrow down kill region to one page, we need to break up pmd. | 700 | * To narrow down kill region to one page, we need to break up pmd. |
| 696 | * - To support soft-offlining for hugepage, we need to support hugepage | ||
| 697 | * migration. | ||
| 698 | */ | 701 | */ |
| 699 | static int me_huge_page(struct page *p, unsigned long pfn) | 702 | static int me_huge_page(struct page *p, unsigned long pfn) |
| 700 | { | 703 | { |
| 704 | int res = 0; | ||
| 701 | struct page *hpage = compound_head(p); | 705 | struct page *hpage = compound_head(p); |
| 702 | /* | 706 | /* |
| 703 | * We can safely recover from error on free or reserved (i.e. | 707 | * We can safely recover from error on free or reserved (i.e. |
| @@ -710,8 +714,9 @@ static int me_huge_page(struct page *p, unsigned long pfn) | |||
| 710 | * so there is no race between isolation and mapping/unmapping. | 714 | * so there is no race between isolation and mapping/unmapping. |
| 711 | */ | 715 | */ |
| 712 | if (!(page_mapping(hpage) || PageAnon(hpage))) { | 716 | if (!(page_mapping(hpage) || PageAnon(hpage))) { |
| 713 | __isolate_hwpoisoned_huge_page(hpage); | 717 | res = dequeue_hwpoisoned_huge_page(hpage); |
| 714 | return RECOVERED; | 718 | if (!res) |
| 719 | return RECOVERED; | ||
| 715 | } | 720 | } |
| 716 | return DELAYED; | 721 | return DELAYED; |
| 717 | } | 722 | } |
| @@ -836,8 +841,6 @@ static int page_action(struct page_state *ps, struct page *p, | |||
| 836 | return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY; | 841 | return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY; |
| 837 | } | 842 | } |
| 838 | 843 | ||
| 839 | #define N_UNMAP_TRIES 5 | ||
| 840 | |||
| 841 | /* | 844 | /* |
| 842 | * Do all that is necessary to remove user space mappings. Unmap | 845 | * Do all that is necessary to remove user space mappings. Unmap |
| 843 | * the pages and send SIGBUS to the processes if the data was dirty. | 846 | * the pages and send SIGBUS to the processes if the data was dirty. |
| @@ -849,7 +852,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 849 | struct address_space *mapping; | 852 | struct address_space *mapping; |
| 850 | LIST_HEAD(tokill); | 853 | LIST_HEAD(tokill); |
| 851 | int ret; | 854 | int ret; |
| 852 | int i; | ||
| 853 | int kill = 1; | 855 | int kill = 1; |
| 854 | struct page *hpage = compound_head(p); | 856 | struct page *hpage = compound_head(p); |
| 855 | 857 | ||
| @@ -903,17 +905,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn, | |||
| 903 | if (kill) | 905 | if (kill) |
| 904 | collect_procs(hpage, &tokill); | 906 | collect_procs(hpage, &tokill); |
| 905 | 907 | ||
| 906 | /* | 908 | ret = try_to_unmap(hpage, ttu); |
| 907 | * try_to_unmap can fail temporarily due to races. | ||
| 908 | * Try a few times (RED-PEN better strategy?) | ||
| 909 | */ | ||
| 910 | for (i = 0; i < N_UNMAP_TRIES; i++) { | ||
| 911 | ret = try_to_unmap(hpage, ttu); | ||
| 912 | if (ret == SWAP_SUCCESS) | ||
| 913 | break; | ||
| 914 | pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret); | ||
| 915 | } | ||
| 916 | |||
| 917 | if (ret != SWAP_SUCCESS) | 909 | if (ret != SWAP_SUCCESS) |
| 918 | printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", | 910 | printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", |
| 919 | pfn, page_mapcount(hpage)); | 911 | pfn, page_mapcount(hpage)); |
| @@ -981,7 +973,10 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) | |||
| 981 | * We need/can do nothing about count=0 pages. | 973 | * We need/can do nothing about count=0 pages. |
| 982 | * 1) it's a free page, and therefore in safe hand: | 974 | * 1) it's a free page, and therefore in safe hand: |
| 983 | * prep_new_page() will be the gate keeper. | 975 | * prep_new_page() will be the gate keeper. |
| 984 | * 2) it's part of a non-compound high order page. | 976 | * 2) it's a free hugepage, which is also safe: |
| 977 | * an affected hugepage will be dequeued from hugepage freelist, | ||
| 978 | * so there's no concern about reusing it ever after. | ||
| 979 | * 3) it's part of a non-compound high order page. | ||
| 985 | * Implies some kernel user: cannot stop them from | 980 | * Implies some kernel user: cannot stop them from |
| 986 | * R/W the page; let's pray that the page has been | 981 | * R/W the page; let's pray that the page has been |
| 987 | * used and will be freed some time later. | 982 | * used and will be freed some time later. |
| @@ -993,6 +988,24 @@ int __memory_failure(unsigned long pfn, int trapno, int flags) | |||
| 993 | if (is_free_buddy_page(p)) { | 988 | if (is_free_buddy_page(p)) { |
| 994 | action_result(pfn, "free buddy", DELAYED); | 989 | action_result(pfn, "free buddy", DELAYED); |
| 995 | return 0; | 990 | return 0; |
| 991 | } else if (PageHuge(hpage)) { | ||
| 992 | /* | ||
| 993 | * Check "just unpoisoned", "filter hit", and | ||
| 994 | * "race with other subpage." | ||
| 995 | */ | ||
| 996 | lock_page_nosync(hpage); | ||
| 997 | if (!PageHWPoison(hpage) | ||
| 998 | || (hwpoison_filter(p) && TestClearPageHWPoison(p)) | ||
| 999 | || (p != hpage && TestSetPageHWPoison(hpage))) { | ||
| 1000 | atomic_long_sub(nr_pages, &mce_bad_pages); | ||
| 1001 | return 0; | ||
| 1002 | } | ||
| 1003 | set_page_hwpoison_huge_page(hpage); | ||
| 1004 | res = dequeue_hwpoisoned_huge_page(hpage); | ||
| 1005 | action_result(pfn, "free huge", | ||
| 1006 | res ? IGNORED : DELAYED); | ||
| 1007 | unlock_page(hpage); | ||
| 1008 | return res; | ||
| 996 | } else { | 1009 | } else { |
| 997 | action_result(pfn, "high order kernel", IGNORED); | 1010 | action_result(pfn, "high order kernel", IGNORED); |
| 998 | return -EBUSY; | 1011 | return -EBUSY; |
| @@ -1147,16 +1160,26 @@ int unpoison_memory(unsigned long pfn) | |||
| 1147 | page = compound_head(p); | 1160 | page = compound_head(p); |
| 1148 | 1161 | ||
| 1149 | if (!PageHWPoison(p)) { | 1162 | if (!PageHWPoison(p)) { |
| 1150 | pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn); | 1163 | pr_info("MCE: Page was already unpoisoned %#lx\n", pfn); |
| 1151 | return 0; | 1164 | return 0; |
| 1152 | } | 1165 | } |
| 1153 | 1166 | ||
| 1154 | nr_pages = 1 << compound_order(page); | 1167 | nr_pages = 1 << compound_order(page); |
| 1155 | 1168 | ||
| 1156 | if (!get_page_unless_zero(page)) { | 1169 | if (!get_page_unless_zero(page)) { |
| 1170 | /* | ||
| 1171 | * Since HWPoisoned hugepage should have non-zero refcount, | ||
| 1172 | * race between memory failure and unpoison seems to happen. | ||
| 1173 | * In such case unpoison fails and memory failure runs | ||
| 1174 | * to the end. | ||
| 1175 | */ | ||
| 1176 | if (PageHuge(page)) { | ||
| 1177 | pr_debug("MCE: Memory failure is now running on free hugepage %#lx\n", pfn); | ||
| 1178 | return 0; | ||
| 1179 | } | ||
| 1157 | if (TestClearPageHWPoison(p)) | 1180 | if (TestClearPageHWPoison(p)) |
| 1158 | atomic_long_sub(nr_pages, &mce_bad_pages); | 1181 | atomic_long_sub(nr_pages, &mce_bad_pages); |
| 1159 | pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn); | 1182 | pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn); |
| 1160 | return 0; | 1183 | return 0; |
| 1161 | } | 1184 | } |
| 1162 | 1185 | ||
| @@ -1168,12 +1191,12 @@ int unpoison_memory(unsigned long pfn) | |||
| 1168 | * the free buddy page pool. | 1191 | * the free buddy page pool. |
| 1169 | */ | 1192 | */ |
| 1170 | if (TestClearPageHWPoison(page)) { | 1193 | if (TestClearPageHWPoison(page)) { |
| 1171 | pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn); | 1194 | pr_info("MCE: Software-unpoisoned page %#lx\n", pfn); |
| 1172 | atomic_long_sub(nr_pages, &mce_bad_pages); | 1195 | atomic_long_sub(nr_pages, &mce_bad_pages); |
| 1173 | freeit = 1; | 1196 | freeit = 1; |
| 1197 | if (PageHuge(page)) | ||
| 1198 | clear_page_hwpoison_huge_page(page); | ||
| 1174 | } | 1199 | } |
| 1175 | if (PageHuge(p)) | ||
| 1176 | clear_page_hwpoison_huge_page(page); | ||
| 1177 | unlock_page(page); | 1200 | unlock_page(page); |
| 1178 | 1201 | ||
| 1179 | put_page(page); | 1202 | put_page(page); |
| @@ -1187,7 +1210,11 @@ EXPORT_SYMBOL(unpoison_memory); | |||
| 1187 | static struct page *new_page(struct page *p, unsigned long private, int **x) | 1210 | static struct page *new_page(struct page *p, unsigned long private, int **x) |
| 1188 | { | 1211 | { |
| 1189 | int nid = page_to_nid(p); | 1212 | int nid = page_to_nid(p); |
| 1190 | return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); | 1213 | if (PageHuge(p)) |
| 1214 | return alloc_huge_page_node(page_hstate(compound_head(p)), | ||
| 1215 | nid); | ||
| 1216 | else | ||
| 1217 | return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); | ||
| 1191 | } | 1218 | } |
| 1192 | 1219 | ||
| 1193 | /* | 1220 | /* |
| @@ -1204,25 +1231,31 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags) | |||
| 1204 | return 1; | 1231 | return 1; |
| 1205 | 1232 | ||
| 1206 | /* | 1233 | /* |
| 1207 | * The lock_system_sleep prevents a race with memory hotplug, | 1234 | * The lock_memory_hotplug prevents a race with memory hotplug. |
| 1208 | * because the isolation assumes there's only a single user. | ||
| 1209 | * This is a big hammer, a better would be nicer. | 1235 | * This is a big hammer, a better would be nicer. |
| 1210 | */ | 1236 | */ |
| 1211 | lock_system_sleep(); | 1237 | lock_memory_hotplug(); |
| 1212 | 1238 | ||
| 1213 | /* | 1239 | /* |
| 1214 | * Isolate the page, so that it doesn't get reallocated if it | 1240 | * Isolate the page, so that it doesn't get reallocated if it |
| 1215 | * was free. | 1241 | * was free. |
| 1216 | */ | 1242 | */ |
| 1217 | set_migratetype_isolate(p); | 1243 | set_migratetype_isolate(p); |
| 1244 | /* | ||
| 1245 | * When the target page is a free hugepage, just remove it | ||
| 1246 | * from free hugepage list. | ||
| 1247 | */ | ||
| 1218 | if (!get_page_unless_zero(compound_head(p))) { | 1248 | if (!get_page_unless_zero(compound_head(p))) { |
| 1219 | if (is_free_buddy_page(p)) { | 1249 | if (PageHuge(p)) { |
| 1220 | pr_debug("get_any_page: %#lx free buddy page\n", pfn); | 1250 | pr_info("get_any_page: %#lx free huge page\n", pfn); |
| 1251 | ret = dequeue_hwpoisoned_huge_page(compound_head(p)); | ||
| 1252 | } else if (is_free_buddy_page(p)) { | ||
| 1253 | pr_info("get_any_page: %#lx free buddy page\n", pfn); | ||
| 1221 | /* Set hwpoison bit while page is still isolated */ | 1254 | /* Set hwpoison bit while page is still isolated */ |
| 1222 | SetPageHWPoison(p); | 1255 | SetPageHWPoison(p); |
| 1223 | ret = 0; | 1256 | ret = 0; |
| 1224 | } else { | 1257 | } else { |
| 1225 | pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n", | 1258 | pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n", |
| 1226 | pfn, p->flags); | 1259 | pfn, p->flags); |
| 1227 | ret = -EIO; | 1260 | ret = -EIO; |
| 1228 | } | 1261 | } |
| @@ -1231,7 +1264,47 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags) | |||
| 1231 | ret = 1; | 1264 | ret = 1; |
| 1232 | } | 1265 | } |
| 1233 | unset_migratetype_isolate(p); | 1266 | unset_migratetype_isolate(p); |
| 1234 | unlock_system_sleep(); | 1267 | unlock_memory_hotplug(); |
| 1268 | return ret; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | static int soft_offline_huge_page(struct page *page, int flags) | ||
| 1272 | { | ||
| 1273 | int ret; | ||
| 1274 | unsigned long pfn = page_to_pfn(page); | ||
| 1275 | struct page *hpage = compound_head(page); | ||
| 1276 | LIST_HEAD(pagelist); | ||
| 1277 | |||
| 1278 | ret = get_any_page(page, pfn, flags); | ||
| 1279 | if (ret < 0) | ||
| 1280 | return ret; | ||
| 1281 | if (ret == 0) | ||
| 1282 | goto done; | ||
| 1283 | |||
| 1284 | if (PageHWPoison(hpage)) { | ||
| 1285 | put_page(hpage); | ||
| 1286 | pr_debug("soft offline: %#lx hugepage already poisoned\n", pfn); | ||
| 1287 | return -EBUSY; | ||
| 1288 | } | ||
| 1289 | |||
| 1290 | /* Keep page count to indicate a given hugepage is isolated. */ | ||
| 1291 | |||
| 1292 | list_add(&hpage->lru, &pagelist); | ||
| 1293 | ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0); | ||
| 1294 | if (ret) { | ||
| 1295 | putback_lru_pages(&pagelist); | ||
| 1296 | pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", | ||
| 1297 | pfn, ret, page->flags); | ||
| 1298 | if (ret > 0) | ||
| 1299 | ret = -EIO; | ||
| 1300 | return ret; | ||
| 1301 | } | ||
| 1302 | done: | ||
| 1303 | if (!PageHWPoison(hpage)) | ||
| 1304 | atomic_long_add(1 << compound_order(hpage), &mce_bad_pages); | ||
| 1305 | set_page_hwpoison_huge_page(hpage); | ||
| 1306 | dequeue_hwpoisoned_huge_page(hpage); | ||
| 1307 | /* keep elevated page count for bad page */ | ||
| 1235 | return ret; | 1308 | return ret; |
| 1236 | } | 1309 | } |
| 1237 | 1310 | ||
| @@ -1262,6 +1335,9 @@ int soft_offline_page(struct page *page, int flags) | |||
| 1262 | int ret; | 1335 | int ret; |
| 1263 | unsigned long pfn = page_to_pfn(page); | 1336 | unsigned long pfn = page_to_pfn(page); |
| 1264 | 1337 | ||
| 1338 | if (PageHuge(page)) | ||
| 1339 | return soft_offline_huge_page(page, flags); | ||
| 1340 | |||
| 1265 | ret = get_any_page(page, pfn, flags); | 1341 | ret = get_any_page(page, pfn, flags); |
| 1266 | if (ret < 0) | 1342 | if (ret < 0) |
| 1267 | return ret; | 1343 | return ret; |
| @@ -1288,7 +1364,7 @@ int soft_offline_page(struct page *page, int flags) | |||
| 1288 | goto done; | 1364 | goto done; |
| 1289 | } | 1365 | } |
| 1290 | if (!PageLRU(page)) { | 1366 | if (!PageLRU(page)) { |
| 1291 | pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n", | 1367 | pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", |
| 1292 | pfn, page->flags); | 1368 | pfn, page->flags); |
| 1293 | return -EIO; | 1369 | return -EIO; |
| 1294 | } | 1370 | } |
| @@ -1302,7 +1378,7 @@ int soft_offline_page(struct page *page, int flags) | |||
| 1302 | if (PageHWPoison(page)) { | 1378 | if (PageHWPoison(page)) { |
| 1303 | unlock_page(page); | 1379 | unlock_page(page); |
| 1304 | put_page(page); | 1380 | put_page(page); |
| 1305 | pr_debug("soft offline: %#lx page already poisoned\n", pfn); | 1381 | pr_info("soft offline: %#lx page already poisoned\n", pfn); |
| 1306 | return -EBUSY; | 1382 | return -EBUSY; |
| 1307 | } | 1383 | } |
| 1308 | 1384 | ||
| @@ -1323,7 +1399,7 @@ int soft_offline_page(struct page *page, int flags) | |||
| 1323 | put_page(page); | 1399 | put_page(page); |
| 1324 | if (ret == 1) { | 1400 | if (ret == 1) { |
| 1325 | ret = 0; | 1401 | ret = 0; |
| 1326 | pr_debug("soft_offline: %#lx: invalidated\n", pfn); | 1402 | pr_info("soft_offline: %#lx: invalidated\n", pfn); |
| 1327 | goto done; | 1403 | goto done; |
| 1328 | } | 1404 | } |
| 1329 | 1405 | ||
| @@ -1339,13 +1415,13 @@ int soft_offline_page(struct page *page, int flags) | |||
| 1339 | list_add(&page->lru, &pagelist); | 1415 | list_add(&page->lru, &pagelist); |
| 1340 | ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0); | 1416 | ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0); |
| 1341 | if (ret) { | 1417 | if (ret) { |
| 1342 | pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", | 1418 | pr_info("soft offline: %#lx: migration failed %d, type %lx\n", |
| 1343 | pfn, ret, page->flags); | 1419 | pfn, ret, page->flags); |
| 1344 | if (ret > 0) | 1420 | if (ret > 0) |
| 1345 | ret = -EIO; | 1421 | ret = -EIO; |
| 1346 | } | 1422 | } |
| 1347 | } else { | 1423 | } else { |
| 1348 | pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", | 1424 | pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", |
| 1349 | pfn, ret, page_count(page), page->flags); | 1425 | pfn, ret, page_count(page), page->flags); |
| 1350 | } | 1426 | } |
| 1351 | if (ret) | 1427 | if (ret) |
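Taken together, the memory-failure.c changes teach both the hard and soft offline paths about hugepages: a poisoned free hugepage is pulled off the free list via dequeue_hwpoisoned_huge_page(), and an in-use one is first migrated with migrate_huge_pages() before being retired. A userspace sketch that exercises the soft-offline path (assumptions: CONFIG_MEMORY_FAILURE is enabled, the task has CAP_SYS_ADMIN, hugepages are reserved via vm.nr_hugepages, and the fallback constants below match asm-generic headers on this architecture):

	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	#ifndef MADV_SOFT_OFFLINE
	#define MADV_SOFT_OFFLINE 101		/* assumed value, see asm-generic/mman-common.h */
	#endif
	#ifndef MAP_HUGETLB
	#define MAP_HUGETLB 0x40000		/* assumed value for x86 */
	#endif

	int main(void)
	{
		size_t len = 2UL << 20;		/* one 2MB hugepage on x86 */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap(MAP_HUGETLB)");
			return 1;
		}
		memset(p, 0x5a, len);		/* fault the hugepage in */

		/* Data is migrated to a fresh hugepage; the old one is retired. */
		if (madvise(p, len, MADV_SOFT_OFFLINE))
			perror("madvise(MADV_SOFT_OFFLINE)");

		return 0;
	}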
diff --git a/mm/memory.c b/mm/memory.c index 98b58fecedef..02e48aa0ed13 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -736,7 +736,7 @@ again: | |||
| 736 | dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); | 736 | dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); |
| 737 | if (!dst_pte) | 737 | if (!dst_pte) |
| 738 | return -ENOMEM; | 738 | return -ENOMEM; |
| 739 | src_pte = pte_offset_map_nested(src_pmd, addr); | 739 | src_pte = pte_offset_map(src_pmd, addr); |
| 740 | src_ptl = pte_lockptr(src_mm, src_pmd); | 740 | src_ptl = pte_lockptr(src_mm, src_pmd); |
| 741 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); | 741 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
| 742 | orig_src_pte = src_pte; | 742 | orig_src_pte = src_pte; |
| @@ -767,7 +767,7 @@ again: | |||
| 767 | 767 | ||
| 768 | arch_leave_lazy_mmu_mode(); | 768 | arch_leave_lazy_mmu_mode(); |
| 769 | spin_unlock(src_ptl); | 769 | spin_unlock(src_ptl); |
| 770 | pte_unmap_nested(orig_src_pte); | 770 | pte_unmap(orig_src_pte); |
| 771 | add_mm_rss_vec(dst_mm, rss); | 771 | add_mm_rss_vec(dst_mm, rss); |
| 772 | pte_unmap_unlock(orig_dst_pte, dst_ptl); | 772 | pte_unmap_unlock(orig_dst_pte, dst_ptl); |
| 773 | cond_resched(); | 773 | cond_resched(); |
| @@ -1450,7 +1450,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1450 | if (ret & VM_FAULT_OOM) | 1450 | if (ret & VM_FAULT_OOM) |
| 1451 | return i ? i : -ENOMEM; | 1451 | return i ? i : -ENOMEM; |
| 1452 | if (ret & | 1452 | if (ret & |
| 1453 | (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) | 1453 | (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE| |
| 1454 | VM_FAULT_SIGBUS)) | ||
| 1454 | return i ? i : -EFAULT; | 1455 | return i ? i : -EFAULT; |
| 1455 | BUG(); | 1456 | BUG(); |
| 1456 | } | 1457 | } |
| @@ -1590,7 +1591,7 @@ struct page *get_dump_page(unsigned long addr) | |||
| 1590 | } | 1591 | } |
| 1591 | #endif /* CONFIG_ELF_CORE */ | 1592 | #endif /* CONFIG_ELF_CORE */ |
| 1592 | 1593 | ||
| 1593 | pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, | 1594 | pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, |
| 1594 | spinlock_t **ptl) | 1595 | spinlock_t **ptl) |
| 1595 | { | 1596 | { |
| 1596 | pgd_t * pgd = pgd_offset(mm, addr); | 1597 | pgd_t * pgd = pgd_offset(mm, addr); |
| @@ -2079,7 +2080,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo | |||
| 2079 | * zeroes. | 2080 | * zeroes. |
| 2080 | */ | 2081 | */ |
| 2081 | if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) | 2082 | if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) |
| 2082 | memset(kaddr, 0, PAGE_SIZE); | 2083 | clear_page(kaddr); |
| 2083 | kunmap_atomic(kaddr, KM_USER0); | 2084 | kunmap_atomic(kaddr, KM_USER0); |
| 2084 | flush_dcache_page(dst); | 2085 | flush_dcache_page(dst); |
| 2085 | } else | 2086 | } else |
| @@ -2107,6 +2108,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo | |||
| 2107 | static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | 2108 | static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 2108 | unsigned long address, pte_t *page_table, pmd_t *pmd, | 2109 | unsigned long address, pte_t *page_table, pmd_t *pmd, |
| 2109 | spinlock_t *ptl, pte_t orig_pte) | 2110 | spinlock_t *ptl, pte_t orig_pte) |
| 2111 | __releases(ptl) | ||
| 2110 | { | 2112 | { |
| 2111 | struct page *old_page, *new_page; | 2113 | struct page *old_page, *new_page; |
| 2112 | pte_t entry; | 2114 | pte_t entry; |
| @@ -2626,6 +2628,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2626 | struct page *page, *swapcache = NULL; | 2628 | struct page *page, *swapcache = NULL; |
| 2627 | swp_entry_t entry; | 2629 | swp_entry_t entry; |
| 2628 | pte_t pte; | 2630 | pte_t pte; |
| 2631 | int locked; | ||
| 2629 | struct mem_cgroup *ptr = NULL; | 2632 | struct mem_cgroup *ptr = NULL; |
| 2630 | int exclusive = 0; | 2633 | int exclusive = 0; |
| 2631 | int ret = 0; | 2634 | int ret = 0; |
| @@ -2676,8 +2679,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2676 | goto out_release; | 2679 | goto out_release; |
| 2677 | } | 2680 | } |
| 2678 | 2681 | ||
| 2679 | lock_page(page); | 2682 | locked = lock_page_or_retry(page, mm, flags); |
| 2680 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); | 2683 | delayacct_clear_flag(DELAYACCT_PF_SWAPIN); |
| 2684 | if (!locked) { | ||
| 2685 | ret |= VM_FAULT_RETRY; | ||
| 2686 | goto out_release; | ||
| 2687 | } | ||
| 2681 | 2688 | ||
| 2682 | /* | 2689 | /* |
| 2683 | * Make sure try_to_free_swap or reuse_swap_page or swapoff did not | 2690 | * Make sure try_to_free_swap or reuse_swap_page or swapoff did not |
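do_swap_page() can now bail out with VM_FAULT_RETRY instead of sleeping on the page lock while holding mmap_sem. The companion helper lives in include/linux/pagemap.h (outside this mm/ diffstat); roughly, and subject to the exact definition there:

	/*
	 * Sketch of lock_page_or_retry(): returns 1 with the page locked, or 0
	 * after __lock_page_or_retry() has dropped mmap_sem and waited for the
	 * lock, in which case the fault must be restarted (VM_FAULT_RETRY).
	 */
	static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
					     unsigned int flags)
	{
		might_sleep();
		return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
	}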
| @@ -2926,7 +2933,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 2926 | vmf.page = NULL; | 2933 | vmf.page = NULL; |
| 2927 | 2934 | ||
| 2928 | ret = vma->vm_ops->fault(vma, &vmf); | 2935 | ret = vma->vm_ops->fault(vma, &vmf); |
| 2929 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) | 2936 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | |
| 2937 | VM_FAULT_RETRY))) | ||
| 2930 | return ret; | 2938 | return ret; |
| 2931 | 2939 | ||
| 2932 | if (unlikely(PageHWPoison(vmf.page))) { | 2940 | if (unlikely(PageHWPoison(vmf.page))) { |
| @@ -3343,7 +3351,7 @@ int in_gate_area_no_task(unsigned long addr) | |||
| 3343 | 3351 | ||
| 3344 | #endif /* __HAVE_ARCH_GATE_AREA */ | 3352 | #endif /* __HAVE_ARCH_GATE_AREA */ |
| 3345 | 3353 | ||
| 3346 | static int follow_pte(struct mm_struct *mm, unsigned long address, | 3354 | static int __follow_pte(struct mm_struct *mm, unsigned long address, |
| 3347 | pte_t **ptepp, spinlock_t **ptlp) | 3355 | pte_t **ptepp, spinlock_t **ptlp) |
| 3348 | { | 3356 | { |
| 3349 | pgd_t *pgd; | 3357 | pgd_t *pgd; |
| @@ -3380,6 +3388,17 @@ out: | |||
| 3380 | return -EINVAL; | 3388 | return -EINVAL; |
| 3381 | } | 3389 | } |
| 3382 | 3390 | ||
| 3391 | static inline int follow_pte(struct mm_struct *mm, unsigned long address, | ||
| 3392 | pte_t **ptepp, spinlock_t **ptlp) | ||
| 3393 | { | ||
| 3394 | int res; | ||
| 3395 | |||
| 3396 | /* (void) is needed to make gcc happy */ | ||
| 3397 | (void) __cond_lock(*ptlp, | ||
| 3398 | !(res = __follow_pte(mm, address, ptepp, ptlp))); | ||
| 3399 | return res; | ||
| 3400 | } | ||
| 3401 | |||
| 3383 | /** | 3402 | /** |
| 3384 | * follow_pfn - look up PFN at a user virtual address | 3403 | * follow_pfn - look up PFN at a user virtual address |
| 3385 | * @vma: memory mapping | 3404 | * @vma: memory mapping |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index d4e940a26945..2c6523af5473 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -34,6 +34,23 @@ | |||
| 34 | 34 | ||
| 35 | #include "internal.h" | 35 | #include "internal.h" |
| 36 | 36 | ||
| 37 | DEFINE_MUTEX(mem_hotplug_mutex); | ||
| 38 | |||
| 39 | void lock_memory_hotplug(void) | ||
| 40 | { | ||
| 41 | mutex_lock(&mem_hotplug_mutex); | ||
| 42 | |||
| 43 | /* for exclusive hibernation if CONFIG_HIBERNATION=y */ | ||
| 44 | lock_system_sleep(); | ||
| 45 | } | ||
| 46 | |||
| 47 | void unlock_memory_hotplug(void) | ||
| 48 | { | ||
| 49 | unlock_system_sleep(); | ||
| 50 | mutex_unlock(&mem_hotplug_mutex); | ||
| 51 | } | ||
| 52 | |||
| 53 | |||
| 37 | /* add this memory to iomem resource */ | 54 | /* add this memory to iomem resource */ |
| 38 | static struct resource *register_memory_resource(u64 start, u64 size) | 55 | static struct resource *register_memory_resource(u64 start, u64 size) |
| 39 | { | 56 | { |
| @@ -493,7 +510,7 @@ int mem_online_node(int nid) | |||
| 493 | pg_data_t *pgdat; | 510 | pg_data_t *pgdat; |
| 494 | int ret; | 511 | int ret; |
| 495 | 512 | ||
| 496 | lock_system_sleep(); | 513 | lock_memory_hotplug(); |
| 497 | pgdat = hotadd_new_pgdat(nid, 0); | 514 | pgdat = hotadd_new_pgdat(nid, 0); |
| 498 | if (pgdat) { | 515 | if (pgdat) { |
| 499 | ret = -ENOMEM; | 516 | ret = -ENOMEM; |
| @@ -504,7 +521,7 @@ int mem_online_node(int nid) | |||
| 504 | BUG_ON(ret); | 521 | BUG_ON(ret); |
| 505 | 522 | ||
| 506 | out: | 523 | out: |
| 507 | unlock_system_sleep(); | 524 | unlock_memory_hotplug(); |
| 508 | return ret; | 525 | return ret; |
| 509 | } | 526 | } |
| 510 | 527 | ||
| @@ -516,7 +533,7 @@ int __ref add_memory(int nid, u64 start, u64 size) | |||
| 516 | struct resource *res; | 533 | struct resource *res; |
| 517 | int ret; | 534 | int ret; |
| 518 | 535 | ||
| 519 | lock_system_sleep(); | 536 | lock_memory_hotplug(); |
| 520 | 537 | ||
| 521 | res = register_memory_resource(start, size); | 538 | res = register_memory_resource(start, size); |
| 522 | ret = -EEXIST; | 539 | ret = -EEXIST; |
| @@ -563,7 +580,7 @@ error: | |||
| 563 | release_memory_resource(res); | 580 | release_memory_resource(res); |
| 564 | 581 | ||
| 565 | out: | 582 | out: |
| 566 | unlock_system_sleep(); | 583 | unlock_memory_hotplug(); |
| 567 | return ret; | 584 | return ret; |
| 568 | } | 585 | } |
| 569 | EXPORT_SYMBOL_GPL(add_memory); | 586 | EXPORT_SYMBOL_GPL(add_memory); |
| @@ -602,27 +619,14 @@ static struct page *next_active_pageblock(struct page *page) | |||
| 602 | /* Checks if this range of memory is likely to be hot-removable. */ | 619 | /* Checks if this range of memory is likely to be hot-removable. */ |
| 603 | int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) | 620 | int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) |
| 604 | { | 621 | { |
| 605 | int type; | ||
| 606 | struct page *page = pfn_to_page(start_pfn); | 622 | struct page *page = pfn_to_page(start_pfn); |
| 607 | struct page *end_page = page + nr_pages; | 623 | struct page *end_page = page + nr_pages; |
| 608 | 624 | ||
| 609 | /* Check the starting page of each pageblock within the range */ | 625 | /* Check the starting page of each pageblock within the range */ |
| 610 | for (; page < end_page; page = next_active_pageblock(page)) { | 626 | for (; page < end_page; page = next_active_pageblock(page)) { |
| 611 | type = get_pageblock_migratetype(page); | 627 | if (!is_pageblock_removable_nolock(page)) |
| 612 | |||
| 613 | /* | ||
| 614 | * A pageblock containing MOVABLE or free pages is considered | ||
| 615 | * removable | ||
| 616 | */ | ||
| 617 | if (type != MIGRATE_MOVABLE && !pageblock_free(page)) | ||
| 618 | return 0; | ||
| 619 | |||
| 620 | /* | ||
| 621 | * A pageblock starting with a PageReserved page is not | ||
| 622 | * considered removable. | ||
| 623 | */ | ||
| 624 | if (PageReserved(page)) | ||
| 625 | return 0; | 628 | return 0; |
| 629 | cond_resched(); | ||
| 626 | } | 630 | } |
| 627 | 631 | ||
| 628 | /* All pageblocks in the memory block are likely to be hot-removable */ | 632 | /* All pageblocks in the memory block are likely to be hot-removable */ |
| @@ -659,7 +663,7 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) | |||
| 659 | * Scanning pfn is much easier than scanning lru list. | 663 | * Scanning pfn is much easier than scanning lru list. |
| 660 | * Scan pfn from start to end and Find LRU page. | 664 | * Scan pfn from start to end and Find LRU page. |
| 661 | */ | 665 | */ |
| 662 | int scan_lru_pages(unsigned long start, unsigned long end) | 666 | static unsigned long scan_lru_pages(unsigned long start, unsigned long end) |
| 663 | { | 667 | { |
| 664 | unsigned long pfn; | 668 | unsigned long pfn; |
| 665 | struct page *page; | 669 | struct page *page; |
| @@ -709,29 +713,30 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
| 709 | page_is_file_cache(page)); | 713 | page_is_file_cache(page)); |
| 710 | 714 | ||
| 711 | } else { | 715 | } else { |
| 712 | /* Because we don't have big zone->lock, we should | ||
| 713 | check this again here. */ | ||
| 714 | if (page_count(page)) | ||
| 715 | not_managed++; | ||
| 716 | #ifdef CONFIG_DEBUG_VM | 716 | #ifdef CONFIG_DEBUG_VM |
| 717 | printk(KERN_ALERT "removing pfn %lx from LRU failed\n", | 717 | printk(KERN_ALERT "removing pfn %lx from LRU failed\n", |
| 718 | pfn); | 718 | pfn); |
| 719 | dump_page(page); | 719 | dump_page(page); |
| 720 | #endif | 720 | #endif |
| 721 | /* Because we don't have big zone->lock, we should | ||
| 722 | check this again here. */ | ||
| 723 | if (page_count(page)) { | ||
| 724 | not_managed++; | ||
| 725 | ret = -EBUSY; | ||
| 726 | break; | ||
| 727 | } | ||
| 721 | } | 728 | } |
| 722 | } | 729 | } |
| 723 | ret = -EBUSY; | 730 | if (!list_empty(&source)) { |
| 724 | if (not_managed) { | 731 | if (not_managed) { |
| 725 | if (!list_empty(&source)) | 732 | putback_lru_pages(&source); |
| 733 | goto out; | ||
| 734 | } | ||
| 735 | /* this function returns # of failed pages */ | ||
| 736 | ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1); | ||
| 737 | if (ret) | ||
| 726 | putback_lru_pages(&source); | 738 | putback_lru_pages(&source); |
| 727 | goto out; | ||
| 728 | } | 739 | } |
| 729 | ret = 0; | ||
| 730 | if (list_empty(&source)) | ||
| 731 | goto out; | ||
| 732 | /* this function returns # of failed pages */ | ||
| 733 | ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1); | ||
| 734 | |||
| 735 | out: | 740 | out: |
| 736 | return ret; | 741 | return ret; |
| 737 | } | 742 | } |
| @@ -803,7 +808,7 @@ static int offline_pages(unsigned long start_pfn, | |||
| 803 | if (!test_pages_in_a_zone(start_pfn, end_pfn)) | 808 | if (!test_pages_in_a_zone(start_pfn, end_pfn)) |
| 804 | return -EINVAL; | 809 | return -EINVAL; |
| 805 | 810 | ||
| 806 | lock_system_sleep(); | 811 | lock_memory_hotplug(); |
| 807 | 812 | ||
| 808 | zone = page_zone(pfn_to_page(start_pfn)); | 813 | zone = page_zone(pfn_to_page(start_pfn)); |
| 809 | node = zone_to_nid(zone); | 814 | node = zone_to_nid(zone); |
| @@ -892,7 +897,7 @@ repeat: | |||
| 892 | writeback_set_ratelimit(); | 897 | writeback_set_ratelimit(); |
| 893 | 898 | ||
| 894 | memory_notify(MEM_OFFLINE, &arg); | 899 | memory_notify(MEM_OFFLINE, &arg); |
| 895 | unlock_system_sleep(); | 900 | unlock_memory_hotplug(); |
| 896 | return 0; | 901 | return 0; |
| 897 | 902 | ||
| 898 | failed_removal: | 903 | failed_removal: |
| @@ -903,7 +908,7 @@ failed_removal: | |||
| 903 | undo_isolate_page_range(start_pfn, end_pfn); | 908 | undo_isolate_page_range(start_pfn, end_pfn); |
| 904 | 909 | ||
| 905 | out: | 910 | out: |
| 906 | unlock_system_sleep(); | 911 | unlock_memory_hotplug(); |
| 907 | return ret; | 912 | return ret; |
| 908 | } | 913 | } |
| 909 | 914 | ||
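The do_migrate_range() hunk above adopts the new migrate_pages() contract introduced by this series: migrate_pages() no longer puts failed pages back on the LRU itself, so every caller must do so. A hedged sketch of that calling pattern (my_new_page() and migrate_isolated_list() are made-up names used only for illustration):

static struct page *my_new_page(struct page *page, unsigned long private,
				int **result);	/* some new_page_t allocator callback */

static int migrate_isolated_list(struct list_head *pagelist)
{
	int ret;

	if (list_empty(pagelist))
		return 0;

	/* returns the number of pages that could not be migrated */
	ret = migrate_pages(pagelist, my_new_page, 0, 1);
	if (ret)
		putback_lru_pages(pagelist);	/* now the caller's responsibility */
	return ret;
}

The mempolicy.c and migrate.c hunks below convert their call sites to the same pattern.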
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f969da5dd8a2..11ff260fb282 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -924,15 +924,21 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, | |||
| 924 | nodemask_t nmask; | 924 | nodemask_t nmask; |
| 925 | LIST_HEAD(pagelist); | 925 | LIST_HEAD(pagelist); |
| 926 | int err = 0; | 926 | int err = 0; |
| 927 | struct vm_area_struct *vma; | ||
| 927 | 928 | ||
| 928 | nodes_clear(nmask); | 929 | nodes_clear(nmask); |
| 929 | node_set(source, nmask); | 930 | node_set(source, nmask); |
| 930 | 931 | ||
| 931 | check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, | 932 | vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask, |
| 932 | flags | MPOL_MF_DISCONTIG_OK, &pagelist); | 933 | flags | MPOL_MF_DISCONTIG_OK, &pagelist); |
| 934 | if (IS_ERR(vma)) | ||
| 935 | return PTR_ERR(vma); | ||
| 933 | 936 | ||
| 934 | if (!list_empty(&pagelist)) | 937 | if (!list_empty(&pagelist)) { |
| 935 | err = migrate_pages(&pagelist, new_node_page, dest, 0); | 938 | err = migrate_pages(&pagelist, new_node_page, dest, 0); |
| 939 | if (err) | ||
| 940 | putback_lru_pages(&pagelist); | ||
| 941 | } | ||
| 936 | 942 | ||
| 937 | return err; | 943 | return err; |
| 938 | } | 944 | } |
| @@ -1147,9 +1153,12 @@ static long do_mbind(unsigned long start, unsigned long len, | |||
| 1147 | 1153 | ||
| 1148 | err = mbind_range(mm, start, end, new); | 1154 | err = mbind_range(mm, start, end, new); |
| 1149 | 1155 | ||
| 1150 | if (!list_empty(&pagelist)) | 1156 | if (!list_empty(&pagelist)) { |
| 1151 | nr_failed = migrate_pages(&pagelist, new_vma_page, | 1157 | nr_failed = migrate_pages(&pagelist, new_vma_page, |
| 1152 | (unsigned long)vma, 0); | 1158 | (unsigned long)vma, 0); |
| 1159 | if (nr_failed) | ||
| 1160 | putback_lru_pages(&pagelist); | ||
| 1161 | } | ||
| 1153 | 1162 | ||
| 1154 | if (!err && nr_failed && (flags & MPOL_MF_STRICT)) | 1163 | if (!err && nr_failed && (flags & MPOL_MF_STRICT)) |
| 1155 | err = -EIO; | 1164 | err = -EIO; |
| @@ -1298,15 +1307,18 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, | |||
| 1298 | goto out; | 1307 | goto out; |
| 1299 | 1308 | ||
| 1300 | /* Find the mm_struct */ | 1309 | /* Find the mm_struct */ |
| 1310 | rcu_read_lock(); | ||
| 1301 | read_lock(&tasklist_lock); | 1311 | read_lock(&tasklist_lock); |
| 1302 | task = pid ? find_task_by_vpid(pid) : current; | 1312 | task = pid ? find_task_by_vpid(pid) : current; |
| 1303 | if (!task) { | 1313 | if (!task) { |
| 1304 | read_unlock(&tasklist_lock); | 1314 | read_unlock(&tasklist_lock); |
| 1315 | rcu_read_unlock(); | ||
| 1305 | err = -ESRCH; | 1316 | err = -ESRCH; |
| 1306 | goto out; | 1317 | goto out; |
| 1307 | } | 1318 | } |
| 1308 | mm = get_task_mm(task); | 1319 | mm = get_task_mm(task); |
| 1309 | read_unlock(&tasklist_lock); | 1320 | read_unlock(&tasklist_lock); |
| 1321 | rcu_read_unlock(); | ||
| 1310 | 1322 | ||
| 1311 | err = -EINVAL; | 1323 | err = -EINVAL; |
| 1312 | if (!mm) | 1324 | if (!mm) |
| @@ -1588,7 +1600,7 @@ unsigned slab_node(struct mempolicy *policy) | |||
| 1588 | (void)first_zones_zonelist(zonelist, highest_zoneidx, | 1600 | (void)first_zones_zonelist(zonelist, highest_zoneidx, |
| 1589 | &policy->v.nodes, | 1601 | &policy->v.nodes, |
| 1590 | &zone); | 1602 | &zone); |
| 1591 | return zone->node; | 1603 | return zone ? zone->node : numa_node_id(); |
| 1592 | } | 1604 | } |
| 1593 | 1605 | ||
| 1594 | default: | 1606 | default: |
diff --git a/mm/migrate.c b/mm/migrate.c index 38e7cad782f4..fe5a3c6a5426 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #include <linux/security.h> | 32 | #include <linux/security.h> |
| 33 | #include <linux/memcontrol.h> | 33 | #include <linux/memcontrol.h> |
| 34 | #include <linux/syscalls.h> | 34 | #include <linux/syscalls.h> |
| 35 | #include <linux/hugetlb.h> | ||
| 35 | #include <linux/gfp.h> | 36 | #include <linux/gfp.h> |
| 36 | 37 | ||
| 37 | #include "internal.h" | 38 | #include "internal.h" |
| @@ -95,26 +96,34 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, | |||
| 95 | pte_t *ptep, pte; | 96 | pte_t *ptep, pte; |
| 96 | spinlock_t *ptl; | 97 | spinlock_t *ptl; |
| 97 | 98 | ||
| 98 | pgd = pgd_offset(mm, addr); | 99 | if (unlikely(PageHuge(new))) { |
| 99 | if (!pgd_present(*pgd)) | 100 | ptep = huge_pte_offset(mm, addr); |
| 100 | goto out; | 101 | if (!ptep) |
| 102 | goto out; | ||
| 103 | ptl = &mm->page_table_lock; | ||
| 104 | } else { | ||
| 105 | pgd = pgd_offset(mm, addr); | ||
| 106 | if (!pgd_present(*pgd)) | ||
| 107 | goto out; | ||
| 101 | 108 | ||
| 102 | pud = pud_offset(pgd, addr); | 109 | pud = pud_offset(pgd, addr); |
| 103 | if (!pud_present(*pud)) | 110 | if (!pud_present(*pud)) |
| 104 | goto out; | 111 | goto out; |
| 105 | 112 | ||
| 106 | pmd = pmd_offset(pud, addr); | 113 | pmd = pmd_offset(pud, addr); |
| 107 | if (!pmd_present(*pmd)) | 114 | if (!pmd_present(*pmd)) |
| 108 | goto out; | 115 | goto out; |
| 109 | 116 | ||
| 110 | ptep = pte_offset_map(pmd, addr); | 117 | ptep = pte_offset_map(pmd, addr); |
| 111 | 118 | ||
| 112 | if (!is_swap_pte(*ptep)) { | 119 | if (!is_swap_pte(*ptep)) { |
| 113 | pte_unmap(ptep); | 120 | pte_unmap(ptep); |
| 114 | goto out; | 121 | goto out; |
| 115 | } | 122 | } |
| 123 | |||
| 124 | ptl = pte_lockptr(mm, pmd); | ||
| 125 | } | ||
| 116 | 126 | ||
| 117 | ptl = pte_lockptr(mm, pmd); | ||
| 118 | spin_lock(ptl); | 127 | spin_lock(ptl); |
| 119 | pte = *ptep; | 128 | pte = *ptep; |
| 120 | if (!is_swap_pte(pte)) | 129 | if (!is_swap_pte(pte)) |
| @@ -130,10 +139,19 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, | |||
| 130 | pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); | 139 | pte = pte_mkold(mk_pte(new, vma->vm_page_prot)); |
| 131 | if (is_write_migration_entry(entry)) | 140 | if (is_write_migration_entry(entry)) |
| 132 | pte = pte_mkwrite(pte); | 141 | pte = pte_mkwrite(pte); |
| 142 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 143 | if (PageHuge(new)) | ||
| 144 | pte = pte_mkhuge(pte); | ||
| 145 | #endif | ||
| 133 | flush_cache_page(vma, addr, pte_pfn(pte)); | 146 | flush_cache_page(vma, addr, pte_pfn(pte)); |
| 134 | set_pte_at(mm, addr, ptep, pte); | 147 | set_pte_at(mm, addr, ptep, pte); |
| 135 | 148 | ||
| 136 | if (PageAnon(new)) | 149 | if (PageHuge(new)) { |
| 150 | if (PageAnon(new)) | ||
| 151 | hugepage_add_anon_rmap(new, vma, addr); | ||
| 152 | else | ||
| 153 | page_dup_rmap(new); | ||
| 154 | } else if (PageAnon(new)) | ||
| 137 | page_add_anon_rmap(new, vma, addr); | 155 | page_add_anon_rmap(new, vma, addr); |
| 138 | else | 156 | else |
| 139 | page_add_file_rmap(new); | 157 | page_add_file_rmap(new); |
| @@ -276,11 +294,59 @@ static int migrate_page_move_mapping(struct address_space *mapping, | |||
| 276 | } | 294 | } |
| 277 | 295 | ||
| 278 | /* | 296 | /* |
| 297 | * The expected number of remaining references is the same as that | ||
| 298 | * of migrate_page_move_mapping(). | ||
| 299 | */ | ||
| 300 | int migrate_huge_page_move_mapping(struct address_space *mapping, | ||
| 301 | struct page *newpage, struct page *page) | ||
| 302 | { | ||
| 303 | int expected_count; | ||
| 304 | void **pslot; | ||
| 305 | |||
| 306 | if (!mapping) { | ||
| 307 | if (page_count(page) != 1) | ||
| 308 | return -EAGAIN; | ||
| 309 | return 0; | ||
| 310 | } | ||
| 311 | |||
| 312 | spin_lock_irq(&mapping->tree_lock); | ||
| 313 | |||
| 314 | pslot = radix_tree_lookup_slot(&mapping->page_tree, | ||
| 315 | page_index(page)); | ||
| 316 | |||
| 317 | expected_count = 2 + page_has_private(page); | ||
| 318 | if (page_count(page) != expected_count || | ||
| 319 | (struct page *)radix_tree_deref_slot(pslot) != page) { | ||
| 320 | spin_unlock_irq(&mapping->tree_lock); | ||
| 321 | return -EAGAIN; | ||
| 322 | } | ||
| 323 | |||
| 324 | if (!page_freeze_refs(page, expected_count)) { | ||
| 325 | spin_unlock_irq(&mapping->tree_lock); | ||
| 326 | return -EAGAIN; | ||
| 327 | } | ||
| 328 | |||
| 329 | get_page(newpage); | ||
| 330 | |||
| 331 | radix_tree_replace_slot(pslot, newpage); | ||
| 332 | |||
| 333 | page_unfreeze_refs(page, expected_count); | ||
| 334 | |||
| 335 | __put_page(page); | ||
| 336 | |||
| 337 | spin_unlock_irq(&mapping->tree_lock); | ||
| 338 | return 0; | ||
| 339 | } | ||
| 340 | |||
| 341 | /* | ||
| 279 | * Copy the page to its new location | 342 | * Copy the page to its new location |
| 280 | */ | 343 | */ |
| 281 | static void migrate_page_copy(struct page *newpage, struct page *page) | 344 | void migrate_page_copy(struct page *newpage, struct page *page) |
| 282 | { | 345 | { |
| 283 | copy_highpage(newpage, page); | 346 | if (PageHuge(page)) |
| 347 | copy_huge_page(newpage, page); | ||
| 348 | else | ||
| 349 | copy_highpage(newpage, page); | ||
| 284 | 350 | ||
| 285 | if (PageError(page)) | 351 | if (PageError(page)) |
| 286 | SetPageError(newpage); | 352 | SetPageError(newpage); |
| @@ -431,7 +497,6 @@ static int writeout(struct address_space *mapping, struct page *page) | |||
| 431 | .nr_to_write = 1, | 497 | .nr_to_write = 1, |
| 432 | .range_start = 0, | 498 | .range_start = 0, |
| 433 | .range_end = LLONG_MAX, | 499 | .range_end = LLONG_MAX, |
| 434 | .nonblocking = 1, | ||
| 435 | .for_reclaim = 1 | 500 | .for_reclaim = 1 |
| 436 | }; | 501 | }; |
| 437 | int rc; | 502 | int rc; |
| @@ -724,6 +789,92 @@ move_newpage: | |||
| 724 | } | 789 | } |
| 725 | 790 | ||
| 726 | /* | 791 | /* |
| 792 | * Counterpart of unmap_and_move_page() for hugepage migration. | ||
| 793 | * | ||
| 794 | * This function doesn't wait for the completion of hugepage I/O | ||
| 795 | * because there is no race between I/O and migration for hugepage. | ||
| 796 | * Note that currently hugepage I/O occurs only in direct I/O | ||
| 797 | * where no lock is held and PG_writeback is irrelevant, | ||
| 798 | * and the writeback status of all subpages is counted in the reference | ||
| 799 | * count of the head page (i.e. if all subpages of a 2MB hugepage are | ||
| 800 | * under direct I/O, the reference of the head page is 512 and a bit more.) | ||
| 801 | * This means that when we try to migrate a hugepage whose subpages are | ||
| 802 | * doing direct I/O, some references remain after try_to_unmap() and | ||
| 803 | * hugepage migration fails without data corruption. | ||
| 804 | * | ||
| 805 | * There is also no race when direct I/O is issued on the page under migration, | ||
| 806 | * because then pte is replaced with migration swap entry and direct I/O code | ||
| 807 | * will wait in the page fault for migration to complete. | ||
| 808 | */ | ||
| 809 | static int unmap_and_move_huge_page(new_page_t get_new_page, | ||
| 810 | unsigned long private, struct page *hpage, | ||
| 811 | int force, int offlining) | ||
| 812 | { | ||
| 813 | int rc = 0; | ||
| 814 | int *result = NULL; | ||
| 815 | struct page *new_hpage = get_new_page(hpage, private, &result); | ||
| 816 | int rcu_locked = 0; | ||
| 817 | struct anon_vma *anon_vma = NULL; | ||
| 818 | |||
| 819 | if (!new_hpage) | ||
| 820 | return -ENOMEM; | ||
| 821 | |||
| 822 | rc = -EAGAIN; | ||
| 823 | |||
| 824 | if (!trylock_page(hpage)) { | ||
| 825 | if (!force) | ||
| 826 | goto out; | ||
| 827 | lock_page(hpage); | ||
| 828 | } | ||
| 829 | |||
| 830 | if (PageAnon(hpage)) { | ||
| 831 | rcu_read_lock(); | ||
| 832 | rcu_locked = 1; | ||
| 833 | |||
| 834 | if (page_mapped(hpage)) { | ||
| 835 | anon_vma = page_anon_vma(hpage); | ||
| 836 | atomic_inc(&anon_vma->external_refcount); | ||
| 837 | } | ||
| 838 | } | ||
| 839 | |||
| 840 | try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); | ||
| 841 | |||
| 842 | if (!page_mapped(hpage)) | ||
| 843 | rc = move_to_new_page(new_hpage, hpage, 1); | ||
| 844 | |||
| 845 | if (rc) | ||
| 846 | remove_migration_ptes(hpage, hpage); | ||
| 847 | |||
| 848 | if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, | ||
| 849 | &anon_vma->lock)) { | ||
| 850 | int empty = list_empty(&anon_vma->head); | ||
| 851 | spin_unlock(&anon_vma->lock); | ||
| 852 | if (empty) | ||
| 853 | anon_vma_free(anon_vma); | ||
| 854 | } | ||
| 855 | |||
| 856 | if (rcu_locked) | ||
| 857 | rcu_read_unlock(); | ||
| 858 | out: | ||
| 859 | unlock_page(hpage); | ||
| 860 | |||
| 861 | if (rc != -EAGAIN) { | ||
| 862 | list_del(&hpage->lru); | ||
| 863 | put_page(hpage); | ||
| 864 | } | ||
| 865 | |||
| 866 | put_page(new_hpage); | ||
| 867 | |||
| 868 | if (result) { | ||
| 869 | if (rc) | ||
| 870 | *result = rc; | ||
| 871 | else | ||
| 872 | *result = page_to_nid(new_hpage); | ||
| 873 | } | ||
| 874 | return rc; | ||
| 875 | } | ||
| 876 | |||
| 877 | /* | ||
| 727 | * migrate_pages | 878 | * migrate_pages |
| 728 | * | 879 | * |
| 729 | * The function takes one list of pages to migrate and a function | 880 | * The function takes one list of pages to migrate and a function |
| @@ -732,8 +883,9 @@ move_newpage: | |||
| 732 | * | 883 | * |
| 733 | * The function returns after 10 attempts or if no pages | 884 | * The function returns after 10 attempts or if no pages |
| 734 | * are movable anymore because to has become empty | 885 | * are movable anymore because to has become empty |
| 735 | * or no retryable pages exist anymore. All pages will be | 886 | * or no retryable pages exist anymore. |
| 736 | * returned to the LRU or freed. | 887 | * Caller should call putback_lru_pages to return pages to the LRU |
| 888 | * or free list. | ||
| 737 | * | 889 | * |
| 738 | * Return: Number of pages not migrated or error code. | 890 | * Return: Number of pages not migrated or error code. |
| 739 | */ | 891 | */ |
| @@ -780,7 +932,51 @@ out: | |||
| 780 | if (!swapwrite) | 932 | if (!swapwrite) |
| 781 | current->flags &= ~PF_SWAPWRITE; | 933 | current->flags &= ~PF_SWAPWRITE; |
| 782 | 934 | ||
| 783 | putback_lru_pages(from); | 935 | if (rc) |
| 936 | return rc; | ||
| 937 | |||
| 938 | return nr_failed + retry; | ||
| 939 | } | ||
| 940 | |||
| 941 | int migrate_huge_pages(struct list_head *from, | ||
| 942 | new_page_t get_new_page, unsigned long private, int offlining) | ||
| 943 | { | ||
| 944 | int retry = 1; | ||
| 945 | int nr_failed = 0; | ||
| 946 | int pass = 0; | ||
| 947 | struct page *page; | ||
| 948 | struct page *page2; | ||
| 949 | int rc; | ||
| 950 | |||
| 951 | for (pass = 0; pass < 10 && retry; pass++) { | ||
| 952 | retry = 0; | ||
| 953 | |||
| 954 | list_for_each_entry_safe(page, page2, from, lru) { | ||
| 955 | cond_resched(); | ||
| 956 | |||
| 957 | rc = unmap_and_move_huge_page(get_new_page, | ||
| 958 | private, page, pass > 2, offlining); | ||
| 959 | |||
| 960 | switch(rc) { | ||
| 961 | case -ENOMEM: | ||
| 962 | goto out; | ||
| 963 | case -EAGAIN: | ||
| 964 | retry++; | ||
| 965 | break; | ||
| 966 | case 0: | ||
| 967 | break; | ||
| 968 | default: | ||
| 969 | /* Permanent failure */ | ||
| 970 | nr_failed++; | ||
| 971 | break; | ||
| 972 | } | ||
| 973 | } | ||
| 974 | } | ||
| 975 | rc = 0; | ||
| 976 | out: | ||
| 977 | |||
| 978 | list_for_each_entry_safe(page, page2, from, lru) | ||
| 979 | put_page(page); | ||
| 784 | 980 | ||
| 785 | if (rc) | 981 | if (rc) |
| 786 | return rc; | 982 | return rc; |
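migrate_huge_pages() mirrors migrate_pages() for hugepages: up to ten passes over the list, but the remaining references are dropped internally with put_page() rather than handed back through putback_lru_pages(). A hedged sketch of a possible caller (alloc_huge_dst() is a hypothetical new_page_t callback, not something this patch provides):

static struct page *alloc_huge_dst(struct page *hpage, unsigned long private,
				   int **result);	/* hypothetical destination allocator */

static int migrate_one_hugepage(struct page *hpage)
{
	LIST_HEAD(pagelist);

	/* the head page carries the reference for the whole hugepage */
	list_add(&hpage->lru, &pagelist);

	/* returns an error code or the number of hugepages not migrated */
	return migrate_huge_pages(&pagelist, alloc_huge_dst, 0, 0);
}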
| @@ -841,7 +1037,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm, | |||
| 841 | 1037 | ||
| 842 | err = -EFAULT; | 1038 | err = -EFAULT; |
| 843 | vma = find_vma(mm, pp->addr); | 1039 | vma = find_vma(mm, pp->addr); |
| 844 | if (!vma || !vma_migratable(vma)) | 1040 | if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) |
| 845 | goto set_status; | 1041 | goto set_status; |
| 846 | 1042 | ||
| 847 | page = follow_page(vma, pp->addr, FOLL_GET); | 1043 | page = follow_page(vma, pp->addr, FOLL_GET); |
| @@ -890,9 +1086,12 @@ set_status: | |||
| 890 | } | 1086 | } |
| 891 | 1087 | ||
| 892 | err = 0; | 1088 | err = 0; |
| 893 | if (!list_empty(&pagelist)) | 1089 | if (!list_empty(&pagelist)) { |
| 894 | err = migrate_pages(&pagelist, new_page_node, | 1090 | err = migrate_pages(&pagelist, new_page_node, |
| 895 | (unsigned long)pm, 0); | 1091 | (unsigned long)pm, 0); |
| 1092 | if (err) | ||
| 1093 | putback_lru_pages(&pagelist); | ||
| 1094 | } | ||
| 896 | 1095 | ||
| 897 | up_read(&mm->mmap_sem); | 1096 | up_read(&mm->mmap_sem); |
| 898 | return err; | 1097 | return err; |
| @@ -1005,7 +1204,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages, | |||
| 1005 | int err = -EFAULT; | 1204 | int err = -EFAULT; |
| 1006 | 1205 | ||
| 1007 | vma = find_vma(mm, addr); | 1206 | vma = find_vma(mm, addr); |
| 1008 | if (!vma) | 1207 | if (!vma || addr < vma->vm_start) |
| 1009 | goto set_status; | 1208 | goto set_status; |
| 1010 | 1209 | ||
| 1011 | page = follow_page(vma, addr, 0); | 1210 | page = follow_page(vma, addr, 0); |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/rmap.h> | 28 | #include <linux/rmap.h> |
| 29 | #include <linux/mmu_notifier.h> | 29 | #include <linux/mmu_notifier.h> |
| 30 | #include <linux/perf_event.h> | 30 | #include <linux/perf_event.h> |
| 31 | #include <linux/audit.h> | ||
| 31 | 32 | ||
| 32 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
| 33 | #include <asm/cacheflush.h> | 34 | #include <asm/cacheflush.h> |
| @@ -1108,6 +1109,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, | |||
| 1108 | unsigned long retval = -EBADF; | 1109 | unsigned long retval = -EBADF; |
| 1109 | 1110 | ||
| 1110 | if (!(flags & MAP_ANONYMOUS)) { | 1111 | if (!(flags & MAP_ANONYMOUS)) { |
| 1112 | audit_mmap_fd(fd, flags); | ||
| 1111 | if (unlikely(flags & MAP_HUGETLB)) | 1113 | if (unlikely(flags & MAP_HUGETLB)) |
| 1112 | return -EINVAL; | 1114 | return -EINVAL; |
| 1113 | file = fget(fd); | 1115 | file = fget(fd); |
diff --git a/mm/mprotect.c b/mm/mprotect.c index 2d1bf7cf8851..4c5133873097 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c | |||
| @@ -211,6 +211,7 @@ success: | |||
| 211 | mmu_notifier_invalidate_range_end(mm, start, end); | 211 | mmu_notifier_invalidate_range_end(mm, start, end); |
| 212 | vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); | 212 | vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); |
| 213 | vm_stat_account(mm, newflags, vma->vm_file, nrpages); | 213 | vm_stat_account(mm, newflags, vma->vm_file, nrpages); |
| 214 | perf_event_mmap(vma); | ||
| 214 | return 0; | 215 | return 0; |
| 215 | 216 | ||
| 216 | fail: | 217 | fail: |
| @@ -299,7 +300,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, | |||
| 299 | error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); | 300 | error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); |
| 300 | if (error) | 301 | if (error) |
| 301 | goto out; | 302 | goto out; |
| 302 | perf_event_mmap(vma); | ||
| 303 | nstart = tmp; | 303 | nstart = tmp; |
| 304 | 304 | ||
| 305 | if (nstart < prev->vm_end) | 305 | if (nstart < prev->vm_end) |
diff --git a/mm/mremap.c b/mm/mremap.c index cde56ee51ef7..563fbdd6293a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
| @@ -101,7 +101,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
| 101 | * pte locks because exclusive mmap_sem prevents deadlock. | 101 | * pte locks because exclusive mmap_sem prevents deadlock. |
| 102 | */ | 102 | */ |
| 103 | old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); | 103 | old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); |
| 104 | new_pte = pte_offset_map_nested(new_pmd, new_addr); | 104 | new_pte = pte_offset_map(new_pmd, new_addr); |
| 105 | new_ptl = pte_lockptr(mm, new_pmd); | 105 | new_ptl = pte_lockptr(mm, new_pmd); |
| 106 | if (new_ptl != old_ptl) | 106 | if (new_ptl != old_ptl) |
| 107 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); | 107 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
| @@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
| 119 | arch_leave_lazy_mmu_mode(); | 119 | arch_leave_lazy_mmu_mode(); |
| 120 | if (new_ptl != old_ptl) | 120 | if (new_ptl != old_ptl) |
| 121 | spin_unlock(new_ptl); | 121 | spin_unlock(new_ptl); |
| 122 | pte_unmap_nested(new_pte - 1); | 122 | pte_unmap(new_pte - 1); |
| 123 | pte_unmap_unlock(old_pte - 1, old_ptl); | 123 | pte_unmap_unlock(old_pte - 1, old_ptl); |
| 124 | if (mapping) | 124 | if (mapping) |
| 125 | spin_unlock(&mapping->i_mmap_lock); | 125 | spin_unlock(&mapping->i_mmap_lock); |
diff --git a/mm/nommu.c b/mm/nommu.c index 88ff091eb07a..27a9ac588516 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | #include <linux/personality.h> | 29 | #include <linux/personality.h> |
| 30 | #include <linux/security.h> | 30 | #include <linux/security.h> |
| 31 | #include <linux/syscalls.h> | 31 | #include <linux/syscalls.h> |
| 32 | #include <linux/audit.h> | ||
| 32 | 33 | ||
| 33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
| 34 | #include <asm/tlb.h> | 35 | #include <asm/tlb.h> |
| @@ -293,11 +294,58 @@ void *vmalloc(unsigned long size) | |||
| 293 | } | 294 | } |
| 294 | EXPORT_SYMBOL(vmalloc); | 295 | EXPORT_SYMBOL(vmalloc); |
| 295 | 296 | ||
| 297 | /* | ||
| 298 | * vzalloc - allocate virtually contiguous memory with zero fill | ||
| 299 | * | ||
| 300 | * @size: allocation size | ||
| 301 | * | ||
| 302 | * Allocate enough pages to cover @size from the page level | ||
| 303 | * allocator and map them into contiguous kernel virtual space. | ||
| 304 | * The memory allocated is set to zero. | ||
| 305 | * | ||
| 306 | * For tight control over page level allocator and protection flags | ||
| 307 | * use __vmalloc() instead. | ||
| 308 | */ | ||
| 309 | void *vzalloc(unsigned long size) | ||
| 310 | { | ||
| 311 | return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | ||
| 312 | PAGE_KERNEL); | ||
| 313 | } | ||
| 314 | EXPORT_SYMBOL(vzalloc); | ||
| 315 | |||
| 316 | /** | ||
| 317 | * vmalloc_node - allocate memory on a specific node | ||
| 318 | * @size: allocation size | ||
| 319 | * @node: numa node | ||
| 320 | * | ||
| 321 | * Allocate enough pages to cover @size from the page level | ||
| 322 | * allocator and map them into contiguous kernel virtual space. | ||
| 323 | * | ||
| 324 | * For tight control over page level allocator and protection flags | ||
| 325 | * use __vmalloc() instead. | ||
| 326 | */ | ||
| 296 | void *vmalloc_node(unsigned long size, int node) | 327 | void *vmalloc_node(unsigned long size, int node) |
| 297 | { | 328 | { |
| 298 | return vmalloc(size); | 329 | return vmalloc(size); |
| 299 | } | 330 | } |
| 300 | EXPORT_SYMBOL(vmalloc_node); | 331 | |
| 332 | /** | ||
| 333 | * vzalloc_node - allocate memory on a specific node with zero fill | ||
| 334 | * @size: allocation size | ||
| 335 | * @node: numa node | ||
| 336 | * | ||
| 337 | * Allocate enough pages to cover @size from the page level | ||
| 338 | * allocator and map them into contiguous kernel virtual space. | ||
| 339 | * The memory allocated is set to zero. | ||
| 340 | * | ||
| 341 | * For tight control over page level allocator and protection flags | ||
| 342 | * use __vmalloc() instead. | ||
| 343 | */ | ||
| 344 | void *vzalloc_node(unsigned long size, int node) | ||
| 345 | { | ||
| 346 | return vzalloc(size); | ||
| 347 | } | ||
| 348 | EXPORT_SYMBOL(vzalloc_node); | ||
| 301 | 349 | ||
| 302 | #ifndef PAGE_KERNEL_EXEC | 350 | #ifndef PAGE_KERNEL_EXEC |
| 303 | # define PAGE_KERNEL_EXEC PAGE_KERNEL | 351 | # define PAGE_KERNEL_EXEC PAGE_KERNEL |
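The new vzalloc()/vzalloc_node() wrappers on nommu simply add __GFP_ZERO, giving the usual vmalloc()-plus-memset() result in one call. A small hedged example (struct foo_entry and alloc_foo_table() are invented for illustration):

static struct foo_entry *alloc_foo_table(unsigned long nr_entries)
{
	/* same effect as vmalloc(nr_entries * sizeof(struct foo_entry))
	 * followed by memset(..., 0, nr_entries * sizeof(struct foo_entry)) */
	return vzalloc(nr_entries * sizeof(struct foo_entry));
}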
| @@ -1411,6 +1459,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, | |||
| 1411 | struct file *file = NULL; | 1459 | struct file *file = NULL; |
| 1412 | unsigned long retval = -EBADF; | 1460 | unsigned long retval = -EBADF; |
| 1413 | 1461 | ||
| 1462 | audit_mmap_fd(fd, flags); | ||
| 1414 | if (!(flags & MAP_ANONYMOUS)) { | 1463 | if (!(flags & MAP_ANONYMOUS)) { |
| 1415 | file = fget(fd); | 1464 | file = fget(fd); |
| 1416 | if (!file) | 1465 | if (!file) |
| @@ -1668,6 +1717,7 @@ void exit_mmap(struct mm_struct *mm) | |||
| 1668 | mm->mmap = vma->vm_next; | 1717 | mm->mmap = vma->vm_next; |
| 1669 | delete_vma_from_mm(vma); | 1718 | delete_vma_from_mm(vma); |
| 1670 | delete_vma(mm, vma); | 1719 | delete_vma(mm, vma); |
| 1720 | cond_resched(); | ||
| 1671 | } | 1721 | } |
| 1672 | 1722 | ||
| 1673 | kleave(""); | 1723 | kleave(""); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 4029583a1024..7dcca55ede7c 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -162,10 +162,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, | |||
| 162 | return 0; | 162 | return 0; |
| 163 | 163 | ||
| 164 | /* | 164 | /* |
| 165 | * Shortcut check for OOM_SCORE_ADJ_MIN so the entire heuristic doesn't | 165 | * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN |
| 166 | * need to be executed for something that cannot be killed. | 166 | * so the entire heuristic doesn't need to be executed for something |
| 167 | * that cannot be killed. | ||
| 167 | */ | 168 | */ |
| 168 | if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) { | 169 | if (atomic_read(&p->mm->oom_disable_count)) { |
| 169 | task_unlock(p); | 170 | task_unlock(p); |
| 170 | return 0; | 171 | return 0; |
| 171 | } | 172 | } |
| @@ -403,16 +404,40 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, | |||
| 403 | #define K(x) ((x) << (PAGE_SHIFT-10)) | 404 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
| 404 | static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) | 405 | static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) |
| 405 | { | 406 | { |
| 407 | struct task_struct *q; | ||
| 408 | struct mm_struct *mm; | ||
| 409 | |||
| 406 | p = find_lock_task_mm(p); | 410 | p = find_lock_task_mm(p); |
| 407 | if (!p) | 411 | if (!p) |
| 408 | return 1; | 412 | return 1; |
| 409 | 413 | ||
| 414 | /* mm cannot be safely dereferenced after task_unlock(p) */ | ||
| 415 | mm = p->mm; | ||
| 416 | |||
| 410 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", | 417 | pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", |
| 411 | task_pid_nr(p), p->comm, K(p->mm->total_vm), | 418 | task_pid_nr(p), p->comm, K(p->mm->total_vm), |
| 412 | K(get_mm_counter(p->mm, MM_ANONPAGES)), | 419 | K(get_mm_counter(p->mm, MM_ANONPAGES)), |
| 413 | K(get_mm_counter(p->mm, MM_FILEPAGES))); | 420 | K(get_mm_counter(p->mm, MM_FILEPAGES))); |
| 414 | task_unlock(p); | 421 | task_unlock(p); |
| 415 | 422 | ||
| 423 | /* | ||
| 424 | * Kill all processes sharing p->mm in other thread groups, if any. | ||
| 425 | * They don't get access to memory reserves or a higher scheduler | ||
| 426 | * priority, though, to avoid depletion of all memory or task | ||
| 427 | * starvation. This prevents mm->mmap_sem livelock when an oom killed | ||
| 428 | * task cannot exit because it requires the semaphore and it's contended | ||
| 429 | * by another thread trying to allocate memory itself. That thread will | ||
| 430 | * now get access to memory reserves since it has a pending fatal | ||
| 431 | * signal. | ||
| 432 | */ | ||
| 433 | for_each_process(q) | ||
| 434 | if (q->mm == mm && !same_thread_group(q, p)) { | ||
| 435 | task_lock(q); /* Protect ->comm from prctl() */ | ||
| 436 | pr_err("Kill process %d (%s) sharing same memory\n", | ||
| 437 | task_pid_nr(q), q->comm); | ||
| 438 | task_unlock(q); | ||
| 439 | force_sig(SIGKILL, q); | ||
| 440 | } | ||
| 416 | 441 | ||
| 417 | set_tsk_thread_flag(p, TIF_MEMDIE); | 442 | set_tsk_thread_flag(p, TIF_MEMDIE); |
| 418 | force_sig(SIGKILL, p); | 443 | force_sig(SIGKILL, p); |
| @@ -680,7 +705,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | |||
| 680 | read_lock(&tasklist_lock); | 705 | read_lock(&tasklist_lock); |
| 681 | if (sysctl_oom_kill_allocating_task && | 706 | if (sysctl_oom_kill_allocating_task && |
| 682 | !oom_unkillable_task(current, NULL, nodemask) && | 707 | !oom_unkillable_task(current, NULL, nodemask) && |
| 683 | (current->signal->oom_adj != OOM_DISABLE)) { | 708 | current->mm && !atomic_read(&current->mm->oom_disable_count)) { |
| 684 | /* | 709 | /* |
| 685 | * oom_kill_process() needs tasklist_lock held. If it returns | 710 | * oom_kill_process() needs tasklist_lock held. If it returns |
| 686 | * non-zero, current could not be killed so we must fallback to | 711 | * non-zero, current could not be killed so we must fallback to |
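Both oom_kill.c hunks above key off the new mm->oom_disable_count counter instead of the per-signal oom_adj/oom_score_adj values, so a task is skipped (or allowed to kill itself) based on whether any thread sharing its mm has OOM killing disabled. The bookkeeping for that counter lives outside this diff; the sketch below is only an assumption about how it is presumably maintained when a task's oom_score_adj crosses OOM_SCORE_ADJ_MIN, not code from the patch:

static void account_oom_score_adj_change(struct task_struct *p, int old_adj,
					 int new_adj)
{
	if (!p->mm)
		return;
	if (old_adj != OOM_SCORE_ADJ_MIN && new_adj == OOM_SCORE_ADJ_MIN)
		atomic_inc(&p->mm->oom_disable_count);
	else if (old_adj == OOM_SCORE_ADJ_MIN && new_adj != OOM_SCORE_ADJ_MIN)
		atomic_dec(&p->mm->oom_disable_count);
}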
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e3bccac1f025..b840afa89761 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -415,14 +415,8 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) | |||
| 415 | 415 | ||
| 416 | if (vm_dirty_bytes) | 416 | if (vm_dirty_bytes) |
| 417 | dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); | 417 | dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); |
| 418 | else { | 418 | else |
| 419 | int dirty_ratio; | 419 | dirty = (vm_dirty_ratio * available_memory) / 100; |
| 420 | |||
| 421 | dirty_ratio = vm_dirty_ratio; | ||
| 422 | if (dirty_ratio < 5) | ||
| 423 | dirty_ratio = 5; | ||
| 424 | dirty = (dirty_ratio * available_memory) / 100; | ||
| 425 | } | ||
| 426 | 420 | ||
| 427 | if (dirty_background_bytes) | 421 | if (dirty_background_bytes) |
| 428 | background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); | 422 | background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); |
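With the hard-coded 5% floor removed, very small vm_dirty_ratio settings now take effect as written: vm_dirty_ratio = 2 with, say, 1,000,000 available pages yields (2 * 1,000,000) / 100 = 20,000 dirty pages, whereas the old code silently raised the ratio to 5 and allowed 50,000. The arithmetic as a trivial sketch (the numbers above are made up for illustration):

static unsigned long example_dirty_thresh(unsigned long available_memory,
					  int dirty_ratio)
{
	/* mirrors the simplified branch above; no lower bound is applied */
	return (dirty_ratio * available_memory) / 100;
}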
| @@ -510,7 +504,7 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
| 510 | * catch-up. This avoids (excessively) small writeouts | 504 | * catch-up. This avoids (excessively) small writeouts |
| 511 | * when the bdi limits are ramping up. | 505 | * when the bdi limits are ramping up. |
| 512 | */ | 506 | */ |
| 513 | if (nr_reclaimable + nr_writeback < | 507 | if (nr_reclaimable + nr_writeback <= |
| 514 | (background_thresh + dirty_thresh) / 2) | 508 | (background_thresh + dirty_thresh) / 2) |
| 515 | break; | 509 | break; |
| 516 | 510 | ||
| @@ -542,8 +536,8 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
| 542 | * the last resort safeguard. | 536 | * the last resort safeguard. |
| 543 | */ | 537 | */ |
| 544 | dirty_exceeded = | 538 | dirty_exceeded = |
| 545 | (bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh) | 539 | (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh) |
| 546 | || (nr_reclaimable + nr_writeback >= dirty_thresh); | 540 | || (nr_reclaimable + nr_writeback > dirty_thresh); |
| 547 | 541 | ||
| 548 | if (!dirty_exceeded) | 542 | if (!dirty_exceeded) |
| 549 | break; | 543 | break; |
| @@ -1121,6 +1115,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) | |||
| 1121 | { | 1115 | { |
| 1122 | if (mapping_cap_account_dirty(mapping)) { | 1116 | if (mapping_cap_account_dirty(mapping)) { |
| 1123 | __inc_zone_page_state(page, NR_FILE_DIRTY); | 1117 | __inc_zone_page_state(page, NR_FILE_DIRTY); |
| 1118 | __inc_zone_page_state(page, NR_DIRTIED); | ||
| 1124 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | 1119 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); |
| 1125 | task_dirty_inc(current); | 1120 | task_dirty_inc(current); |
| 1126 | task_io_account_write(PAGE_CACHE_SIZE); | 1121 | task_io_account_write(PAGE_CACHE_SIZE); |
| @@ -1129,6 +1124,18 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) | |||
| 1129 | EXPORT_SYMBOL(account_page_dirtied); | 1124 | EXPORT_SYMBOL(account_page_dirtied); |
| 1130 | 1125 | ||
| 1131 | /* | 1126 | /* |
| 1127 | * Helper function for set_page_writeback family. | ||
| 1128 | * NOTE: Unlike account_page_dirtied this does not rely on being atomic | ||
| 1129 | * wrt interrupts. | ||
| 1130 | */ | ||
| 1131 | void account_page_writeback(struct page *page) | ||
| 1132 | { | ||
| 1133 | inc_zone_page_state(page, NR_WRITEBACK); | ||
| 1134 | inc_zone_page_state(page, NR_WRITTEN); | ||
| 1135 | } | ||
| 1136 | EXPORT_SYMBOL(account_page_writeback); | ||
| 1137 | |||
| 1138 | /* | ||
| 1132 | * For address_spaces which do not use buffers. Just tag the page as dirty in | 1139 | * For address_spaces which do not use buffers. Just tag the page as dirty in |
| 1133 | * its radix tree. | 1140 | * its radix tree. |
| 1134 | * | 1141 | * |
| @@ -1366,7 +1373,7 @@ int test_set_page_writeback(struct page *page) | |||
| 1366 | ret = TestSetPageWriteback(page); | 1373 | ret = TestSetPageWriteback(page); |
| 1367 | } | 1374 | } |
| 1368 | if (!ret) | 1375 | if (!ret) |
| 1369 | inc_zone_page_state(page, NR_WRITEBACK); | 1376 | account_page_writeback(page); |
| 1370 | return ret; | 1377 | return ret; |
| 1371 | 1378 | ||
| 1372 | } | 1379 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2a362c52fdf4..ff7e15872398 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -104,19 +104,24 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; | |||
| 104 | * only be modified with pm_mutex held, unless the suspend/hibernate code is | 104 | * only be modified with pm_mutex held, unless the suspend/hibernate code is |
| 105 | * guaranteed not to run in parallel with that modification). | 105 | * guaranteed not to run in parallel with that modification). |
| 106 | */ | 106 | */ |
| 107 | void set_gfp_allowed_mask(gfp_t mask) | 107 | |
| 108 | static gfp_t saved_gfp_mask; | ||
| 109 | |||
| 110 | void pm_restore_gfp_mask(void) | ||
| 108 | { | 111 | { |
| 109 | WARN_ON(!mutex_is_locked(&pm_mutex)); | 112 | WARN_ON(!mutex_is_locked(&pm_mutex)); |
| 110 | gfp_allowed_mask = mask; | 113 | if (saved_gfp_mask) { |
| 114 | gfp_allowed_mask = saved_gfp_mask; | ||
| 115 | saved_gfp_mask = 0; | ||
| 116 | } | ||
| 111 | } | 117 | } |
| 112 | 118 | ||
| 113 | gfp_t clear_gfp_allowed_mask(gfp_t mask) | 119 | void pm_restrict_gfp_mask(void) |
| 114 | { | 120 | { |
| 115 | gfp_t ret = gfp_allowed_mask; | ||
| 116 | |||
| 117 | WARN_ON(!mutex_is_locked(&pm_mutex)); | 121 | WARN_ON(!mutex_is_locked(&pm_mutex)); |
| 118 | gfp_allowed_mask &= ~mask; | 122 | WARN_ON(saved_gfp_mask); |
| 119 | return ret; | 123 | saved_gfp_mask = gfp_allowed_mask; |
| 124 | gfp_allowed_mask &= ~GFP_IOFS; | ||
| 120 | } | 125 | } |
| 121 | #endif /* CONFIG_PM_SLEEP */ | 126 | #endif /* CONFIG_PM_SLEEP */ |
| 122 | 127 | ||
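pm_restrict_gfp_mask()/pm_restore_gfp_mask() replace the old set/clear interface with a save-and-restore pair, both asserting that pm_mutex is held. A hedged sketch of the intended bracketing (the real callers live in kernel/power/ and are not part of this hunk; write_image() is a made-up placeholder):

static int hibernate_io_example(void)
{
	int error;

	mutex_lock(&pm_mutex);		/* both helpers WARN if pm_mutex is not held */
	pm_restrict_gfp_mask();		/* save gfp_allowed_mask, then clear GFP_IOFS */
	error = write_image();		/* hypothetical I/O under the restricted mask */
	pm_restore_gfp_mask();		/* restore the saved mask exactly once */
	mutex_unlock(&pm_mutex);
	return error;
}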
| @@ -531,7 +536,7 @@ static inline void __free_one_page(struct page *page, | |||
| 531 | * so it's less likely to be used soon and more likely to be merged | 536 | * so it's less likely to be used soon and more likely to be merged |
| 532 | * as a higher order page | 537 | * as a higher order page |
| 533 | */ | 538 | */ |
| 534 | if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) { | 539 | if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { |
| 535 | struct page *higher_page, *higher_buddy; | 540 | struct page *higher_page, *higher_buddy; |
| 536 | combined_idx = __find_combined_index(page_idx, order); | 541 | combined_idx = __find_combined_index(page_idx, order); |
| 537 | higher_page = page + combined_idx - page_idx; | 542 | higher_page = page + combined_idx - page_idx; |
| @@ -1907,7 +1912,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, | |||
| 1907 | preferred_zone, migratetype); | 1912 | preferred_zone, migratetype); |
| 1908 | 1913 | ||
| 1909 | if (!page && gfp_mask & __GFP_NOFAIL) | 1914 | if (!page && gfp_mask & __GFP_NOFAIL) |
| 1910 | congestion_wait(BLK_RW_ASYNC, HZ/50); | 1915 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); |
| 1911 | } while (!page && (gfp_mask & __GFP_NOFAIL)); | 1916 | } while (!page && (gfp_mask & __GFP_NOFAIL)); |
| 1912 | 1917 | ||
| 1913 | return page; | 1918 | return page; |
| @@ -1932,7 +1937,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask) | |||
| 1932 | const gfp_t wait = gfp_mask & __GFP_WAIT; | 1937 | const gfp_t wait = gfp_mask & __GFP_WAIT; |
| 1933 | 1938 | ||
| 1934 | /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ | 1939 | /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ |
| 1935 | BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH); | 1940 | BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); |
| 1936 | 1941 | ||
| 1937 | /* | 1942 | /* |
| 1938 | * The caller may dip into page reserves a bit more if the caller | 1943 | * The caller may dip into page reserves a bit more if the caller |
| @@ -1940,7 +1945,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask) | |||
| 1940 | * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will | 1945 | * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will |
| 1941 | * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). | 1946 | * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). |
| 1942 | */ | 1947 | */ |
| 1943 | alloc_flags |= (gfp_mask & __GFP_HIGH); | 1948 | alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); |
| 1944 | 1949 | ||
| 1945 | if (!wait) { | 1950 | if (!wait) { |
| 1946 | alloc_flags |= ALLOC_HARDER; | 1951 | alloc_flags |= ALLOC_HARDER; |
| @@ -2095,7 +2100,7 @@ rebalance: | |||
| 2095 | pages_reclaimed += did_some_progress; | 2100 | pages_reclaimed += did_some_progress; |
| 2096 | if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { | 2101 | if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) { |
| 2097 | /* Wait for some write requests to complete then retry */ | 2102 | /* Wait for some write requests to complete then retry */ |
| 2098 | congestion_wait(BLK_RW_ASYNC, HZ/50); | 2103 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); |
| 2099 | goto rebalance; | 2104 | goto rebalance; |
| 2100 | } | 2105 | } |
| 2101 | 2106 | ||
| @@ -3008,14 +3013,6 @@ static __init_refok int __build_all_zonelists(void *data) | |||
| 3008 | build_zonelist_cache(pgdat); | 3013 | build_zonelist_cache(pgdat); |
| 3009 | } | 3014 | } |
| 3010 | 3015 | ||
| 3011 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
| 3012 | /* Setup real pagesets for the new zone */ | ||
| 3013 | if (data) { | ||
| 3014 | struct zone *zone = data; | ||
| 3015 | setup_zone_pageset(zone); | ||
| 3016 | } | ||
| 3017 | #endif | ||
| 3018 | |||
| 3019 | /* | 3016 | /* |
| 3020 | * Initialize the boot_pagesets that are going to be used | 3017 | * Initialize the boot_pagesets that are going to be used |
| 3021 | * for bootstrapping processors. The real pagesets for | 3018 | * for bootstrapping processors. The real pagesets for |
| @@ -3064,7 +3061,11 @@ void build_all_zonelists(void *data) | |||
| 3064 | } else { | 3061 | } else { |
| 3065 | /* we have to stop all cpus to guarantee there is no user | 3062 | /* we have to stop all cpus to guarantee there is no user |
| 3066 | of zonelist */ | 3063 | of zonelist */ |
| 3067 | stop_machine(__build_all_zonelists, data, NULL); | 3064 | #ifdef CONFIG_MEMORY_HOTPLUG |
| 3065 | if (data) | ||
| 3066 | setup_zone_pageset((struct zone *)data); | ||
| 3067 | #endif | ||
| 3068 | stop_machine(__build_all_zonelists, NULL, NULL); | ||
| 3068 | /* cpuset refresh routine should be here */ | 3069 | /* cpuset refresh routine should be here */ |
| 3069 | } | 3070 | } |
| 3070 | vm_total_pages = nr_free_pagecache_pages(); | 3071 | vm_total_pages = nr_free_pagecache_pages(); |
| @@ -5297,12 +5298,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, | |||
| 5297 | * page allocator never allocates memory from the ISOLATE block. | 5298 | * page allocator never allocates memory from the ISOLATE block. |
| 5298 | */ | 5299 | */ |
| 5299 | 5300 | ||
| 5301 | static int | ||
| 5302 | __count_immobile_pages(struct zone *zone, struct page *page, int count) | ||
| 5303 | { | ||
| 5304 | unsigned long pfn, iter, found; | ||
| 5305 | /* | ||
| 5306 | * To avoid noisy data, lru_add_drain_all() should be called first. | ||
| 5307 | * If the zone is ZONE_MOVABLE, it never contains immobile pages. | ||
| 5308 | */ | ||
| 5309 | if (zone_idx(zone) == ZONE_MOVABLE) | ||
| 5310 | return true; | ||
| 5311 | |||
| 5312 | if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE) | ||
| 5313 | return true; | ||
| 5314 | |||
| 5315 | pfn = page_to_pfn(page); | ||
| 5316 | for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { | ||
| 5317 | unsigned long check = pfn + iter; | ||
| 5318 | |||
| 5319 | if (!pfn_valid_within(check)) { | ||
| 5320 | iter++; | ||
| 5321 | continue; | ||
| 5322 | } | ||
| 5323 | page = pfn_to_page(check); | ||
| 5324 | if (!page_count(page)) { | ||
| 5325 | if (PageBuddy(page)) | ||
| 5326 | iter += (1 << page_order(page)) - 1; | ||
| 5327 | continue; | ||
| 5328 | } | ||
| 5329 | if (!PageLRU(page)) | ||
| 5330 | found++; | ||
| 5331 | /* | ||
| 5332 | * If there are RECLAIMABLE pages, we need to check them. | ||
| 5333 | * But for now, memory offline itself doesn't call shrink_slab() | ||
| 5334 | * and this still needs to be fixed. | ||
| 5335 | */ | ||
| 5336 | /* | ||
| 5337 | * If the page is not RAM, page_count() should be 0. | ||
| 5338 | * We don't need any more checks. This is a _used_ non-movable page. | ||
| 5339 | * | ||
| 5340 | * The problematic thing here is PG_reserved pages. PG_reserved | ||
| 5341 | * is set to both of a memory hole page and a _used_ kernel | ||
| 5342 | * page at boot. | ||
| 5343 | */ | ||
| 5344 | if (found > count) | ||
| 5345 | return false; | ||
| 5346 | } | ||
| 5347 | return true; | ||
| 5348 | } | ||
| 5349 | |||
| 5350 | bool is_pageblock_removable_nolock(struct page *page) | ||
| 5351 | { | ||
| 5352 | struct zone *zone = page_zone(page); | ||
| 5353 | return __count_immobile_pages(zone, page, 0); | ||
| 5354 | } | ||
| 5355 | |||
| 5300 | int set_migratetype_isolate(struct page *page) | 5356 | int set_migratetype_isolate(struct page *page) |
| 5301 | { | 5357 | { |
| 5302 | struct zone *zone; | 5358 | struct zone *zone; |
| 5303 | struct page *curr_page; | 5359 | unsigned long flags, pfn; |
| 5304 | unsigned long flags, pfn, iter; | ||
| 5305 | unsigned long immobile = 0; | ||
| 5306 | struct memory_isolate_notify arg; | 5360 | struct memory_isolate_notify arg; |
| 5307 | int notifier_ret; | 5361 | int notifier_ret; |
| 5308 | int ret = -EBUSY; | 5362 | int ret = -EBUSY; |
| @@ -5312,11 +5366,6 @@ int set_migratetype_isolate(struct page *page) | |||
| 5312 | zone_idx = zone_idx(zone); | 5366 | zone_idx = zone_idx(zone); |
| 5313 | 5367 | ||
| 5314 | spin_lock_irqsave(&zone->lock, flags); | 5368 | spin_lock_irqsave(&zone->lock, flags); |
| 5315 | if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE || | ||
| 5316 | zone_idx == ZONE_MOVABLE) { | ||
| 5317 | ret = 0; | ||
| 5318 | goto out; | ||
| 5319 | } | ||
| 5320 | 5369 | ||
| 5321 | pfn = page_to_pfn(page); | 5370 | pfn = page_to_pfn(page); |
| 5322 | arg.start_pfn = pfn; | 5371 | arg.start_pfn = pfn; |
| @@ -5336,23 +5385,20 @@ int set_migratetype_isolate(struct page *page) | |||
| 5336 | */ | 5385 | */ |
| 5337 | notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg); | 5386 | notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg); |
| 5338 | notifier_ret = notifier_to_errno(notifier_ret); | 5387 | notifier_ret = notifier_to_errno(notifier_ret); |
| 5339 | if (notifier_ret || !arg.pages_found) | 5388 | if (notifier_ret) |
| 5340 | goto out; | 5389 | goto out; |
| 5341 | 5390 | /* | |
| 5342 | for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) { | 5391 | * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself. |
| 5343 | if (!pfn_valid_within(pfn)) | 5392 | * We just check MOVABLE pages. |
| 5344 | continue; | 5393 | */ |
| 5345 | 5394 | if (__count_immobile_pages(zone, page, arg.pages_found)) | |
| 5346 | curr_page = pfn_to_page(iter); | ||
| 5347 | if (!page_count(curr_page) || PageLRU(curr_page)) | ||
| 5348 | continue; | ||
| 5349 | |||
| 5350 | immobile++; | ||
| 5351 | } | ||
| 5352 | |||
| 5353 | if (arg.pages_found == immobile) | ||
| 5354 | ret = 0; | 5395 | ret = 0; |
| 5355 | 5396 | ||
| 5397 | /* | ||
| 5398 | * immobile means "not-on-lru" pages. If immobile is larger than | ||
| 5399 | * removable-by-driver pages reported by notifier, we'll fail. | ||
| 5400 | */ | ||
| 5401 | |||
| 5356 | out: | 5402 | out: |
| 5357 | if (!ret) { | 5403 | if (!ret) { |
| 5358 | set_pageblock_migratetype(page, MIGRATE_ISOLATE); | 5404 | set_pageblock_migratetype(page, MIGRATE_ISOLATE); |
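set_migratetype_isolate() now delegates the "does this pageblock hold unmovable pages?" decision to __count_immobile_pages(), and is_pageblock_removable_nolock() exposes the same check to the hot-remove probe in the memory_hotplug.c hunk earlier in this section. A hedged sketch of such a probe, simplified to step by pageblock_nr_pages instead of next_active_pageblock():

static bool range_looks_removable(unsigned long start_pfn,
				  unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end = page + nr_pages;

	for (; page < end; page += pageblock_nr_pages) {
		if (!is_pageblock_removable_nolock(page))
			return false;
		cond_resched();
	}
	return true;
}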
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 5e0ffd967452..4ae42bb40892 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
| @@ -86,7 +86,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn) | |||
| 86 | * all pages in [start_pfn...end_pfn) must be in the same zone. | 86 | * all pages in [start_pfn...end_pfn) must be in the same zone. |
| 87 | * zone->lock must be held before call this. | 87 | * zone->lock must be held before call this. |
| 88 | * | 88 | * |
| 89 | * Returns 0 if all pages in the range are isolated. | 89 | * Returns 1 if all pages in the range are isolated. |
| 90 | */ | 90 | */ |
| 91 | static int | 91 | static int |
| 92 | __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn) | 92 | __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn) |
| @@ -119,7 +119,6 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) | |||
| 119 | struct zone *zone; | 119 | struct zone *zone; |
| 120 | int ret; | 120 | int ret; |
| 121 | 121 | ||
| 122 | pfn = start_pfn; | ||
| 123 | /* | 122 | /* |
| 124 | * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page | 123 | * Note: pageblock_nr_page != MAX_ORDER. Then, chunks of free page |
| 125 | * is not aligned to pageblock_nr_pages. | 124 | * is not aligned to pageblock_nr_pages. |
diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 8b1a2ce21ee5..38cc58b8b2b0 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c | |||
| @@ -139,7 +139,6 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
| 139 | pgd_t *pgd; | 139 | pgd_t *pgd; |
| 140 | unsigned long next; | 140 | unsigned long next; |
| 141 | int err = 0; | 141 | int err = 0; |
| 142 | struct vm_area_struct *vma; | ||
| 143 | 142 | ||
| 144 | if (addr >= end) | 143 | if (addr >= end) |
| 145 | return err; | 144 | return err; |
| @@ -149,15 +148,17 @@ int walk_page_range(unsigned long addr, unsigned long end, | |||
| 149 | 148 | ||
| 150 | pgd = pgd_offset(walk->mm, addr); | 149 | pgd = pgd_offset(walk->mm, addr); |
| 151 | do { | 150 | do { |
| 151 | struct vm_area_struct *uninitialized_var(vma); | ||
| 152 | |||
| 152 | next = pgd_addr_end(addr, end); | 153 | next = pgd_addr_end(addr, end); |
| 153 | 154 | ||
| 155 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 154 | /* | 156 | /* |
| 155 | * handle hugetlb vma individually because pagetable walk for | 157 | * handle hugetlb vma individually because pagetable walk for |
| 156 | * the hugetlb page is dependent on the architecture and | 158 | * the hugetlb page is dependent on the architecture and |
| 157 | * we can't handle it in the same manner as non-huge pages. | 159 | * we can't handle it in the same manner as non-huge pages. |
| 158 | */ | 160 | */ |
| 159 | vma = find_vma(walk->mm, addr); | 161 | vma = find_vma(walk->mm, addr); |
| 160 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 161 | if (vma && is_vm_hugetlb_page(vma)) { | 162 | if (vma && is_vm_hugetlb_page(vma)) { |
| 162 | if (vma->vm_end < next) | 163 | if (vma->vm_end < next) |
| 163 | next = vma->vm_end; | 164 | next = vma->vm_end; |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
| @@ -80,7 +80,7 @@ static inline struct anon_vma_chain *anon_vma_chain_alloc(void) | |||
| 80 | return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); | 80 | return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); |
| 81 | } | 81 | } |
| 82 | 82 | ||
| 83 | void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) | 83 | static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) |
| 84 | { | 84 | { |
| 85 | kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); | 85 | kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); |
| 86 | } | 86 | } |
| @@ -314,7 +314,7 @@ void __init anon_vma_init(void) | |||
| 314 | * Getting a lock on a stable anon_vma from a page off the LRU is | 314 | * Getting a lock on a stable anon_vma from a page off the LRU is |
| 315 | * tricky: page_lock_anon_vma rely on RCU to guard against the races. | 315 | * tricky: page_lock_anon_vma rely on RCU to guard against the races. |
| 316 | */ | 316 | */ |
| 317 | struct anon_vma *page_lock_anon_vma(struct page *page) | 317 | struct anon_vma *__page_lock_anon_vma(struct page *page) |
| 318 | { | 318 | { |
| 319 | struct anon_vma *anon_vma, *root_anon_vma; | 319 | struct anon_vma *anon_vma, *root_anon_vma; |
| 320 | unsigned long anon_mapping; | 320 | unsigned long anon_mapping; |
| @@ -348,6 +348,8 @@ out: | |||
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | void page_unlock_anon_vma(struct anon_vma *anon_vma) | 350 | void page_unlock_anon_vma(struct anon_vma *anon_vma) |
| 351 | __releases(&anon_vma->root->lock) | ||
| 352 | __releases(RCU) | ||
| 351 | { | 353 | { |
| 352 | anon_vma_unlock(anon_vma); | 354 | anon_vma_unlock(anon_vma); |
| 353 | rcu_read_unlock(); | 355 | rcu_read_unlock(); |
| @@ -407,7 +409,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | |||
| 407 | * | 409 | * |
| 408 | * On success returns with pte mapped and locked. | 410 | * On success returns with pte mapped and locked. |
| 409 | */ | 411 | */ |
| 410 | pte_t *page_check_address(struct page *page, struct mm_struct *mm, | 412 | pte_t *__page_check_address(struct page *page, struct mm_struct *mm, |
| 411 | unsigned long address, spinlock_t **ptlp, int sync) | 413 | unsigned long address, spinlock_t **ptlp, int sync) |
| 412 | { | 414 | { |
| 413 | pgd_t *pgd; | 415 | pgd_t *pgd; |
| @@ -745,7 +747,7 @@ int page_mkclean(struct page *page) | |||
| 745 | if (mapping) { | 747 | if (mapping) { |
| 746 | ret = page_mkclean_file(mapping, page); | 748 | ret = page_mkclean_file(mapping, page); |
| 747 | if (page_test_dirty(page)) { | 749 | if (page_test_dirty(page)) { |
| 748 | page_clear_dirty(page); | 750 | page_clear_dirty(page, 1); |
| 749 | ret = 1; | 751 | ret = 1; |
| 750 | } | 752 | } |
| 751 | } | 753 | } |
| @@ -780,10 +782,10 @@ void page_move_anon_rmap(struct page *page, | |||
| 780 | } | 782 | } |
| 781 | 783 | ||
| 782 | /** | 784 | /** |
| 783 | * __page_set_anon_rmap - setup new anonymous rmap | 785 | * __page_set_anon_rmap - set up new anonymous rmap |
| 784 | * @page: the page to add the mapping to | 786 | * @page: Page to add to rmap |
| 785 | * @vma: the vm area in which the mapping is added | 787 | * @vma: VM area to add page to. |
| 786 | * @address: the user virtual address mapped | 788 | * @address: User virtual address of the mapping |
| 787 | * @exclusive: the page is exclusively owned by the current process | 789 | * @exclusive: the page is exclusively owned by the current process |
| 788 | */ | 790 | */ |
| 789 | static void __page_set_anon_rmap(struct page *page, | 791 | static void __page_set_anon_rmap(struct page *page, |
| @@ -793,25 +795,16 @@ static void __page_set_anon_rmap(struct page *page, | |||
| 793 | 795 | ||
| 794 | BUG_ON(!anon_vma); | 796 | BUG_ON(!anon_vma); |
| 795 | 797 | ||
| 798 | if (PageAnon(page)) | ||
| 799 | return; | ||
| 800 | |||
| 796 | /* | 801 | /* |
| 797 | * If the page isn't exclusively mapped into this vma, | 802 | * If the page isn't exclusively mapped into this vma, |
| 798 | * we must use the _oldest_ possible anon_vma for the | 803 | * we must use the _oldest_ possible anon_vma for the |
| 799 | * page mapping! | 804 | * page mapping! |
| 800 | */ | 805 | */ |
| 801 | if (!exclusive) { | 806 | if (!exclusive) |
| 802 | if (PageAnon(page)) | ||
| 803 | return; | ||
| 804 | anon_vma = anon_vma->root; | 807 | anon_vma = anon_vma->root; |
| 805 | } else { | ||
| 806 | /* | ||
| 807 | * In this case, swapped-out-but-not-discarded swap-cache | ||
| 808 | * is remapped. So, no need to update page->mapping here. | ||
| 809 | * We convice anon_vma poitned by page->mapping is not obsolete | ||
| 810 | * because vma->anon_vma is necessary to be a family of it. | ||
| 811 | */ | ||
| 812 | if (PageAnon(page)) | ||
| 813 | return; | ||
| 814 | } | ||
| 815 | 808 | ||
| 816 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; | 809 | anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; |
| 817 | page->mapping = (struct address_space *) anon_vma; | 810 | page->mapping = (struct address_space *) anon_vma; |
| @@ -942,7 +935,7 @@ void page_remove_rmap(struct page *page) | |||
| 942 | * containing the swap entry, but page not yet written to swap. | 935 | * containing the swap entry, but page not yet written to swap. |
| 943 | */ | 936 | */ |
| 944 | if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { | 937 | if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) { |
| 945 | page_clear_dirty(page); | 938 | page_clear_dirty(page, 1); |
| 946 | set_page_dirty(page); | 939 | set_page_dirty(page); |
| 947 | } | 940 | } |
| 948 | /* | 941 | /* |
diff --git a/mm/shmem.c b/mm/shmem.c index 080b09a57a8f..47fdeeb9d636 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -1586,6 +1586,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode | |||
| 1586 | 1586 | ||
| 1587 | inode = new_inode(sb); | 1587 | inode = new_inode(sb); |
| 1588 | if (inode) { | 1588 | if (inode) { |
| 1589 | inode->i_ino = get_next_ino(); | ||
| 1589 | inode_init_owner(inode, dir, mode); | 1590 | inode_init_owner(inode, dir, mode); |
| 1590 | inode->i_blocks = 0; | 1591 | inode->i_blocks = 0; |
| 1591 | inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; | 1592 | inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; |
| @@ -1903,7 +1904,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr | |||
| 1903 | dir->i_size += BOGO_DIRENT_SIZE; | 1904 | dir->i_size += BOGO_DIRENT_SIZE; |
| 1904 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; | 1905 | inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; |
| 1905 | inc_nlink(inode); | 1906 | inc_nlink(inode); |
| 1906 | atomic_inc(&inode->i_count); /* New dentry reference */ | 1907 | ihold(inode); /* New dentry reference */ |
| 1907 | dget(dentry); /* Extra pinning count for the created dentry */ | 1908 | dget(dentry); /* Extra pinning count for the created dentry */ |
| 1908 | d_instantiate(dentry, inode); | 1909 | d_instantiate(dentry, inode); |
| 1909 | out: | 1910 | out: |
| @@ -2146,7 +2147,7 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, | |||
| 2146 | if (*len < 3) | 2147 | if (*len < 3) |
| 2147 | return 255; | 2148 | return 255; |
| 2148 | 2149 | ||
| 2149 | if (hlist_unhashed(&inode->i_hash)) { | 2150 | if (inode_unhashed(inode)) { |
| 2150 | /* Unfortunately insert_inode_hash is not idempotent, | 2151 | /* Unfortunately insert_inode_hash is not idempotent, |
| 2151 | * so as we hash inodes here rather than at creation | 2152 | * so as we hash inodes here rather than at creation |
| 2152 | * time, we need a lock to ensure we only try | 2153 | * time, we need a lock to ensure we only try |
| @@ -2154,7 +2155,7 @@ static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, | |||
| 2154 | */ | 2155 | */ |
| 2155 | static DEFINE_SPINLOCK(lock); | 2156 | static DEFINE_SPINLOCK(lock); |
| 2156 | spin_lock(&lock); | 2157 | spin_lock(&lock); |
| 2157 | if (hlist_unhashed(&inode->i_hash)) | 2158 | if (inode_unhashed(inode)) |
| 2158 | __insert_inode_hash(inode, | 2159 | __insert_inode_hash(inode, |
| 2159 | inode->i_ino + inode->i_generation); | 2160 | inode->i_ino + inode->i_generation); |
| 2160 | spin_unlock(&lock); | 2161 | spin_unlock(&lock); |
| @@ -2537,16 +2538,16 @@ static const struct vm_operations_struct shmem_vm_ops = { | |||
| 2537 | }; | 2538 | }; |
| 2538 | 2539 | ||
| 2539 | 2540 | ||
| 2540 | static int shmem_get_sb(struct file_system_type *fs_type, | 2541 | static struct dentry *shmem_mount(struct file_system_type *fs_type, |
| 2541 | int flags, const char *dev_name, void *data, struct vfsmount *mnt) | 2542 | int flags, const char *dev_name, void *data) |
| 2542 | { | 2543 | { |
| 2543 | return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt); | 2544 | return mount_nodev(fs_type, flags, data, shmem_fill_super); |
| 2544 | } | 2545 | } |
| 2545 | 2546 | ||
| 2546 | static struct file_system_type tmpfs_fs_type = { | 2547 | static struct file_system_type tmpfs_fs_type = { |
| 2547 | .owner = THIS_MODULE, | 2548 | .owner = THIS_MODULE, |
| 2548 | .name = "tmpfs", | 2549 | .name = "tmpfs", |
| 2549 | .get_sb = shmem_get_sb, | 2550 | .mount = shmem_mount, |
| 2550 | .kill_sb = kill_litter_super, | 2551 | .kill_sb = kill_litter_super, |
| 2551 | }; | 2552 | }; |
| 2552 | 2553 | ||
| @@ -2642,7 +2643,7 @@ out: | |||
| 2642 | 2643 | ||
| 2643 | static struct file_system_type tmpfs_fs_type = { | 2644 | static struct file_system_type tmpfs_fs_type = { |
| 2644 | .name = "tmpfs", | 2645 | .name = "tmpfs", |
| 2645 | .get_sb = ramfs_get_sb, | 2646 | .mount = ramfs_mount, |
| 2646 | .kill_sb = kill_litter_super, | 2647 | .kill_sb = kill_litter_super, |
| 2647 | }; | 2648 | }; |
| 2648 | 2649 | ||
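The two tmpfs hunks above follow the tree-wide get_sb -> mount conversion: the file_system_type callback now returns the root dentry directly and the struct vfsmount argument is gone. A minimal sketch of the same pattern for a hypothetical "examplefs" (all names below are assumptions for illustration, not part of this patch):

    static struct dentry *examplefs_mount(struct file_system_type *fs_type,
                    int flags, const char *dev_name, void *data)
    {
            /* mount_nodev() replaces get_sb_nodev() and returns the root dentry */
            return mount_nodev(fs_type, flags, data, examplefs_fill_super);
    }

    static struct file_system_type examplefs_fs_type = {
            .owner   = THIS_MODULE,
            .name    = "examplefs",
            .mount   = examplefs_mount,     /* previously .get_sb = examplefs_get_sb */
            .kill_sb = kill_litter_super,
    };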
| @@ -901,7 +901,7 @@ static int transfer_objects(struct array_cache *to, | |||
| 901 | struct array_cache *from, unsigned int max) | 901 | struct array_cache *from, unsigned int max) |
| 902 | { | 902 | { |
| 903 | /* Figure out how many entries to transfer */ | 903 | /* Figure out how many entries to transfer */ |
| 904 | int nr = min(min(from->avail, max), to->limit - to->avail); | 904 | int nr = min3(from->avail, max, to->limit - to->avail); |
| 905 | 905 | ||
| 906 | if (!nr) | 906 | if (!nr) |
| 907 | return 0; | 907 | return 0; |
| @@ -3273,9 +3273,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, | |||
| 3273 | kfree(n); | 3273 | kfree(n); |
| 3274 | kfree(s); | 3274 | kfree(s); |
| 3275 | } | 3275 | } |
| 3276 | err: | ||
| 3276 | up_write(&slub_lock); | 3277 | up_write(&slub_lock); |
| 3277 | 3278 | ||
| 3278 | err: | ||
| 3279 | if (flags & SLAB_PANIC) | 3279 | if (flags & SLAB_PANIC) |
| 3280 | panic("Cannot create slabcache %s\n", name); | 3280 | panic("Cannot create slabcache %s\n", name); |
| 3281 | else | 3281 | else |
| @@ -3401,13 +3401,13 @@ static int validate_slab(struct kmem_cache *s, struct page *page, | |||
| 3401 | 3401 | ||
| 3402 | for_each_free_object(p, s, page->freelist) { | 3402 | for_each_free_object(p, s, page->freelist) { |
| 3403 | set_bit(slab_index(p, s, addr), map); | 3403 | set_bit(slab_index(p, s, addr), map); |
| 3404 | if (!check_object(s, page, p, 0)) | 3404 | if (!check_object(s, page, p, SLUB_RED_INACTIVE)) |
| 3405 | return 0; | 3405 | return 0; |
| 3406 | } | 3406 | } |
| 3407 | 3407 | ||
| 3408 | for_each_object(p, s, addr, page->objects) | 3408 | for_each_object(p, s, addr, page->objects) |
| 3409 | if (!test_bit(slab_index(p, s, addr), map)) | 3409 | if (!test_bit(slab_index(p, s, addr), map)) |
| 3410 | if (!check_object(s, page, p, 1)) | 3410 | if (!check_object(s, page, p, SLUB_RED_ACTIVE)) |
| 3411 | return 0; | 3411 | return 0; |
| 3412 | return 1; | 3412 | return 1; |
| 3413 | } | 3413 | } |
| @@ -3862,6 +3862,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
| 3862 | x += sprintf(buf + x, " N%d=%lu", | 3862 | x += sprintf(buf + x, " N%d=%lu", |
| 3863 | node, nodes[node]); | 3863 | node, nodes[node]); |
| 3864 | #endif | 3864 | #endif |
| 3865 | up_read(&slub_lock); | ||
| 3865 | kfree(nodes); | 3866 | kfree(nodes); |
| 3866 | return x + sprintf(buf + x, "\n"); | 3867 | return x + sprintf(buf + x, "\n"); |
| 3867 | } | 3868 | } |
| @@ -378,6 +378,7 @@ void release_pages(struct page **pages, int nr, int cold) | |||
| 378 | 378 | ||
| 379 | pagevec_free(&pages_to_free); | 379 | pagevec_free(&pages_to_free); |
| 380 | } | 380 | } |
| 381 | EXPORT_SYMBOL(release_pages); | ||
| 381 | 382 | ||
| 382 | /* | 383 | /* |
| 383 | * The pages which we're about to release may be in the deferred lru-addition | 384 | * The pages which we're about to release may be in the deferred lru-addition |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 9fc7bac7db0c..67ddaaf98c74 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/capability.h> | 30 | #include <linux/capability.h> |
| 31 | #include <linux/syscalls.h> | 31 | #include <linux/syscalls.h> |
| 32 | #include <linux/memcontrol.h> | 32 | #include <linux/memcontrol.h> |
| 33 | #include <linux/poll.h> | ||
| 33 | 34 | ||
| 34 | #include <asm/pgtable.h> | 35 | #include <asm/pgtable.h> |
| 35 | #include <asm/tlbflush.h> | 36 | #include <asm/tlbflush.h> |
| @@ -58,6 +59,10 @@ static struct swap_info_struct *swap_info[MAX_SWAPFILES]; | |||
| 58 | 59 | ||
| 59 | static DEFINE_MUTEX(swapon_mutex); | 60 | static DEFINE_MUTEX(swapon_mutex); |
| 60 | 61 | ||
| 62 | static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); | ||
| 63 | /* Activity counter to indicate that a swapon or swapoff has occurred */ | ||
| 64 | static atomic_t proc_poll_event = ATOMIC_INIT(0); | ||
| 65 | |||
| 61 | static inline unsigned char swap_count(unsigned char ent) | 66 | static inline unsigned char swap_count(unsigned char ent) |
| 62 | { | 67 | { |
| 63 | return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ | 68 | return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ |
| @@ -1680,6 +1685,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) | |||
| 1680 | } | 1685 | } |
| 1681 | filp_close(swap_file, NULL); | 1686 | filp_close(swap_file, NULL); |
| 1682 | err = 0; | 1687 | err = 0; |
| 1688 | atomic_inc(&proc_poll_event); | ||
| 1689 | wake_up_interruptible(&proc_poll_wait); | ||
| 1683 | 1690 | ||
| 1684 | out_dput: | 1691 | out_dput: |
| 1685 | filp_close(victim, NULL); | 1692 | filp_close(victim, NULL); |
| @@ -1688,6 +1695,25 @@ out: | |||
| 1688 | } | 1695 | } |
| 1689 | 1696 | ||
| 1690 | #ifdef CONFIG_PROC_FS | 1697 | #ifdef CONFIG_PROC_FS |
| 1698 | struct proc_swaps { | ||
| 1699 | struct seq_file seq; | ||
| 1700 | int event; | ||
| 1701 | }; | ||
| 1702 | |||
| 1703 | static unsigned swaps_poll(struct file *file, poll_table *wait) | ||
| 1704 | { | ||
| 1705 | struct proc_swaps *s = file->private_data; | ||
| 1706 | |||
| 1707 | poll_wait(file, &proc_poll_wait, wait); | ||
| 1708 | |||
| 1709 | if (s->event != atomic_read(&proc_poll_event)) { | ||
| 1710 | s->event = atomic_read(&proc_poll_event); | ||
| 1711 | return POLLIN | POLLRDNORM | POLLERR | POLLPRI; | ||
| 1712 | } | ||
| 1713 | |||
| 1714 | return POLLIN | POLLRDNORM; | ||
| 1715 | } | ||
| 1716 | |||
| 1691 | /* iterator */ | 1717 | /* iterator */ |
| 1692 | static void *swap_start(struct seq_file *swap, loff_t *pos) | 1718 | static void *swap_start(struct seq_file *swap, loff_t *pos) |
| 1693 | { | 1719 | { |
| @@ -1771,7 +1797,24 @@ static const struct seq_operations swaps_op = { | |||
| 1771 | 1797 | ||
| 1772 | static int swaps_open(struct inode *inode, struct file *file) | 1798 | static int swaps_open(struct inode *inode, struct file *file) |
| 1773 | { | 1799 | { |
| 1774 | return seq_open(file, &swaps_op); | 1800 | struct proc_swaps *s; |
| 1801 | int ret; | ||
| 1802 | |||
| 1803 | s = kmalloc(sizeof(struct proc_swaps), GFP_KERNEL); | ||
| 1804 | if (!s) | ||
| 1805 | return -ENOMEM; | ||
| 1806 | |||
| 1807 | file->private_data = s; | ||
| 1808 | |||
| 1809 | ret = seq_open(file, &swaps_op); | ||
| 1810 | if (ret) { | ||
| 1811 | kfree(s); | ||
| 1812 | return ret; | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | s->seq.private = s; | ||
| 1816 | s->event = atomic_read(&proc_poll_event); | ||
| 1817 | return ret; | ||
| 1775 | } | 1818 | } |
| 1776 | 1819 | ||
| 1777 | static const struct file_operations proc_swaps_operations = { | 1820 | static const struct file_operations proc_swaps_operations = { |
| @@ -1779,6 +1822,7 @@ static const struct file_operations proc_swaps_operations = { | |||
| 1779 | .read = seq_read, | 1822 | .read = seq_read, |
| 1780 | .llseek = seq_lseek, | 1823 | .llseek = seq_lseek, |
| 1781 | .release = seq_release, | 1824 | .release = seq_release, |
| 1825 | .poll = swaps_poll, | ||
| 1782 | }; | 1826 | }; |
| 1783 | 1827 | ||
| 1784 | static int __init procswaps_init(void) | 1828 | static int __init procswaps_init(void) |
| @@ -2084,6 +2128,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
| 2084 | swap_info[prev]->next = type; | 2128 | swap_info[prev]->next = type; |
| 2085 | spin_unlock(&swap_lock); | 2129 | spin_unlock(&swap_lock); |
| 2086 | mutex_unlock(&swapon_mutex); | 2130 | mutex_unlock(&swapon_mutex); |
| 2131 | atomic_inc(&proc_poll_event); | ||
| 2132 | wake_up_interruptible(&proc_poll_wait); | ||
| 2133 | |||
| 2087 | error = 0; | 2134 | error = 0; |
| 2088 | goto out; | 2135 | goto out; |
| 2089 | bad_swap: | 2136 | bad_swap: |
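With the swaps_poll()/proc_poll_event additions above, /proc/swaps becomes pollable: swapon(2) and swapoff(2) bump the event counter and wake the waiters, and a poller then sees POLLERR|POLLPRI on top of the always-present POLLIN. A minimal userspace sketch (an illustration of how the new interface could be consumed, not part of this patch) that waits for swap-configuration changes:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            int fd = open("/proc/swaps", O_RDONLY);

            if (fd < 0)
                    return 1;
            read(fd, buf, sizeof(buf));             /* read the initial table */
            for (;;) {
                    struct pollfd pfd = { .fd = fd, .events = POLLPRI };

                    /* blocks until swapon/swapoff bumps proc_poll_event */
                    if (poll(&pfd, 1, -1) <= 0)
                            break;
                    if (pfd.revents & (POLLERR | POLLPRI)) {
                            printf("swap configuration changed\n");
                            lseek(fd, 0, SEEK_SET); /* re-read the updated table */
                            read(fd, buf, sizeof(buf));
                    }
            }
            close(fd);
            return 0;
    }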
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9f909622a25e..eb5cc7d00c5a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -31,8 +31,6 @@ | |||
| 31 | #include <asm/tlbflush.h> | 31 | #include <asm/tlbflush.h> |
| 32 | #include <asm/shmparam.h> | 32 | #include <asm/shmparam.h> |
| 33 | 33 | ||
| 34 | bool vmap_lazy_unmap __read_mostly = true; | ||
| 35 | |||
| 36 | /*** Page table manipulation functions ***/ | 34 | /*** Page table manipulation functions ***/ |
| 37 | 35 | ||
| 38 | static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) | 36 | static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) |
| @@ -293,13 +291,13 @@ static void __insert_vmap_area(struct vmap_area *va) | |||
| 293 | struct rb_node *tmp; | 291 | struct rb_node *tmp; |
| 294 | 292 | ||
| 295 | while (*p) { | 293 | while (*p) { |
| 296 | struct vmap_area *tmp; | 294 | struct vmap_area *tmp_va; |
| 297 | 295 | ||
| 298 | parent = *p; | 296 | parent = *p; |
| 299 | tmp = rb_entry(parent, struct vmap_area, rb_node); | 297 | tmp_va = rb_entry(parent, struct vmap_area, rb_node); |
| 300 | if (va->va_start < tmp->va_end) | 298 | if (va->va_start < tmp_va->va_end) |
| 301 | p = &(*p)->rb_left; | 299 | p = &(*p)->rb_left; |
| 302 | else if (va->va_end > tmp->va_start) | 300 | else if (va->va_end > tmp_va->va_start) |
| 303 | p = &(*p)->rb_right; | 301 | p = &(*p)->rb_right; |
| 304 | else | 302 | else |
| 305 | BUG(); | 303 | BUG(); |
| @@ -503,9 +501,6 @@ static unsigned long lazy_max_pages(void) | |||
| 503 | { | 501 | { |
| 504 | unsigned int log; | 502 | unsigned int log; |
| 505 | 503 | ||
| 506 | if (!vmap_lazy_unmap) | ||
| 507 | return 0; | ||
| 508 | |||
| 509 | log = fls(num_online_cpus()); | 504 | log = fls(num_online_cpus()); |
| 510 | 505 | ||
| 511 | return log * (32UL * 1024 * 1024 / PAGE_SIZE); | 506 | return log * (32UL * 1024 * 1024 / PAGE_SIZE); |
| @@ -566,7 +561,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end, | |||
| 566 | if (va->va_end > *end) | 561 | if (va->va_end > *end) |
| 567 | *end = va->va_end; | 562 | *end = va->va_end; |
| 568 | nr += (va->va_end - va->va_start) >> PAGE_SHIFT; | 563 | nr += (va->va_end - va->va_start) >> PAGE_SHIFT; |
| 569 | unmap_vmap_area(va); | ||
| 570 | list_add_tail(&va->purge_list, &valist); | 564 | list_add_tail(&va->purge_list, &valist); |
| 571 | va->flags |= VM_LAZY_FREEING; | 565 | va->flags |= VM_LAZY_FREEING; |
| 572 | va->flags &= ~VM_LAZY_FREE; | 566 | va->flags &= ~VM_LAZY_FREE; |
| @@ -611,10 +605,11 @@ static void purge_vmap_area_lazy(void) | |||
| 611 | } | 605 | } |
| 612 | 606 | ||
| 613 | /* | 607 | /* |
| 614 | * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been | 608 | * Free a vmap area, caller ensuring that the area has been unmapped |
| 615 | * called for the correct range previously. | 609 | * and flush_cache_vunmap had been called for the correct range |
| 610 | * previously. | ||
| 616 | */ | 611 | */ |
| 617 | static void free_unmap_vmap_area_noflush(struct vmap_area *va) | 612 | static void free_vmap_area_noflush(struct vmap_area *va) |
| 618 | { | 613 | { |
| 619 | va->flags |= VM_LAZY_FREE; | 614 | va->flags |= VM_LAZY_FREE; |
| 620 | atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); | 615 | atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); |
| @@ -623,6 +618,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va) | |||
| 623 | } | 618 | } |
| 624 | 619 | ||
| 625 | /* | 620 | /* |
| 621 | * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been | ||
| 622 | * called for the correct range previously. | ||
| 623 | */ | ||
| 624 | static void free_unmap_vmap_area_noflush(struct vmap_area *va) | ||
| 625 | { | ||
| 626 | unmap_vmap_area(va); | ||
| 627 | free_vmap_area_noflush(va); | ||
| 628 | } | ||
| 629 | |||
| 630 | /* | ||
| 626 | * Free and unmap a vmap area | 631 | * Free and unmap a vmap area |
| 627 | */ | 632 | */ |
| 628 | static void free_unmap_vmap_area(struct vmap_area *va) | 633 | static void free_unmap_vmap_area(struct vmap_area *va) |
| @@ -798,7 +803,7 @@ static void free_vmap_block(struct vmap_block *vb) | |||
| 798 | spin_unlock(&vmap_block_tree_lock); | 803 | spin_unlock(&vmap_block_tree_lock); |
| 799 | BUG_ON(tmp != vb); | 804 | BUG_ON(tmp != vb); |
| 800 | 805 | ||
| 801 | free_unmap_vmap_area_noflush(vb->va); | 806 | free_vmap_area_noflush(vb->va); |
| 802 | call_rcu(&vb->rcu_head, rcu_free_vb); | 807 | call_rcu(&vb->rcu_head, rcu_free_vb); |
| 803 | } | 808 | } |
| 804 | 809 | ||
| @@ -936,6 +941,8 @@ static void vb_free(const void *addr, unsigned long size) | |||
| 936 | rcu_read_unlock(); | 941 | rcu_read_unlock(); |
| 937 | BUG_ON(!vb); | 942 | BUG_ON(!vb); |
| 938 | 943 | ||
| 944 | vunmap_page_range((unsigned long)addr, (unsigned long)addr + size); | ||
| 945 | |||
| 939 | spin_lock(&vb->lock); | 946 | spin_lock(&vb->lock); |
| 940 | BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order)); | 947 | BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order)); |
| 941 | 948 | ||
| @@ -988,7 +995,6 @@ void vm_unmap_aliases(void) | |||
| 988 | 995 | ||
| 989 | s = vb->va->va_start + (i << PAGE_SHIFT); | 996 | s = vb->va->va_start + (i << PAGE_SHIFT); |
| 990 | e = vb->va->va_start + (j << PAGE_SHIFT); | 997 | e = vb->va->va_start + (j << PAGE_SHIFT); |
| 991 | vunmap_page_range(s, e); | ||
| 992 | flush = 1; | 998 | flush = 1; |
| 993 | 999 | ||
| 994 | if (s < start) | 1000 | if (s < start) |
| @@ -1596,6 +1602,13 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) | |||
| 1596 | } | 1602 | } |
| 1597 | EXPORT_SYMBOL(__vmalloc); | 1603 | EXPORT_SYMBOL(__vmalloc); |
| 1598 | 1604 | ||
| 1605 | static inline void *__vmalloc_node_flags(unsigned long size, | ||
| 1606 | int node, gfp_t flags) | ||
| 1607 | { | ||
| 1608 | return __vmalloc_node(size, 1, flags, PAGE_KERNEL, | ||
| 1609 | node, __builtin_return_address(0)); | ||
| 1610 | } | ||
| 1611 | |||
| 1599 | /** | 1612 | /** |
| 1600 | * vmalloc - allocate virtually contiguous memory | 1613 | * vmalloc - allocate virtually contiguous memory |
| 1601 | * @size: allocation size | 1614 | * @size: allocation size |
| @@ -1607,12 +1620,28 @@ EXPORT_SYMBOL(__vmalloc); | |||
| 1607 | */ | 1620 | */ |
| 1608 | void *vmalloc(unsigned long size) | 1621 | void *vmalloc(unsigned long size) |
| 1609 | { | 1622 | { |
| 1610 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, | 1623 | return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM); |
| 1611 | -1, __builtin_return_address(0)); | ||
| 1612 | } | 1624 | } |
| 1613 | EXPORT_SYMBOL(vmalloc); | 1625 | EXPORT_SYMBOL(vmalloc); |
| 1614 | 1626 | ||
| 1615 | /** | 1627 | /** |
| 1628 | * vzalloc - allocate virtually contiguous memory with zero fill | ||
| 1629 | * @size: allocation size | ||
| 1630 | * Allocate enough pages to cover @size from the page level | ||
| 1631 | * allocator and map them into contiguous kernel virtual space. | ||
| 1632 | * The memory allocated is set to zero. | ||
| 1633 | * | ||
| 1634 | * For tight control over page level allocator and protection flags | ||
| 1635 | * use __vmalloc() instead. | ||
| 1636 | */ | ||
| 1637 | void *vzalloc(unsigned long size) | ||
| 1638 | { | ||
| 1639 | return __vmalloc_node_flags(size, -1, | ||
| 1640 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); | ||
| 1641 | } | ||
| 1642 | EXPORT_SYMBOL(vzalloc); | ||
| 1643 | |||
| 1644 | /** | ||
| 1616 | * vmalloc_user - allocate zeroed virtually contiguous memory for userspace | 1645 | * vmalloc_user - allocate zeroed virtually contiguous memory for userspace |
| 1617 | * @size: allocation size | 1646 | * @size: allocation size |
| 1618 | * | 1647 | * |
| @@ -1653,6 +1682,25 @@ void *vmalloc_node(unsigned long size, int node) | |||
| 1653 | } | 1682 | } |
| 1654 | EXPORT_SYMBOL(vmalloc_node); | 1683 | EXPORT_SYMBOL(vmalloc_node); |
| 1655 | 1684 | ||
| 1685 | /** | ||
| 1686 | * vzalloc_node - allocate memory on a specific node with zero fill | ||
| 1687 | * @size: allocation size | ||
| 1688 | * @node: numa node | ||
| 1689 | * | ||
| 1690 | * Allocate enough pages to cover @size from the page level | ||
| 1691 | * allocator and map them into contiguous kernel virtual space. | ||
| 1692 | * The memory allocated is set to zero. | ||
| 1693 | * | ||
| 1694 | * For tight control over page level allocator and protection flags | ||
| 1695 | * use __vmalloc_node() instead. | ||
| 1696 | */ | ||
| 1697 | void *vzalloc_node(unsigned long size, int node) | ||
| 1698 | { | ||
| 1699 | return __vmalloc_node_flags(size, node, | ||
| 1700 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); | ||
| 1701 | } | ||
| 1702 | EXPORT_SYMBOL(vzalloc_node); | ||
| 1703 | |||
| 1656 | #ifndef PAGE_KERNEL_EXEC | 1704 | #ifndef PAGE_KERNEL_EXEC |
| 1657 | # define PAGE_KERNEL_EXEC PAGE_KERNEL | 1705 | # define PAGE_KERNEL_EXEC PAGE_KERNEL |
| 1658 | #endif | 1706 | #endif |
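The new vzalloc()/vzalloc_node() helpers above let callers drop the open-coded zeroing after vmalloc(). A hedged sketch of the intended call-site conversion (the buffer name and error handling are illustrative only):

    /* before: allocate, then clear by hand */
    buf = vmalloc(size);
    if (!buf)
            return -ENOMEM;
    memset(buf, 0, size);

    /* after: __GFP_ZERO is applied inside vzalloc() */
    buf = vzalloc(size);
    if (!buf)
            return -ENOMEM;

    /* ... */
    vfree(buf);             /* freeing is unchanged */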
| @@ -2350,6 +2398,7 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) | |||
| 2350 | 2398 | ||
| 2351 | #ifdef CONFIG_PROC_FS | 2399 | #ifdef CONFIG_PROC_FS |
| 2352 | static void *s_start(struct seq_file *m, loff_t *pos) | 2400 | static void *s_start(struct seq_file *m, loff_t *pos) |
| 2401 | __acquires(&vmlist_lock) | ||
| 2353 | { | 2402 | { |
| 2354 | loff_t n = *pos; | 2403 | loff_t n = *pos; |
| 2355 | struct vm_struct *v; | 2404 | struct vm_struct *v; |
| @@ -2376,6 +2425,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos) | |||
| 2376 | } | 2425 | } |
| 2377 | 2426 | ||
| 2378 | static void s_stop(struct seq_file *m, void *p) | 2427 | static void s_stop(struct seq_file *m, void *p) |
| 2428 | __releases(&vmlist_lock) | ||
| 2379 | { | 2429 | { |
| 2380 | read_unlock(&vmlist_lock); | 2430 | read_unlock(&vmlist_lock); |
| 2381 | } | 2431 | } |
diff --git a/mm/vmscan.c b/mm/vmscan.c index b94c9464f262..d31d7ce52c0e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -51,6 +51,12 @@ | |||
| 51 | #define CREATE_TRACE_POINTS | 51 | #define CREATE_TRACE_POINTS |
| 52 | #include <trace/events/vmscan.h> | 52 | #include <trace/events/vmscan.h> |
| 53 | 53 | ||
| 54 | enum lumpy_mode { | ||
| 55 | LUMPY_MODE_NONE, | ||
| 56 | LUMPY_MODE_ASYNC, | ||
| 57 | LUMPY_MODE_SYNC, | ||
| 58 | }; | ||
| 59 | |||
| 54 | struct scan_control { | 60 | struct scan_control { |
| 55 | /* Incremented by the number of inactive pages that were scanned */ | 61 | /* Incremented by the number of inactive pages that were scanned */ |
| 56 | unsigned long nr_scanned; | 62 | unsigned long nr_scanned; |
| @@ -82,7 +88,7 @@ struct scan_control { | |||
| 82 | * Intend to reclaim enough continuous memory rather than reclaim | 88 | * Intend to reclaim enough continuous memory rather than reclaim |
| 83 | * enough amount of memory. i.e, mode for high order allocation. | 89 | * enough amount of memory. i.e, mode for high order allocation. |
| 84 | */ | 90 | */ |
| 85 | bool lumpy_reclaim_mode; | 91 | enum lumpy_mode lumpy_reclaim_mode; |
| 86 | 92 | ||
| 87 | /* Which cgroup do we reclaim from */ | 93 | /* Which cgroup do we reclaim from */ |
| 88 | struct mem_cgroup *mem_cgroup; | 94 | struct mem_cgroup *mem_cgroup; |
| @@ -265,6 +271,36 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, | |||
| 265 | return ret; | 271 | return ret; |
| 266 | } | 272 | } |
| 267 | 273 | ||
| 274 | static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc, | ||
| 275 | bool sync) | ||
| 276 | { | ||
| 277 | enum lumpy_mode mode = sync ? LUMPY_MODE_SYNC : LUMPY_MODE_ASYNC; | ||
| 278 | |||
| 279 | /* | ||
| 280 | * Some reclaim has already failed. It is not worth trying synchronous | ||
| 281 | * lumpy reclaim. | ||
| 282 | */ | ||
| 283 | if (sync && sc->lumpy_reclaim_mode == LUMPY_MODE_NONE) | ||
| 284 | return; | ||
| 285 | |||
| 286 | /* | ||
| 287 | * If we need a large contiguous chunk of memory, or have | ||
| 288 | * trouble getting a small set of contiguous pages, we | ||
| 289 | * will reclaim both active and inactive pages. | ||
| 290 | */ | ||
| 291 | if (sc->order > PAGE_ALLOC_COSTLY_ORDER) | ||
| 292 | sc->lumpy_reclaim_mode = mode; | ||
| 293 | else if (sc->order && priority < DEF_PRIORITY - 2) | ||
| 294 | sc->lumpy_reclaim_mode = mode; | ||
| 295 | else | ||
| 296 | sc->lumpy_reclaim_mode = LUMPY_MODE_NONE; | ||
| 297 | } | ||
| 298 | |||
| 299 | static void disable_lumpy_reclaim_mode(struct scan_control *sc) | ||
| 300 | { | ||
| 301 | sc->lumpy_reclaim_mode = LUMPY_MODE_NONE; | ||
| 302 | } | ||
| 303 | |||
| 268 | static inline int is_page_cache_freeable(struct page *page) | 304 | static inline int is_page_cache_freeable(struct page *page) |
| 269 | { | 305 | { |
| 270 | /* | 306 | /* |
| @@ -275,7 +311,8 @@ static inline int is_page_cache_freeable(struct page *page) | |||
| 275 | return page_count(page) - page_has_private(page) == 2; | 311 | return page_count(page) - page_has_private(page) == 2; |
| 276 | } | 312 | } |
| 277 | 313 | ||
| 278 | static int may_write_to_queue(struct backing_dev_info *bdi) | 314 | static int may_write_to_queue(struct backing_dev_info *bdi, |
| 315 | struct scan_control *sc) | ||
| 279 | { | 316 | { |
| 280 | if (current->flags & PF_SWAPWRITE) | 317 | if (current->flags & PF_SWAPWRITE) |
| 281 | return 1; | 318 | return 1; |
| @@ -283,6 +320,10 @@ static int may_write_to_queue(struct backing_dev_info *bdi) | |||
| 283 | return 1; | 320 | return 1; |
| 284 | if (bdi == current->backing_dev_info) | 321 | if (bdi == current->backing_dev_info) |
| 285 | return 1; | 322 | return 1; |
| 323 | |||
| 324 | /* lumpy reclaim for hugepages often needs a lot of writes */ | ||
| 325 | if (sc->order > PAGE_ALLOC_COSTLY_ORDER) | ||
| 326 | return 1; | ||
| 286 | return 0; | 327 | return 0; |
| 287 | } | 328 | } |
| 288 | 329 | ||
| @@ -307,12 +348,6 @@ static void handle_write_error(struct address_space *mapping, | |||
| 307 | unlock_page(page); | 348 | unlock_page(page); |
| 308 | } | 349 | } |
| 309 | 350 | ||
| 310 | /* Request for sync pageout. */ | ||
| 311 | enum pageout_io { | ||
| 312 | PAGEOUT_IO_ASYNC, | ||
| 313 | PAGEOUT_IO_SYNC, | ||
| 314 | }; | ||
| 315 | |||
| 316 | /* possible outcome of pageout() */ | 351 | /* possible outcome of pageout() */ |
| 317 | typedef enum { | 352 | typedef enum { |
| 318 | /* failed to write page out, page is locked */ | 353 | /* failed to write page out, page is locked */ |
| @@ -330,7 +365,7 @@ typedef enum { | |||
| 330 | * Calls ->writepage(). | 365 | * Calls ->writepage(). |
| 331 | */ | 366 | */ |
| 332 | static pageout_t pageout(struct page *page, struct address_space *mapping, | 367 | static pageout_t pageout(struct page *page, struct address_space *mapping, |
| 333 | enum pageout_io sync_writeback) | 368 | struct scan_control *sc) |
| 334 | { | 369 | { |
| 335 | /* | 370 | /* |
| 336 | * If the page is dirty, only perform writeback if that write | 371 | * If the page is dirty, only perform writeback if that write |
| @@ -366,7 +401,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
| 366 | } | 401 | } |
| 367 | if (mapping->a_ops->writepage == NULL) | 402 | if (mapping->a_ops->writepage == NULL) |
| 368 | return PAGE_ACTIVATE; | 403 | return PAGE_ACTIVATE; |
| 369 | if (!may_write_to_queue(mapping->backing_dev_info)) | 404 | if (!may_write_to_queue(mapping->backing_dev_info, sc)) |
| 370 | return PAGE_KEEP; | 405 | return PAGE_KEEP; |
| 371 | 406 | ||
| 372 | if (clear_page_dirty_for_io(page)) { | 407 | if (clear_page_dirty_for_io(page)) { |
| @@ -376,7 +411,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
| 376 | .nr_to_write = SWAP_CLUSTER_MAX, | 411 | .nr_to_write = SWAP_CLUSTER_MAX, |
| 377 | .range_start = 0, | 412 | .range_start = 0, |
| 378 | .range_end = LLONG_MAX, | 413 | .range_end = LLONG_MAX, |
| 379 | .nonblocking = 1, | ||
| 380 | .for_reclaim = 1, | 414 | .for_reclaim = 1, |
| 381 | }; | 415 | }; |
| 382 | 416 | ||
| @@ -394,7 +428,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
| 394 | * direct reclaiming a large contiguous area and the | 428 | * direct reclaiming a large contiguous area and the |
| 395 | * first attempt to free a range of pages fails. | 429 | * first attempt to free a range of pages fails. |
| 396 | */ | 430 | */ |
| 397 | if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC) | 431 | if (PageWriteback(page) && |
| 432 | sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC) | ||
| 398 | wait_on_page_writeback(page); | 433 | wait_on_page_writeback(page); |
| 399 | 434 | ||
| 400 | if (!PageWriteback(page)) { | 435 | if (!PageWriteback(page)) { |
| @@ -402,7 +437,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, | |||
| 402 | ClearPageReclaim(page); | 437 | ClearPageReclaim(page); |
| 403 | } | 438 | } |
| 404 | trace_mm_vmscan_writepage(page, | 439 | trace_mm_vmscan_writepage(page, |
| 405 | trace_reclaim_flags(page, sync_writeback)); | 440 | trace_reclaim_flags(page, sc->lumpy_reclaim_mode)); |
| 406 | inc_zone_page_state(page, NR_VMSCAN_WRITE); | 441 | inc_zone_page_state(page, NR_VMSCAN_WRITE); |
| 407 | return PAGE_SUCCESS; | 442 | return PAGE_SUCCESS; |
| 408 | } | 443 | } |
| @@ -580,7 +615,7 @@ static enum page_references page_check_references(struct page *page, | |||
| 580 | referenced_page = TestClearPageReferenced(page); | 615 | referenced_page = TestClearPageReferenced(page); |
| 581 | 616 | ||
| 582 | /* Lumpy reclaim - ignore references */ | 617 | /* Lumpy reclaim - ignore references */ |
| 583 | if (sc->lumpy_reclaim_mode) | 618 | if (sc->lumpy_reclaim_mode != LUMPY_MODE_NONE) |
| 584 | return PAGEREF_RECLAIM; | 619 | return PAGEREF_RECLAIM; |
| 585 | 620 | ||
| 586 | /* | 621 | /* |
| @@ -616,7 +651,7 @@ static enum page_references page_check_references(struct page *page, | |||
| 616 | } | 651 | } |
| 617 | 652 | ||
| 618 | /* Reclaim if clean, defer dirty pages to writeback */ | 653 | /* Reclaim if clean, defer dirty pages to writeback */ |
| 619 | if (referenced_page) | 654 | if (referenced_page && !PageSwapBacked(page)) |
| 620 | return PAGEREF_RECLAIM_CLEAN; | 655 | return PAGEREF_RECLAIM_CLEAN; |
| 621 | 656 | ||
| 622 | return PAGEREF_RECLAIM; | 657 | return PAGEREF_RECLAIM; |
| @@ -644,12 +679,14 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages) | |||
| 644 | * shrink_page_list() returns the number of reclaimed pages | 679 | * shrink_page_list() returns the number of reclaimed pages |
| 645 | */ | 680 | */ |
| 646 | static unsigned long shrink_page_list(struct list_head *page_list, | 681 | static unsigned long shrink_page_list(struct list_head *page_list, |
| 647 | struct scan_control *sc, | 682 | struct zone *zone, |
| 648 | enum pageout_io sync_writeback) | 683 | struct scan_control *sc) |
| 649 | { | 684 | { |
| 650 | LIST_HEAD(ret_pages); | 685 | LIST_HEAD(ret_pages); |
| 651 | LIST_HEAD(free_pages); | 686 | LIST_HEAD(free_pages); |
| 652 | int pgactivate = 0; | 687 | int pgactivate = 0; |
| 688 | unsigned long nr_dirty = 0; | ||
| 689 | unsigned long nr_congested = 0; | ||
| 653 | unsigned long nr_reclaimed = 0; | 690 | unsigned long nr_reclaimed = 0; |
| 654 | 691 | ||
| 655 | cond_resched(); | 692 | cond_resched(); |
| @@ -669,6 +706,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 669 | goto keep; | 706 | goto keep; |
| 670 | 707 | ||
| 671 | VM_BUG_ON(PageActive(page)); | 708 | VM_BUG_ON(PageActive(page)); |
| 709 | VM_BUG_ON(page_zone(page) != zone); | ||
| 672 | 710 | ||
| 673 | sc->nr_scanned++; | 711 | sc->nr_scanned++; |
| 674 | 712 | ||
| @@ -694,10 +732,13 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 694 | * for any page for which writeback has already | 732 | * for any page for which writeback has already |
| 695 | * started. | 733 | * started. |
| 696 | */ | 734 | */ |
| 697 | if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs) | 735 | if (sc->lumpy_reclaim_mode == LUMPY_MODE_SYNC && |
| 736 | may_enter_fs) | ||
| 698 | wait_on_page_writeback(page); | 737 | wait_on_page_writeback(page); |
| 699 | else | 738 | else { |
| 700 | goto keep_locked; | 739 | unlock_page(page); |
| 740 | goto keep_lumpy; | ||
| 741 | } | ||
| 701 | } | 742 | } |
| 702 | 743 | ||
| 703 | references = page_check_references(page, sc); | 744 | references = page_check_references(page, sc); |
| @@ -743,6 +784,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 743 | } | 784 | } |
| 744 | 785 | ||
| 745 | if (PageDirty(page)) { | 786 | if (PageDirty(page)) { |
| 787 | nr_dirty++; | ||
| 788 | |||
| 746 | if (references == PAGEREF_RECLAIM_CLEAN) | 789 | if (references == PAGEREF_RECLAIM_CLEAN) |
| 747 | goto keep_locked; | 790 | goto keep_locked; |
| 748 | if (!may_enter_fs) | 791 | if (!may_enter_fs) |
| @@ -751,14 +794,18 @@ static unsigned long shrink_page_list(struct list_head *page_list, | |||
| 751 | goto keep_locked; | 794 | goto keep_locked; |
| 752 | 795 | ||
| 753 | /* Page is dirty, try to write it out here */ | 796 | /* Page is dirty, try to write it out here */ |
| 754 | switch (pageout(page, mapping, sync_writeback)) { | 797 | switch (pageout(page, mapping, sc)) { |
| 755 | case PAGE_KEEP: | 798 | case PAGE_KEEP: |
| 799 | nr_congested++; | ||
| 756 | goto keep_locked; | 800 | goto keep_locked; |
| 757 | case PAGE_ACTIVATE: | 801 | case PAGE_ACTIVATE: |
| 758 | goto activate_locked; | 802 | goto activate_locked; |
| 759 | case PAGE_SUCCESS: | 803 | case PAGE_SUCCESS: |
| 760 | if (PageWriteback(page) || PageDirty(page)) | 804 | if (PageWriteback(page)) |
| 805 | goto keep_lumpy; | ||
| 806 | if (PageDirty(page)) | ||
| 761 | goto keep; | 807 | goto keep; |
| 808 | |||
| 762 | /* | 809 | /* |
| 763 | * A synchronous write - probably a ramdisk. Go | 810 | * A synchronous write - probably a ramdisk. Go |
| 764 | * ahead and try to reclaim the page. | 811 | * ahead and try to reclaim the page. |
| @@ -841,6 +888,7 @@ cull_mlocked: | |||
| 841 | try_to_free_swap(page); | 888 | try_to_free_swap(page); |
| 842 | unlock_page(page); | 889 | unlock_page(page); |
| 843 | putback_lru_page(page); | 890 | putback_lru_page(page); |
| 891 | disable_lumpy_reclaim_mode(sc); | ||
| 844 | continue; | 892 | continue; |
| 845 | 893 | ||
| 846 | activate_locked: | 894 | activate_locked: |
| @@ -853,10 +901,21 @@ activate_locked: | |||
| 853 | keep_locked: | 901 | keep_locked: |
| 854 | unlock_page(page); | 902 | unlock_page(page); |
| 855 | keep: | 903 | keep: |
| 904 | disable_lumpy_reclaim_mode(sc); | ||
| 905 | keep_lumpy: | ||
| 856 | list_add(&page->lru, &ret_pages); | 906 | list_add(&page->lru, &ret_pages); |
| 857 | VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); | 907 | VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); |
| 858 | } | 908 | } |
| 859 | 909 | ||
| 910 | /* | ||
| 911 | * Tag a zone as congested if all the dirty pages encountered were | ||
| 912 | * backed by a congested BDI. In this case, reclaimers should just | ||
| 913 | * back off and wait for congestion to clear because further reclaim | ||
| 914 | * will encounter the same problem | ||
| 915 | */ | ||
| 916 | if (nr_dirty == nr_congested && nr_dirty != 0) | ||
| 917 | zone_set_flag(zone, ZONE_CONGESTED); | ||
| 918 | |||
| 860 | free_page_list(&free_pages); | 919 | free_page_list(&free_pages); |
| 861 | 920 | ||
| 862 | list_splice(&ret_pages, page_list); | 921 | list_splice(&ret_pages, page_list); |
| @@ -1006,7 +1065,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, | |||
| 1006 | 1065 | ||
| 1007 | /* Check that we have not crossed a zone boundary. */ | 1066 | /* Check that we have not crossed a zone boundary. */ |
| 1008 | if (unlikely(page_zone_id(cursor_page) != zone_id)) | 1067 | if (unlikely(page_zone_id(cursor_page) != zone_id)) |
| 1009 | continue; | 1068 | break; |
| 1010 | 1069 | ||
| 1011 | /* | 1070 | /* |
| 1012 | * If we don't have enough swap space, reclaiming of | 1071 | * If we don't have enough swap space, reclaiming of |
| @@ -1014,8 +1073,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, | |||
| 1014 | * pointless. | 1073 | * pointless. |
| 1015 | */ | 1074 | */ |
| 1016 | if (nr_swap_pages <= 0 && PageAnon(cursor_page) && | 1075 | if (nr_swap_pages <= 0 && PageAnon(cursor_page) && |
| 1017 | !PageSwapCache(cursor_page)) | 1076 | !PageSwapCache(cursor_page)) |
| 1018 | continue; | 1077 | break; |
| 1019 | 1078 | ||
| 1020 | if (__isolate_lru_page(cursor_page, mode, file) == 0) { | 1079 | if (__isolate_lru_page(cursor_page, mode, file) == 0) { |
| 1021 | list_move(&cursor_page->lru, dst); | 1080 | list_move(&cursor_page->lru, dst); |
| @@ -1026,11 +1085,16 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, | |||
| 1026 | nr_lumpy_dirty++; | 1085 | nr_lumpy_dirty++; |
| 1027 | scan++; | 1086 | scan++; |
| 1028 | } else { | 1087 | } else { |
| 1029 | if (mode == ISOLATE_BOTH && | 1088 | /* the page is freed already. */ |
| 1030 | page_count(cursor_page)) | 1089 | if (!page_count(cursor_page)) |
| 1031 | nr_lumpy_failed++; | 1090 | continue; |
| 1091 | break; | ||
| 1032 | } | 1092 | } |
| 1033 | } | 1093 | } |
| 1094 | |||
| 1095 | /* If we break out of the loop above, lumpy reclaim failed */ | ||
| 1096 | if (pfn < end_pfn) | ||
| 1097 | nr_lumpy_failed++; | ||
| 1034 | } | 1098 | } |
| 1035 | 1099 | ||
| 1036 | *scanned = scan; | 1100 | *scanned = scan; |
| @@ -1253,7 +1317,7 @@ static inline bool should_reclaim_stall(unsigned long nr_taken, | |||
| 1253 | return false; | 1317 | return false; |
| 1254 | 1318 | ||
| 1255 | /* Only stall on lumpy reclaim */ | 1319 | /* Only stall on lumpy reclaim */ |
| 1256 | if (!sc->lumpy_reclaim_mode) | 1320 | if (sc->lumpy_reclaim_mode == LUMPY_MODE_NONE) |
| 1257 | return false; | 1321 | return false; |
| 1258 | 1322 | ||
| 1259 | /* If we have relaimed everything on the isolated list, no stall */ | 1323 | /* If we have relaimed everything on the isolated list, no stall */ |
| @@ -1286,7 +1350,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1286 | unsigned long nr_scanned; | 1350 | unsigned long nr_scanned; |
| 1287 | unsigned long nr_reclaimed = 0; | 1351 | unsigned long nr_reclaimed = 0; |
| 1288 | unsigned long nr_taken; | 1352 | unsigned long nr_taken; |
| 1289 | unsigned long nr_active; | ||
| 1290 | unsigned long nr_anon; | 1353 | unsigned long nr_anon; |
| 1291 | unsigned long nr_file; | 1354 | unsigned long nr_file; |
| 1292 | 1355 | ||
| @@ -1298,15 +1361,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1298 | return SWAP_CLUSTER_MAX; | 1361 | return SWAP_CLUSTER_MAX; |
| 1299 | } | 1362 | } |
| 1300 | 1363 | ||
| 1301 | 1364 | set_lumpy_reclaim_mode(priority, sc, false); | |
| 1302 | lru_add_drain(); | 1365 | lru_add_drain(); |
| 1303 | spin_lock_irq(&zone->lru_lock); | 1366 | spin_lock_irq(&zone->lru_lock); |
| 1304 | 1367 | ||
| 1305 | if (scanning_global_lru(sc)) { | 1368 | if (scanning_global_lru(sc)) { |
| 1306 | nr_taken = isolate_pages_global(nr_to_scan, | 1369 | nr_taken = isolate_pages_global(nr_to_scan, |
| 1307 | &page_list, &nr_scanned, sc->order, | 1370 | &page_list, &nr_scanned, sc->order, |
| 1308 | sc->lumpy_reclaim_mode ? | 1371 | sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ? |
| 1309 | ISOLATE_BOTH : ISOLATE_INACTIVE, | 1372 | ISOLATE_INACTIVE : ISOLATE_BOTH, |
| 1310 | zone, 0, file); | 1373 | zone, 0, file); |
| 1311 | zone->pages_scanned += nr_scanned; | 1374 | zone->pages_scanned += nr_scanned; |
| 1312 | if (current_is_kswapd()) | 1375 | if (current_is_kswapd()) |
| @@ -1318,8 +1381,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1318 | } else { | 1381 | } else { |
| 1319 | nr_taken = mem_cgroup_isolate_pages(nr_to_scan, | 1382 | nr_taken = mem_cgroup_isolate_pages(nr_to_scan, |
| 1320 | &page_list, &nr_scanned, sc->order, | 1383 | &page_list, &nr_scanned, sc->order, |
| 1321 | sc->lumpy_reclaim_mode ? | 1384 | sc->lumpy_reclaim_mode == LUMPY_MODE_NONE ? |
| 1322 | ISOLATE_BOTH : ISOLATE_INACTIVE, | 1385 | ISOLATE_INACTIVE : ISOLATE_BOTH, |
| 1323 | zone, sc->mem_cgroup, | 1386 | zone, sc->mem_cgroup, |
| 1324 | 0, file); | 1387 | 0, file); |
| 1325 | /* | 1388 | /* |
| @@ -1337,20 +1400,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1337 | 1400 | ||
| 1338 | spin_unlock_irq(&zone->lru_lock); | 1401 | spin_unlock_irq(&zone->lru_lock); |
| 1339 | 1402 | ||
| 1340 | nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC); | 1403 | nr_reclaimed = shrink_page_list(&page_list, zone, sc); |
| 1341 | 1404 | ||
| 1342 | /* Check if we should syncronously wait for writeback */ | 1405 | /* Check if we should syncronously wait for writeback */ |
| 1343 | if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { | 1406 | if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) { |
| 1344 | congestion_wait(BLK_RW_ASYNC, HZ/10); | 1407 | set_lumpy_reclaim_mode(priority, sc, true); |
| 1345 | 1408 | nr_reclaimed += shrink_page_list(&page_list, zone, sc); | |
| 1346 | /* | ||
| 1347 | * The attempt at page out may have made some | ||
| 1348 | * of the pages active, mark them inactive again. | ||
| 1349 | */ | ||
| 1350 | nr_active = clear_active_flags(&page_list, NULL); | ||
| 1351 | count_vm_events(PGDEACTIVATE, nr_active); | ||
| 1352 | |||
| 1353 | nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC); | ||
| 1354 | } | 1409 | } |
| 1355 | 1410 | ||
| 1356 | local_irq_disable(); | 1411 | local_irq_disable(); |
| @@ -1359,6 +1414,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone, | |||
| 1359 | __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); | 1414 | __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); |
| 1360 | 1415 | ||
| 1361 | putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list); | 1416 | putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list); |
| 1417 | |||
| 1418 | trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, | ||
| 1419 | zone_idx(zone), | ||
| 1420 | nr_scanned, nr_reclaimed, | ||
| 1421 | priority, | ||
| 1422 | trace_shrink_flags(file, sc->lumpy_reclaim_mode)); | ||
| 1362 | return nr_reclaimed; | 1423 | return nr_reclaimed; |
| 1363 | } | 1424 | } |
| 1364 | 1425 | ||
| @@ -1506,6 +1567,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone, | |||
| 1506 | spin_unlock_irq(&zone->lru_lock); | 1567 | spin_unlock_irq(&zone->lru_lock); |
| 1507 | } | 1568 | } |
| 1508 | 1569 | ||
| 1570 | #ifdef CONFIG_SWAP | ||
| 1509 | static int inactive_anon_is_low_global(struct zone *zone) | 1571 | static int inactive_anon_is_low_global(struct zone *zone) |
| 1510 | { | 1572 | { |
| 1511 | unsigned long active, inactive; | 1573 | unsigned long active, inactive; |
| @@ -1531,12 +1593,26 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc) | |||
| 1531 | { | 1593 | { |
| 1532 | int low; | 1594 | int low; |
| 1533 | 1595 | ||
| 1596 | /* | ||
| 1597 | * If we don't have swap space, anonymous page deactivation | ||
| 1598 | * is pointless. | ||
| 1599 | */ | ||
| 1600 | if (!total_swap_pages) | ||
| 1601 | return 0; | ||
| 1602 | |||
| 1534 | if (scanning_global_lru(sc)) | 1603 | if (scanning_global_lru(sc)) |
| 1535 | low = inactive_anon_is_low_global(zone); | 1604 | low = inactive_anon_is_low_global(zone); |
| 1536 | else | 1605 | else |
| 1537 | low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); | 1606 | low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup); |
| 1538 | return low; | 1607 | return low; |
| 1539 | } | 1608 | } |
| 1609 | #else | ||
| 1610 | static inline int inactive_anon_is_low(struct zone *zone, | ||
| 1611 | struct scan_control *sc) | ||
| 1612 | { | ||
| 1613 | return 0; | ||
| 1614 | } | ||
| 1615 | #endif | ||
| 1540 | 1616 | ||
| 1541 | static int inactive_file_is_low_global(struct zone *zone) | 1617 | static int inactive_file_is_low_global(struct zone *zone) |
| 1542 | { | 1618 | { |
| @@ -1721,21 +1797,6 @@ out: | |||
| 1721 | } | 1797 | } |
| 1722 | } | 1798 | } |
| 1723 | 1799 | ||
| 1724 | static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc) | ||
| 1725 | { | ||
| 1726 | /* | ||
| 1727 | * If we need a large contiguous chunk of memory, or have | ||
| 1728 | * trouble getting a small set of contiguous pages, we | ||
| 1729 | * will reclaim both active and inactive pages. | ||
| 1730 | */ | ||
| 1731 | if (sc->order > PAGE_ALLOC_COSTLY_ORDER) | ||
| 1732 | sc->lumpy_reclaim_mode = 1; | ||
| 1733 | else if (sc->order && priority < DEF_PRIORITY - 2) | ||
| 1734 | sc->lumpy_reclaim_mode = 1; | ||
| 1735 | else | ||
| 1736 | sc->lumpy_reclaim_mode = 0; | ||
| 1737 | } | ||
| 1738 | |||
| 1739 | /* | 1800 | /* |
| 1740 | * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. | 1801 | * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. |
| 1741 | */ | 1802 | */ |
| @@ -1750,8 +1811,6 @@ static void shrink_zone(int priority, struct zone *zone, | |||
| 1750 | 1811 | ||
| 1751 | get_scan_count(zone, sc, nr, priority); | 1812 | get_scan_count(zone, sc, nr, priority); |
| 1752 | 1813 | ||
| 1753 | set_lumpy_reclaim_mode(priority, sc); | ||
| 1754 | |||
| 1755 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || | 1814 | while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || |
| 1756 | nr[LRU_INACTIVE_FILE]) { | 1815 | nr[LRU_INACTIVE_FILE]) { |
| 1757 | for_each_evictable_lru(l) { | 1816 | for_each_evictable_lru(l) { |
| @@ -1782,7 +1841,7 @@ static void shrink_zone(int priority, struct zone *zone, | |||
| 1782 | * Even if we did not try to evict anon pages at all, we want to | 1841 | * Even if we did not try to evict anon pages at all, we want to |
| 1783 | * rebalance the anon lru active/inactive ratio. | 1842 | * rebalance the anon lru active/inactive ratio. |
| 1784 | */ | 1843 | */ |
| 1785 | if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0) | 1844 | if (inactive_anon_is_low(zone, sc)) |
| 1786 | shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); | 1845 | shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0); |
| 1787 | 1846 | ||
| 1788 | throttle_vm_writeout(sc->gfp_mask); | 1847 | throttle_vm_writeout(sc->gfp_mask); |
| @@ -1937,21 +1996,16 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
| 1937 | 1996 | ||
| 1938 | /* Take a nap, wait for some writeback to complete */ | 1997 | /* Take a nap, wait for some writeback to complete */ |
| 1939 | if (!sc->hibernation_mode && sc->nr_scanned && | 1998 | if (!sc->hibernation_mode && sc->nr_scanned && |
| 1940 | priority < DEF_PRIORITY - 2) | 1999 | priority < DEF_PRIORITY - 2) { |
| 1941 | congestion_wait(BLK_RW_ASYNC, HZ/10); | 2000 | struct zone *preferred_zone; |
| 2001 | |||
| 2002 | first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), | ||
| 2003 | NULL, &preferred_zone); | ||
| 2004 | wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); | ||
| 2005 | } | ||
| 1942 | } | 2006 | } |
| 1943 | 2007 | ||
| 1944 | out: | 2008 | out: |
| 1945 | /* | ||
| 1946 | * Now that we've scanned all the zones at this priority level, note | ||
| 1947 | * that level within the zone so that the next thread which performs | ||
| 1948 | * scanning of this zone will immediately start out at this priority | ||
| 1949 | * level. This affects only the decision whether or not to bring | ||
| 1950 | * mapped pages onto the inactive list. | ||
| 1951 | */ | ||
| 1952 | if (priority < 0) | ||
| 1953 | priority = 0; | ||
| 1954 | |||
| 1955 | delayacct_freepages_end(); | 2009 | delayacct_freepages_end(); |
| 1956 | put_mems_allowed(); | 2010 | put_mems_allowed(); |
| 1957 | 2011 | ||
| @@ -2247,6 +2301,15 @@ loop_again: | |||
| 2247 | if (!zone_watermark_ok(zone, order, | 2301 | if (!zone_watermark_ok(zone, order, |
| 2248 | min_wmark_pages(zone), end_zone, 0)) | 2302 | min_wmark_pages(zone), end_zone, 0)) |
| 2249 | has_under_min_watermark_zone = 1; | 2303 | has_under_min_watermark_zone = 1; |
| 2304 | } else { | ||
| 2305 | /* | ||
| 2306 | * If a zone reaches its high watermark, | ||
| 2307 | * consider it to be no longer congested. It's | ||
| 2308 | * possible there are dirty pages backed by | ||
| 2309 | * congested BDIs but as pressure is relieved, | ||
| 2310 | * speculatively avoid congestion waits | ||
| 2311 | */ | ||
| 2312 | zone_clear_flag(zone, ZONE_CONGESTED); | ||
| 2250 | } | 2313 | } |
| 2251 | 2314 | ||
| 2252 | } | 2315 | } |
| @@ -2987,6 +3050,7 @@ int scan_unevictable_handler(struct ctl_table *table, int write, | |||
| 2987 | return 0; | 3050 | return 0; |
| 2988 | } | 3051 | } |
| 2989 | 3052 | ||
| 3053 | #ifdef CONFIG_NUMA | ||
| 2990 | /* | 3054 | /* |
| 2991 | * per node 'scan_unevictable_pages' attribute. On demand re-scan of | 3055 | * per node 'scan_unevictable_pages' attribute. On demand re-scan of |
| 2992 | * a specified node's per zone unevictable lists for evictable pages. | 3056 | * a specified node's per zone unevictable lists for evictable pages. |
| @@ -3033,4 +3097,4 @@ void scan_unevictable_unregister_node(struct node *node) | |||
| 3033 | { | 3097 | { |
| 3034 | sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages); | 3098 | sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages); |
| 3035 | } | 3099 | } |
| 3036 | 3100 | #endif | |
diff --git a/mm/vmstat.c b/mm/vmstat.c index 355a9e669aaa..8f62f17ee1c7 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | #include <linux/vmstat.h> | 17 | #include <linux/vmstat.h> |
| 18 | #include <linux/sched.h> | 18 | #include <linux/sched.h> |
| 19 | #include <linux/math64.h> | 19 | #include <linux/math64.h> |
| 20 | #include <linux/writeback.h> | ||
| 21 | #include <linux/compaction.h> | ||
| 20 | 22 | ||
| 21 | #ifdef CONFIG_VM_EVENT_COUNTERS | 23 | #ifdef CONFIG_VM_EVENT_COUNTERS |
| 22 | DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; | 24 | DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; |
| @@ -394,6 +396,7 @@ void zone_statistics(struct zone *preferred_zone, struct zone *z) | |||
| 394 | #endif | 396 | #endif |
| 395 | 397 | ||
| 396 | #ifdef CONFIG_COMPACTION | 398 | #ifdef CONFIG_COMPACTION |
| 399 | |||
| 397 | struct contig_page_info { | 400 | struct contig_page_info { |
| 398 | unsigned long free_pages; | 401 | unsigned long free_pages; |
| 399 | unsigned long free_blocks_total; | 402 | unsigned long free_blocks_total; |
| @@ -745,6 +748,9 @@ static const char * const vmstat_text[] = { | |||
| 745 | "nr_isolated_anon", | 748 | "nr_isolated_anon", |
| 746 | "nr_isolated_file", | 749 | "nr_isolated_file", |
| 747 | "nr_shmem", | 750 | "nr_shmem", |
| 751 | "nr_dirtied", | ||
| 752 | "nr_written", | ||
| 753 | |||
| 748 | #ifdef CONFIG_NUMA | 754 | #ifdef CONFIG_NUMA |
| 749 | "numa_hit", | 755 | "numa_hit", |
| 750 | "numa_miss", | 756 | "numa_miss", |
| @@ -753,6 +759,8 @@ static const char * const vmstat_text[] = { | |||
| 753 | "numa_local", | 759 | "numa_local", |
| 754 | "numa_other", | 760 | "numa_other", |
| 755 | #endif | 761 | #endif |
| 762 | "nr_dirty_threshold", | ||
| 763 | "nr_dirty_background_threshold", | ||
| 756 | 764 | ||
| 757 | #ifdef CONFIG_VM_EVENT_COUNTERS | 765 | #ifdef CONFIG_VM_EVENT_COUNTERS |
| 758 | "pgpgin", | 766 | "pgpgin", |
| @@ -904,36 +912,44 @@ static const struct file_operations proc_zoneinfo_file_operations = { | |||
| 904 | .release = seq_release, | 912 | .release = seq_release, |
| 905 | }; | 913 | }; |
| 906 | 914 | ||
| 915 | enum writeback_stat_item { | ||
| 916 | NR_DIRTY_THRESHOLD, | ||
| 917 | NR_DIRTY_BG_THRESHOLD, | ||
| 918 | NR_VM_WRITEBACK_STAT_ITEMS, | ||
| 919 | }; | ||
| 920 | |||
| 907 | static void *vmstat_start(struct seq_file *m, loff_t *pos) | 921 | static void *vmstat_start(struct seq_file *m, loff_t *pos) |
| 908 | { | 922 | { |
| 909 | unsigned long *v; | 923 | unsigned long *v; |
| 910 | #ifdef CONFIG_VM_EVENT_COUNTERS | 924 | int i, stat_items_size; |
| 911 | unsigned long *e; | ||
| 912 | #endif | ||
| 913 | int i; | ||
| 914 | 925 | ||
| 915 | if (*pos >= ARRAY_SIZE(vmstat_text)) | 926 | if (*pos >= ARRAY_SIZE(vmstat_text)) |
| 916 | return NULL; | 927 | return NULL; |
| 928 | stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) + | ||
| 929 | NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long); | ||
| 917 | 930 | ||
| 918 | #ifdef CONFIG_VM_EVENT_COUNTERS | 931 | #ifdef CONFIG_VM_EVENT_COUNTERS |
| 919 | v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) | 932 | stat_items_size += sizeof(struct vm_event_state); |
| 920 | + sizeof(struct vm_event_state), GFP_KERNEL); | ||
| 921 | #else | ||
| 922 | v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long), | ||
| 923 | GFP_KERNEL); | ||
| 924 | #endif | 933 | #endif |
| 934 | |||
| 935 | v = kmalloc(stat_items_size, GFP_KERNEL); | ||
| 925 | m->private = v; | 936 | m->private = v; |
| 926 | if (!v) | 937 | if (!v) |
| 927 | return ERR_PTR(-ENOMEM); | 938 | return ERR_PTR(-ENOMEM); |
| 928 | for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) | 939 | for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) |
| 929 | v[i] = global_page_state(i); | 940 | v[i] = global_page_state(i); |
| 941 | v += NR_VM_ZONE_STAT_ITEMS; | ||
| 942 | |||
| 943 | global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD, | ||
| 944 | v + NR_DIRTY_THRESHOLD); | ||
| 945 | v += NR_VM_WRITEBACK_STAT_ITEMS; | ||
| 946 | |||
| 930 | #ifdef CONFIG_VM_EVENT_COUNTERS | 947 | #ifdef CONFIG_VM_EVENT_COUNTERS |
| 931 | e = v + NR_VM_ZONE_STAT_ITEMS; | 948 | all_vm_events(v); |
| 932 | all_vm_events(e); | 949 | v[PGPGIN] /= 2; /* sectors -> kbytes */ |
| 933 | e[PGPGIN] /= 2; /* sectors -> kbytes */ | 950 | v[PGPGOUT] /= 2; |
| 934 | e[PGPGOUT] /= 2; | ||
| 935 | #endif | 951 | #endif |
| 936 | return v + *pos; | 952 | return (unsigned long *)m->private + *pos; |
| 937 | } | 953 | } |
| 938 | 954 | ||
| 939 | static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) | 955 | static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) |
