diff options
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mlock.c | 7 | ||||
-rw-r--r-- | mm/page-writeback.c | 13 | ||||
-rw-r--r-- | mm/page_alloc.c | 27 | ||||
-rw-r--r-- | mm/page_io.c | 2 | ||||
-rw-r--r-- | mm/swapfile.c | 4 | ||||
-rw-r--r-- | mm/util.c | 20 | ||||
-rw-r--r-- | mm/vmalloc.c | 10 | ||||
-rw-r--r-- | mm/vmscan.c | 28 |
8 files changed, 79 insertions, 32 deletions
diff --git a/mm/mlock.c b/mm/mlock.c index 037161d61b4e..cbe9e0581b75 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size) | |||
660 | return buffer; | 660 | return buffer; |
661 | } | 661 | } |
662 | 662 | ||
663 | void free_locked_buffer(void *buffer, size_t size) | 663 | void release_locked_buffer(void *buffer, size_t size) |
664 | { | 664 | { |
665 | unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT; | 665 | unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT; |
666 | 666 | ||
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size) | |||
670 | current->mm->locked_vm -= pgsz; | 670 | current->mm->locked_vm -= pgsz; |
671 | 671 | ||
672 | up_write(&current->mm->mmap_sem); | 672 | up_write(&current->mm->mmap_sem); | ||
673 | } | ||
674 | |||
675 | void free_locked_buffer(void *buffer, size_t size) | ||
676 | { | ||
677 | release_locked_buffer(buffer, size); | ||
673 | 678 | ||
674 | kfree(buffer); | 679 | kfree(buffer); |
675 | } | 680 | } |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 3c84128596ba..74dc57c74349 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi) | |||
240 | } | 240 | } |
241 | EXPORT_SYMBOL_GPL(bdi_writeout_inc); | 241 | EXPORT_SYMBOL_GPL(bdi_writeout_inc); |
242 | 242 | ||
243 | static inline void task_dirty_inc(struct task_struct *tsk) | 243 | void task_dirty_inc(struct task_struct *tsk) |
244 | { | 244 | { |
245 | prop_inc_single(&vm_dirties, &tsk->dirties); | 245 | prop_inc_single(&vm_dirties, &tsk->dirties); |
246 | } | 246 | } |
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page) | |||
1230 | __inc_zone_page_state(page, NR_FILE_DIRTY); | 1230 | __inc_zone_page_state(page, NR_FILE_DIRTY); |
1231 | __inc_bdi_stat(mapping->backing_dev_info, | 1231 | __inc_bdi_stat(mapping->backing_dev_info, |
1232 | BDI_RECLAIMABLE); | 1232 | BDI_RECLAIMABLE); |
1233 | task_dirty_inc(current); | ||
1233 | task_io_account_write(PAGE_CACHE_SIZE); | 1234 | task_io_account_write(PAGE_CACHE_SIZE); |
1234 | } | 1235 | } |
1235 | radix_tree_tag_set(&mapping->page_tree, | 1236 | radix_tree_tag_set(&mapping->page_tree, |
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage); | |||
1262 | * If the mapping doesn't provide a set_page_dirty a_op, then | 1263 | * If the mapping doesn't provide a set_page_dirty a_op, then |
1263 | * just fall through and assume that it wants buffer_heads. | 1264 | * just fall through and assume that it wants buffer_heads. |
1264 | */ | 1265 | */ |
1265 | static int __set_page_dirty(struct page *page) | 1266 | int set_page_dirty(struct page *page) |
1266 | { | 1267 | { |
1267 | struct address_space *mapping = page_mapping(page); | 1268 | struct address_space *mapping = page_mapping(page); |
1268 | 1269 | ||
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page) | |||
1280 | } | 1281 | } |
1281 | return 0; | 1282 | return 0; |
1282 | } | 1283 | } |
1283 | |||
1284 | int set_page_dirty(struct page *page) | ||
1285 | { | ||
1286 | int ret = __set_page_dirty(page); | ||
1287 | if (ret) | ||
1288 | task_dirty_inc(current); | ||
1289 | return ret; | ||
1290 | } | ||
1291 | EXPORT_SYMBOL(set_page_dirty); | 1284 | EXPORT_SYMBOL(set_page_dirty); |
1292 | 1285 | ||
1293 | /* | 1286 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5675b3073854..5c44ed49ca93 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid) | |||
2989 | * was used and there are no special requirements, this is a convenient | 2989 | * was used and there are no special requirements, this is a convenient |
2990 | * alternative | 2990 | * alternative |
2991 | */ | 2991 | */ |
2992 | int __meminit early_pfn_to_nid(unsigned long pfn) | 2992 | int __meminit __early_pfn_to_nid(unsigned long pfn) |
2993 | { | 2993 | { |
2994 | int i; | 2994 | int i; |
2995 | 2995 | ||
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn) | |||
3000 | if (start_pfn <= pfn && pfn < end_pfn) | 3000 | if (start_pfn <= pfn && pfn < end_pfn) |
3001 | return early_node_map[i].nid; | 3001 | return early_node_map[i].nid; |
3002 | } | 3002 | } |
3003 | /* This is a memory hole */ | ||
3004 | return -1; | ||
3005 | } | ||
3006 | #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ | ||
3007 | |||
3008 | int __meminit early_pfn_to_nid(unsigned long pfn) | ||
3009 | { | ||
3010 | int nid; | ||
3003 | 3011 | ||
3012 | nid = __early_pfn_to_nid(pfn); | ||
3013 | if (nid >= 0) | ||
3014 | return nid; | ||
3015 | /* just returns 0 */ | ||
3004 | return 0; | 3016 | return 0; |
3005 | } | 3017 | } |
3006 | #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ | 3018 | |
3019 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES | ||
3020 | bool __meminit early_pfn_in_nid(unsigned long pfn, int node) | ||
3021 | { | ||
3022 | int nid; | ||
3023 | |||
3024 | nid = __early_pfn_to_nid(pfn); | ||
3025 | if (nid >= 0 && nid != node) | ||
3026 | return false; | ||
3027 | return true; | ||
3028 | } | ||
3029 | #endif | ||
3007 | 3030 | ||
3008 | /* Basic iterator support to walk early_node_map[] */ | 3031 | /* Basic iterator support to walk early_node_map[] */ |
3009 | #define for_each_active_range_index_in_nid(i, nid) \ | 3032 | #define for_each_active_range_index_in_nid(i, nid) \ |
diff --git a/mm/page_io.c b/mm/page_io.c index dc6ce0afbded..3023c475e041 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) | |||
111 | goto out; | 111 | goto out; |
112 | } | 112 | } |
113 | if (wbc->sync_mode == WB_SYNC_ALL) | 113 | if (wbc->sync_mode == WB_SYNC_ALL) |
114 | rw |= (1 << BIO_RW_SYNC); | 114 | rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); |
115 | count_vm_event(PSWPOUT); | 115 | count_vm_event(PSWPOUT); |
116 | set_page_writeback(page); | 116 | set_page_writeback(page); |
117 | unlock_page(page); | 117 | unlock_page(page); |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 7e6304dfafab..312fafe0ab6e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -635,7 +635,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) | |||
635 | 635 | ||
636 | if (!bdev) { | 636 | if (!bdev) { |
637 | if (bdev_p) | 637 | if (bdev_p) |
638 | *bdev_p = sis->bdev; | 638 | *bdev_p = bdget(sis->bdev->bd_dev); |
639 | 639 | ||
640 | spin_unlock(&swap_lock); | 640 | spin_unlock(&swap_lock); |
641 | return i; | 641 | return i; |
@@ -647,7 +647,7 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) | |||
647 | struct swap_extent, list); | 647 | struct swap_extent, list); |
648 | if (se->start_block == offset) { | 648 | if (se->start_block == offset) { |
649 | if (bdev_p) | 649 | if (bdev_p) |
650 | *bdev_p = sis->bdev; | 650 | *bdev_p = bdget(sis->bdev->bd_dev); |
651 | 651 | ||
652 | spin_unlock(&swap_lock); | 652 | spin_unlock(&swap_lock); |
653 | bdput(bdev); | 653 | bdput(bdev); |
@@ -129,6 +129,26 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) | |||
129 | } | 129 | } |
130 | EXPORT_SYMBOL(krealloc); | 130 | EXPORT_SYMBOL(krealloc); |
131 | 131 | ||
132 | /** | ||
133 | * kzfree - like kfree but zero memory | ||
134 | * @p: object to free memory of | ||
135 | * | ||
136 | * The memory of the object @p points to is zeroed before freed. | ||
137 | * If @p is %NULL, kzfree() does nothing. | ||
138 | */ | ||
139 | void kzfree(const void *p) | ||
140 | { | ||
141 | size_t ks; | ||
142 | void *mem = (void *)p; | ||
143 | |||
144 | if (unlikely(ZERO_OR_NULL_PTR(mem))) | ||
145 | return; | ||
146 | ks = ksize(mem); | ||
147 | memset(mem, 0, ks); | ||
148 | kfree(mem); | ||
149 | } | ||
150 | EXPORT_SYMBOL(kzfree); | ||
151 | |||
132 | /* | 152 | /* |
133 | * strndup_user - duplicate an existing string from user space | 153 | * strndup_user - duplicate an existing string from user space |
134 | * @s: The string to duplicate | 154 | * @s: The string to duplicate |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 75f49d312e8c..903cad46e796 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1012,6 +1012,8 @@ void __init vmalloc_init(void) | |||
1012 | void unmap_kernel_range(unsigned long addr, unsigned long size) | 1012 | void unmap_kernel_range(unsigned long addr, unsigned long size) |
1013 | { | 1013 | { |
1014 | unsigned long end = addr + size; | 1014 | unsigned long end = addr + size; |
1015 | |||
1016 | flush_cache_vunmap(addr, end); | ||
1015 | vunmap_page_range(addr, end); | 1017 | vunmap_page_range(addr, end); |
1016 | flush_tlb_kernel_range(addr, end); | 1018 | flush_tlb_kernel_range(addr, end); |
1017 | } | 1019 | } |
@@ -1106,6 +1108,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | |||
1106 | } | 1108 | } |
1107 | EXPORT_SYMBOL_GPL(__get_vm_area); | 1109 | EXPORT_SYMBOL_GPL(__get_vm_area); |
1108 | 1110 | ||
1111 | struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | ||
1112 | unsigned long start, unsigned long end, | ||
1113 | void *caller) | ||
1114 | { | ||
1115 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, | ||
1116 | caller); | ||
1117 | } | ||
1118 | |||
1109 | /** | 1119 | /** |
1110 | * get_vm_area - reserve a contiguous kernel virtual area | 1120 | * get_vm_area - reserve a contiguous kernel virtual area |
1111 | * @size: size of the area | 1121 | * @size: size of the area |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 9a27c44aa327..6177e3bcd66b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2057,31 +2057,31 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, | |||
2057 | int pass, struct scan_control *sc) | 2057 | int pass, struct scan_control *sc) |
2058 | { | 2058 | { |
2059 | struct zone *zone; | 2059 | struct zone *zone; |
2060 | unsigned long nr_to_scan, ret = 0; | 2060 | unsigned long ret = 0; |
2061 | enum lru_list l; | ||
2062 | 2061 | ||
2063 | for_each_zone(zone) { | 2062 | for_each_zone(zone) { |
2063 | enum lru_list l; | ||
2064 | 2064 | ||
2065 | if (!populated_zone(zone)) | 2065 | if (!populated_zone(zone)) |
2066 | continue; | 2066 | continue; |
2067 | |||
2068 | if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) | 2067 | if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY) |
2069 | continue; | 2068 | continue; |
2070 | 2069 | ||
2071 | for_each_evictable_lru(l) { | 2070 | for_each_evictable_lru(l) { |
2071 | enum zone_stat_item ls = NR_LRU_BASE + l; | ||
2072 | unsigned long lru_pages = zone_page_state(zone, ls); | ||
2073 | |||
2072 | /* For pass = 0, we don't shrink the active list */ | 2074 | /* For pass = 0, we don't shrink the active list */ |
2073 | if (pass == 0 && | 2075 | if (pass == 0 && (l == LRU_ACTIVE_ANON || |
2074 | (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE)) | 2076 | l == LRU_ACTIVE_FILE)) |
2075 | continue; | 2077 | continue; |
2076 | 2078 | ||
2077 | zone->lru[l].nr_scan += | 2079 | zone->lru[l].nr_scan += (lru_pages >> prio) + 1; |
2078 | (zone_page_state(zone, NR_LRU_BASE + l) | ||
2079 | >> prio) + 1; | ||
2080 | if (zone->lru[l].nr_scan >= nr_pages || pass > 3) { | 2080 | if (zone->lru[l].nr_scan >= nr_pages || pass > 3) { |
2081 | unsigned long nr_to_scan; | ||
2082 | |||
2081 | zone->lru[l].nr_scan = 0; | 2083 | zone->lru[l].nr_scan = 0; |
2082 | nr_to_scan = min(nr_pages, | 2084 | nr_to_scan = min(nr_pages, lru_pages); |
2083 | zone_page_state(zone, | ||
2084 | NR_LRU_BASE + l)); | ||
2085 | ret += shrink_list(l, nr_to_scan, zone, | 2085 | ret += shrink_list(l, nr_to_scan, zone, |
2086 | sc, prio); | 2086 | sc, prio); |
2087 | if (ret >= nr_pages) | 2087 | if (ret >= nr_pages) |
@@ -2089,7 +2089,6 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio, | |||
2089 | } | 2089 | } |
2090 | } | 2090 | } |
2091 | } | 2091 | } |
2092 | |||
2093 | return ret; | 2092 | return ret; |
2094 | } | 2093 | } |
2095 | 2094 | ||
@@ -2112,7 +2111,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
2112 | .may_swap = 0, | 2111 | .may_swap = 0, |
2113 | .swap_cluster_max = nr_pages, | 2112 | .swap_cluster_max = nr_pages, |
2114 | .may_writepage = 1, | 2113 | .may_writepage = 1, |
2115 | .swappiness = vm_swappiness, | ||
2116 | .isolate_pages = isolate_pages_global, | 2114 | .isolate_pages = isolate_pages_global, |
2117 | }; | 2115 | }; |
2118 | 2116 | ||
@@ -2146,10 +2144,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages) | |||
2146 | int prio; | 2144 | int prio; |
2147 | 2145 | ||
2148 | /* Force reclaiming mapped pages in the passes #3 and #4 */ | 2146 | /* Force reclaiming mapped pages in the passes #3 and #4 */ |
2149 | if (pass > 2) { | 2147 | if (pass > 2) |
2150 | sc.may_swap = 1; | 2148 | sc.may_swap = 1; |
2151 | sc.swappiness = 100; | ||
2152 | } | ||
2153 | 2149 | ||
2154 | for (prio = DEF_PRIORITY; prio >= 0; prio--) { | 2150 | for (prio = DEF_PRIORITY; prio >= 0; prio--) { |
2155 | unsigned long nr_to_scan = nr_pages - ret; | 2151 | unsigned long nr_to_scan = nr_pages - ret; |