diff options
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mlock.c | 7 | ||||
-rw-r--r-- | mm/page-writeback.c | 13 | ||||
-rw-r--r-- | mm/page_alloc.c | 27 | ||||
-rw-r--r-- | mm/page_io.c | 2 | ||||
-rw-r--r-- | mm/util.c | 20 | ||||
-rw-r--r-- | mm/vmalloc.c | 10 |
6 files changed, 65 insertions, 14 deletions
diff --git a/mm/mlock.c b/mm/mlock.c index 037161d61b4e..cbe9e0581b75 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size) | |||
660 | return buffer; | 660 | return buffer; |
661 | } | 661 | } |
662 | 662 | ||
663 | void free_locked_buffer(void *buffer, size_t size) | 663 | void release_locked_buffer(void *buffer, size_t size) |
664 | { | 664 | { |
665 | unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT; | 665 | unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT; |
666 | 666 | ||
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size) | |||
670 | current->mm->locked_vm -= pgsz; | 670 | current->mm->locked_vm -= pgsz; |
671 | 671 | ||
672 | up_write(&current->mm->mmap_sem); | 672 | up_write(&current->mm->mmap_sem); ||
673 | } | ||
674 | |||
675 | void free_locked_buffer(void *buffer, size_t size) | ||
676 | { | ||
677 | release_locked_buffer(buffer, size); | ||
673 | 678 | ||
674 | kfree(buffer); | 679 | kfree(buffer); |
675 | } | 680 | } |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 3c84128596ba..74dc57c74349 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi) | |||
240 | } | 240 | } |
241 | EXPORT_SYMBOL_GPL(bdi_writeout_inc); | 241 | EXPORT_SYMBOL_GPL(bdi_writeout_inc); |
242 | 242 | ||
243 | static inline void task_dirty_inc(struct task_struct *tsk) | 243 | void task_dirty_inc(struct task_struct *tsk) |
244 | { | 244 | { |
245 | prop_inc_single(&vm_dirties, &tsk->dirties); | 245 | prop_inc_single(&vm_dirties, &tsk->dirties); |
246 | } | 246 | } |
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page) | |||
1230 | __inc_zone_page_state(page, NR_FILE_DIRTY); | 1230 | __inc_zone_page_state(page, NR_FILE_DIRTY); |
1231 | __inc_bdi_stat(mapping->backing_dev_info, | 1231 | __inc_bdi_stat(mapping->backing_dev_info, |
1232 | BDI_RECLAIMABLE); | 1232 | BDI_RECLAIMABLE); |
1233 | task_dirty_inc(current); | ||
1233 | task_io_account_write(PAGE_CACHE_SIZE); | 1234 | task_io_account_write(PAGE_CACHE_SIZE); |
1234 | } | 1235 | } |
1235 | radix_tree_tag_set(&mapping->page_tree, | 1236 | radix_tree_tag_set(&mapping->page_tree, |
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage); | |||
1262 | * If the mapping doesn't provide a set_page_dirty a_op, then | 1263 | * If the mapping doesn't provide a set_page_dirty a_op, then |
1263 | * just fall through and assume that it wants buffer_heads. | 1264 | * just fall through and assume that it wants buffer_heads. |
1264 | */ | 1265 | */ |
1265 | static int __set_page_dirty(struct page *page) | 1266 | int set_page_dirty(struct page *page) |
1266 | { | 1267 | { |
1267 | struct address_space *mapping = page_mapping(page); | 1268 | struct address_space *mapping = page_mapping(page); |
1268 | 1269 | ||
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page) | |||
1280 | } | 1281 | } |
1281 | return 0; | 1282 | return 0; |
1282 | } | 1283 | } |
1283 | |||
1284 | int set_page_dirty(struct page *page) | ||
1285 | { | ||
1286 | int ret = __set_page_dirty(page); | ||
1287 | if (ret) | ||
1288 | task_dirty_inc(current); | ||
1289 | return ret; | ||
1290 | } | ||
1291 | EXPORT_SYMBOL(set_page_dirty); | 1284 | EXPORT_SYMBOL(set_page_dirty); |
1292 | 1285 | ||
1293 | /* | 1286 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5675b3073854..5c44ed49ca93 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid) | |||
2989 | * was used and there are no special requirements, this is a convenient | 2989 | * was used and there are no special requirements, this is a convenient |
2990 | * alternative | 2990 | * alternative |
2991 | */ | 2991 | */ |
2992 | int __meminit early_pfn_to_nid(unsigned long pfn) | 2992 | int __meminit __early_pfn_to_nid(unsigned long pfn) |
2993 | { | 2993 | { |
2994 | int i; | 2994 | int i; |
2995 | 2995 | ||
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn) | |||
3000 | if (start_pfn <= pfn && pfn < end_pfn) | 3000 | if (start_pfn <= pfn && pfn < end_pfn) |
3001 | return early_node_map[i].nid; | 3001 | return early_node_map[i].nid; |
3002 | } | 3002 | } |
3003 | /* This is a memory hole */ | ||
3004 | return -1; | ||
3005 | } | ||
3006 | #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ | ||
3007 | |||
3008 | int __meminit early_pfn_to_nid(unsigned long pfn) | ||
3009 | { | ||
3010 | int nid; | ||
3003 | 3011 | ||
3012 | nid = __early_pfn_to_nid(pfn); | ||
3013 | if (nid >= 0) | ||
3014 | return nid; | ||
3015 | /* just returns 0 */ | ||
3004 | return 0; | 3016 | return 0; |
3005 | } | 3017 | } |
3006 | #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ | 3018 | |
3019 | #ifdef CONFIG_NODES_SPAN_OTHER_NODES | ||
3020 | bool __meminit early_pfn_in_nid(unsigned long pfn, int node) | ||
3021 | { | ||
3022 | int nid; | ||
3023 | |||
3024 | nid = __early_pfn_to_nid(pfn); | ||
3025 | if (nid >= 0 && nid != node) | ||
3026 | return false; | ||
3027 | return true; | ||
3028 | } | ||
3029 | #endif | ||
3007 | 3030 | ||
3008 | /* Basic iterator support to walk early_node_map[] */ | 3031 | /* Basic iterator support to walk early_node_map[] */ |
3009 | #define for_each_active_range_index_in_nid(i, nid) \ | 3032 | #define for_each_active_range_index_in_nid(i, nid) \ |
diff --git a/mm/page_io.c b/mm/page_io.c index dc6ce0afbded..3023c475e041 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) | |||
111 | goto out; | 111 | goto out; |
112 | } | 112 | } |
113 | if (wbc->sync_mode == WB_SYNC_ALL) | 113 | if (wbc->sync_mode == WB_SYNC_ALL) |
114 | rw |= (1 << BIO_RW_SYNC); | 114 | rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); |
115 | count_vm_event(PSWPOUT); | 115 | count_vm_event(PSWPOUT); |
116 | set_page_writeback(page); | 116 | set_page_writeback(page); |
117 | unlock_page(page); | 117 | unlock_page(page); |
diff --git a/mm/util.c b/mm/util.c --- a/mm/util.c +++ b/mm/util.c | |||
@@ -129,6 +129,26 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) | |||
129 | } | 129 | } |
130 | EXPORT_SYMBOL(krealloc); | 130 | EXPORT_SYMBOL(krealloc); |
131 | 131 | ||
132 | /** | ||
133 | * kzfree - like kfree but zero memory | ||
134 | * @p: object to free memory of | ||
135 | * | ||
136 | * The memory of the object @p points to is zeroed before freed. | ||
137 | * If @p is %NULL, kzfree() does nothing. | ||
138 | */ | ||
139 | void kzfree(const void *p) | ||
140 | { | ||
141 | size_t ks; | ||
142 | void *mem = (void *)p; | ||
143 | |||
144 | if (unlikely(ZERO_OR_NULL_PTR(mem))) | ||
145 | return; | ||
146 | ks = ksize(mem); | ||
147 | memset(mem, 0, ks); | ||
148 | kfree(mem); | ||
149 | } | ||
150 | EXPORT_SYMBOL(kzfree); | ||
151 | |||
132 | /* | 152 | /* |
133 | * strndup_user - duplicate an existing string from user space | 153 | * strndup_user - duplicate an existing string from user space |
134 | * @s: The string to duplicate | 154 | * @s: The string to duplicate |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 75f49d312e8c..903cad46e796 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1012,6 +1012,8 @@ void __init vmalloc_init(void) | |||
1012 | void unmap_kernel_range(unsigned long addr, unsigned long size) | 1012 | void unmap_kernel_range(unsigned long addr, unsigned long size) |
1013 | { | 1013 | { |
1014 | unsigned long end = addr + size; | 1014 | unsigned long end = addr + size; |
1015 | |||
1016 | flush_cache_vunmap(addr, end); | ||
1015 | vunmap_page_range(addr, end); | 1017 | vunmap_page_range(addr, end); |
1016 | flush_tlb_kernel_range(addr, end); | 1018 | flush_tlb_kernel_range(addr, end); |
1017 | } | 1019 | } |
@@ -1106,6 +1108,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | |||
1106 | } | 1108 | } |
1107 | EXPORT_SYMBOL_GPL(__get_vm_area); | 1109 | EXPORT_SYMBOL_GPL(__get_vm_area); |
1108 | 1110 | ||
1111 | struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | ||
1112 | unsigned long start, unsigned long end, | ||
1113 | void *caller) | ||
1114 | { | ||
1115 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, | ||
1116 | caller); | ||
1117 | } | ||
1118 | |||
1109 | /** | 1119 | /** |
1110 | * get_vm_area - reserve a contiguous kernel virtual area | 1120 | * get_vm_area - reserve a contiguous kernel virtual area |
1111 | * @size: size of the area | 1121 | * @size: size of the area |