author	Ingo Molnar <mingo@elte.hu>	2009-02-19 03:00:35 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-19 03:00:35 -0500
commit	72c26c9a26ea7f2f3d14f162c2ebb07805f724ea (patch)
tree	bf1b4bc0b69f96c79474f9edb9cf0e811c95f2dc /mm
parent	37bd824a35a60abc73e5fa8816bd5f50c913d69b (diff)
parent	ba95fd47d177d46743ad94055908d22840370e06 (diff)
Merge branch 'linus' into tracing/blktrace
Conflicts:
	block/blktrace.c

Semantic merge:
	kernel/trace/blktrace.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--	mm/mlock.c	7
-rw-r--r--	mm/page-writeback.c	13
-rw-r--r--	mm/page_alloc.c	27
-rw-r--r--	mm/page_io.c	2
-rw-r--r--	mm/vmalloc.c	8
5 files changed, 43 insertions(+), 14 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 037161d61b4e..cbe9e0581b75 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -660,7 +660,7 @@ void *alloc_locked_buffer(size_t size)
 	return buffer;
 }
 
-void free_locked_buffer(void *buffer, size_t size)
+void release_locked_buffer(void *buffer, size_t size)
 {
 	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -670,6 +670,11 @@ void free_locked_buffer(void *buffer, size_t size)
 	current->mm->locked_vm -= pgsz;
 
 	up_write(&current->mm->mmap_sem);
+}
+
+void free_locked_buffer(void *buffer, size_t size)
+{
+	release_locked_buffer(buffer, size);
 
 	kfree(buffer);
 }
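
The two hunks above split the old free_locked_buffer() in two: release_locked_buffer() only rolls back the locked_vm accounting under mmap_sem, while free_locked_buffer() keeps its old contract by calling it and then freeing the memory. A minimal sketch of the post-patch layout; the down_write() pairing the up_write() shown in the hunk is assumed from context, and the rest of the body is abbreviated:

void release_locked_buffer(void *buffer, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);	/* pairs with the up_write() in the hunk */
	/* ... munlock/unaccount the buffer's pages ... */
	current->mm->locked_vm -= pgsz;
	up_write(&current->mm->mmap_sem);
}

void free_locked_buffer(void *buffer, size_t size)
{
	release_locked_buffer(buffer, size);

	kfree(buffer);
}

This lets callers that must not free the buffer yet, or that free it by other means, still drop the locked-memory charge.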
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3c84128596ba..74dc57c74349 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -240,7 +240,7 @@ void bdi_writeout_inc(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL_GPL(bdi_writeout_inc);
 
-static inline void task_dirty_inc(struct task_struct *tsk)
+void task_dirty_inc(struct task_struct *tsk)
 {
 	prop_inc_single(&vm_dirties, &tsk->dirties);
 }
@@ -1230,6 +1230,7 @@ int __set_page_dirty_nobuffers(struct page *page)
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
 		__inc_bdi_stat(mapping->backing_dev_info,
 				BDI_RECLAIMABLE);
+		task_dirty_inc(current);
 		task_io_account_write(PAGE_CACHE_SIZE);
 	}
 	radix_tree_tag_set(&mapping->page_tree,
@@ -1262,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-static int __set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -1280,14 +1281,6 @@ static int __set_page_dirty(struct page *page)
 	}
 	return 0;
 }
-
-int set_page_dirty(struct page *page)
-{
-	int ret = __set_page_dirty(page);
-	if (ret)
-		task_dirty_inc(current);
-	return ret;
-}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*
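
The writeback hunks move per-task dirty accounting to the point where a page actually transitions to dirty: task_dirty_inc() loses its static inline so other dirtying paths can call it, __set_page_dirty_nobuffers() now charges the current task directly, and the old set_page_dirty() wrapper that did the accounting after the fact is folded away (the former __set_page_dirty() simply becomes set_page_dirty()). A minimal sketch of the new accounting point, as the second hunk leaves it:

	/* inside __set_page_dirty_nobuffers(), once the page is newly dirty: */
	__inc_zone_page_state(page, NR_FILE_DIRTY);
	__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
	task_dirty_inc(current);		/* per-task dirty share, charged here now */
	task_io_account_write(PAGE_CACHE_SIZE);

prop_inc_single() feeds the per-task proportion that dirty throttling consults, so charging it at this point means direct callers of __set_page_dirty_nobuffers() are accounted too, not only those going through set_page_dirty().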
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5675b3073854..5c44ed49ca93 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2989,7 +2989,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
 	int i;
 
@@ -3000,10 +3000,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 		if (start_pfn <= pfn && pfn < end_pfn)
 			return early_node_map[i].nid;
 	}
+	/* This is a memory hole */
+	return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+	int nid;
 
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0)
+		return nid;
+	/* just returns 0 */
 	return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+	int nid;
+
+	nid = __early_pfn_to_nid(pfn);
+	if (nid >= 0 && nid != node)
+		return false;
+	return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
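
The page_alloc.c hunks split the early node lookup in two: __early_pfn_to_nid() walks early_node_map[] and now reports a memory hole as -1, early_pfn_to_nid() preserves the old behaviour of mapping holes to node 0 for callers that cannot handle a miss, and the new early_pfn_in_nid() (under CONFIG_NODES_SPAN_OTHER_NODES) treats a hole as belonging to whichever node is asked about. A hypothetical caller sketch, assuming a memmap-style init loop over a node's pfn range:

	/* skip pfns that early_node_map[] assigns to some other node;
	 * pfns in holes (nid < 0) are deliberately not skipped */
	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!early_pfn_in_nid(pfn, nid))
			continue;
		/* ... initialise the struct page for pfn on node nid ... */
	}

Returning true for holes keeps such loops from skipping ranges that merely contain gaps.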
diff --git a/mm/page_io.c b/mm/page_io.c
index dc6ce0afbded..3023c475e041 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		goto out;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		rw |= (1 << BIO_RW_SYNC);
+		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);
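
The page_io.c hunk follows a block-layer flag split: the old BIO_RW_SYNC bit, which meant both "treat as synchronous" and "unplug the queue now", is replaced by the separate BIO_RW_SYNCIO and BIO_RW_UNPLUG bits, so a caller wanting the old semantics must set both. For synchronous swap-out that comes down to the following (a sketch of the surrounding code, which initialises rw to WRITE):

	int rw = WRITE;

	if (wbc->sync_mode == WB_SYNC_ALL)
		/* old BIO_RW_SYNC semantics = sync priority + immediate unplug */
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);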
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 75f49d312e8c..4dd2636d0b92 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1106,6 +1106,14 @@ struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
+struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+				       unsigned long start, unsigned long end,
+				       void *caller)
+{
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+				  caller);
+}
+
 /**
  * get_vm_area - reserve a contiguous kernel virtual area
  * @size:	size of the area
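
The new __get_vm_area_caller() mirrors __get_vm_area() but lets the caller pass an explicit return address, so an allocation made through a wrapper can be attributed in /proc/vmallocinfo to the wrapper's caller rather than to the wrapper itself. A hypothetical wrapper sketch (the helper name and flags here are illustrative, not from the patch):

static void *my_map_helper(size_t size)
{
	struct vm_struct *area;

	area = __get_vm_area_caller(size, VM_IOREMAP,
				    VMALLOC_START, VMALLOC_END,
				    __builtin_return_address(0));
	if (!area)
		return NULL;
	return area->addr;
}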