Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c |  3
-rw-r--r--  mm/nommu.c     |  8
-rw-r--r--  mm/oom_kill.c  |  2
-rw-r--r--  mm/rmap.c      | 18
4 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 67af4cea1e..5643cfed6b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -954,7 +954,8 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 		goto out;
 	}
 
-	err = do_migrate_pages(mm, &old, &new, MPOL_MF_MOVE);
+	err = do_migrate_pages(mm, &old, &new,
+		capable(CAP_SYS_ADMIN) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
 out:
 	mmput(mm);
 	return err;
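
Note on the mempolicy.c hunk: sys_migrate_pages() now requests MPOL_MF_MOVE_ALL (which also migrates pages shared with other processes) only when the caller has CAP_SYS_ADMIN, and falls back to MPOL_MF_MOVE otherwise. Below is a minimal, hypothetical userspace sketch of the migrate_pages(2) call this affects; the pid and node numbers are made-up illustration values, not part of the patch.

/* Sketch: ask the kernel to move this process's pages from node 0 to
 * node 1.  Whether shared pages are moved too now depends on the
 * caller's CAP_SYS_ADMIN, per the hunk above. */
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	unsigned long maxnode = 8 * sizeof(unsigned long);
	unsigned long old_nodes = 1UL << 0;	/* source: node 0 (made up) */
	unsigned long new_nodes = 1UL << 1;	/* target: node 1 (made up) */
	pid_t pid = 0;				/* 0 = calling process */
	long ret;

	ret = syscall(__NR_migrate_pages, pid, maxnode, &old_nodes, &new_nodes);
	if (ret < 0)
		perror("migrate_pages");
	else
		printf("pages that could not be moved: %ld\n", ret);
	return 0;
}
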
diff --git a/mm/nommu.c b/mm/nommu.c
index 99d21020ec..4951f4786f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -53,7 +53,6 @@ DECLARE_RWSEM(nommu_vma_sem);
 struct vm_operations_struct generic_file_vm_ops = {
 };
 
-EXPORT_SYMBOL(vmalloc);
 EXPORT_SYMBOL(vfree);
 EXPORT_SYMBOL(vmalloc_to_page);
 EXPORT_SYMBOL(vmalloc_32);
@@ -205,6 +204,13 @@ void *vmalloc(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
+EXPORT_SYMBOL(vmalloc);
+
+void *vmalloc_node(unsigned long size, int node)
+{
+	return vmalloc(size);
+}
+EXPORT_SYMBOL(vmalloc_node);
 
 /*
  *	vmalloc_32  -  allocate virtually continguos memory (32bit addressable)
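
Note on the nommu.c hunks: EXPORT_SYMBOL(vmalloc) moves next to the definition, and a vmalloc_node() stub is added that ignores the node argument and simply calls vmalloc(), so NUMA-aware callers keep building on !CONFIG_MMU. A small hypothetical in-kernel usage sketch follows; the helper name alloc_scratch() is an assumption, only vmalloc_node() comes from the patch.

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Hypothetical helper: per-node scratch buffer.  On nommu kernels the
 * stub added above makes this exactly equivalent to vmalloc(size). */
static void *alloc_scratch(unsigned long size, int node)
{
	void *buf = vmalloc_node(size, node);

	if (buf)
		memset(buf, 0, size);
	return buf;
}
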
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 8123fad5a4..c86c737d24 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -302,7 +302,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
 {
 	struct mm_struct *mm = NULL;
 	task_t *p;
-	unsigned long points;
+	unsigned long points = 0;
 
 	if (printk_ratelimit()) {
 		printk("oom-killer: gfp_mask=0x%x, order=%d\n",
diff --git a/mm/rmap.c b/mm/rmap.c
index df2c41c2a9..d8ce5ff614 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -212,25 +212,33 @@ out:
  * through real pte's pointing to valid pages and then releasing
  * the page from the swap cache.
  *
- * Must hold page lock on page.
+ * Must hold page lock on page and mmap_sem of one vma that contains
+ * the page.
  */
 void remove_from_swap(struct page *page)
 {
 	struct anon_vma *anon_vma;
 	struct vm_area_struct *vma;
+	unsigned long mapping;
 
-	if (!PageAnon(page) || !PageSwapCache(page))
+	if (!PageSwapCache(page))
 		return;
 
-	anon_vma = page_lock_anon_vma(page);
-	if (!anon_vma)
+	mapping = (unsigned long)page->mapping;
+
+	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
 		return;
 
+	/*
+	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
+	 */
+	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
+	spin_lock(&anon_vma->lock);
+
 	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
 		remove_vma_swap(vma, page);
 
 	spin_unlock(&anon_vma->lock);
-
 	delete_from_swap_cache(page);
 }
 EXPORT_SYMBOL(remove_from_swap);
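
Note on the rmap.c hunk: remove_from_swap() now documents that the caller must hold both the page lock and the mmap_sem of one vma that maps the page; with mmap_sem held the anon_vma cannot be freed, so the function takes anon_vma->lock directly instead of going through page_lock_anon_vma(). Below is a hypothetical caller sketch of that locking order; drop_swap_backing() and its surrounding context are assumptions, not code from this patch.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/* Hypothetical caller: illustrates only the locking the updated
 * comment asks for. */
static void drop_swap_backing(struct mm_struct *mm, struct page *page)
{
	down_read(&mm->mmap_sem);	/* pins the vma, hence the anon_vma */
	lock_page(page);		/* "page lock on page" */

	remove_from_swap(page);		/* takes anon_vma->lock internally */

	unlock_page(page);
	up_read(&mm->mmap_sem);
}
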