diff options
author | Dan Carpenter <error27@gmail.com> | 2010-04-23 13:18:10 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-04-24 14:31:26 -0400 |
commit | 22eccdd7d2d94be48ae9b01fef5f52ccbb81dcd5 (patch) | |
tree | 346ba02239f0d3a1ed5d1d09fc3d323c29202270 /mm/ksm.c | |
parent | 453dc65931915abc61f92e12bba1fc4747ff5542 (diff) |
ksm: check for ERR_PTR from follow_page()
The follow_page() function can return an ERR_PTR()-encoded error such as
-EFAULT, so I added IS_ERR_OR_NULL() checks at each call site.
I also silenced an uninitialized-variable warning produced by my version
of gcc (4.3.2).
Signed-off-by: Dan Carpenter <error27@gmail.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/ksm.c')
-rw-r--r-- | mm/ksm.c | 12 |
1 file changed, 6 insertions, 6 deletions
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) | |||
365 | do { | 365 | do { |
366 | cond_resched(); | 366 | cond_resched(); |
367 | page = follow_page(vma, addr, FOLL_GET); | 367 | page = follow_page(vma, addr, FOLL_GET); |
368 | if (!page) | 368 | if (IS_ERR_OR_NULL(page)) |
369 | break; | 369 | break; |
370 | if (PageKsm(page)) | 370 | if (PageKsm(page)) |
371 | ret = handle_mm_fault(vma->vm_mm, vma, addr, | 371 | ret = handle_mm_fault(vma->vm_mm, vma, addr, |
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item) | |||
447 | goto out; | 447 | goto out; |
448 | 448 | ||
449 | page = follow_page(vma, addr, FOLL_GET); | 449 | page = follow_page(vma, addr, FOLL_GET); |
450 | if (!page) | 450 | if (IS_ERR_OR_NULL(page)) |
451 | goto out; | 451 | goto out; |
452 | if (PageAnon(page)) { | 452 | if (PageAnon(page)) { |
453 | flush_anon_page(vma, page, addr); | 453 | flush_anon_page(vma, page, addr); |
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, | |||
1086 | cond_resched(); | 1086 | cond_resched(); |
1087 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); | 1087 | tree_rmap_item = rb_entry(*new, struct rmap_item, node); |
1088 | tree_page = get_mergeable_page(tree_rmap_item); | 1088 | tree_page = get_mergeable_page(tree_rmap_item); |
1089 | if (!tree_page) | 1089 | if (IS_ERR_OR_NULL(tree_page)) |
1090 | return NULL; | 1090 | return NULL; |
1091 | 1091 | ||
1092 | /* | 1092 | /* |
@@ -1294,7 +1294,7 @@ next_mm: | |||
1294 | if (ksm_test_exit(mm)) | 1294 | if (ksm_test_exit(mm)) |
1295 | break; | 1295 | break; |
1296 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); | 1296 | *page = follow_page(vma, ksm_scan.address, FOLL_GET); |
1297 | if (*page && PageAnon(*page)) { | 1297 | if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) { |
1298 | flush_anon_page(vma, *page, ksm_scan.address); | 1298 | flush_anon_page(vma, *page, ksm_scan.address); |
1299 | flush_dcache_page(*page); | 1299 | flush_dcache_page(*page); |
1300 | rmap_item = get_next_rmap_item(slot, | 1300 | rmap_item = get_next_rmap_item(slot, |
@@ -1308,7 +1308,7 @@ next_mm: | |||
1308 | up_read(&mm->mmap_sem); | 1308 | up_read(&mm->mmap_sem); |
1309 | return rmap_item; | 1309 | return rmap_item; |
1310 | } | 1310 | } |
1311 | if (*page) | 1311 | if (!IS_ERR_OR_NULL(*page)) |
1312 | put_page(*page); | 1312 | put_page(*page); |
1313 | ksm_scan.address += PAGE_SIZE; | 1313 | ksm_scan.address += PAGE_SIZE; |
1314 | cond_resched(); | 1314 | cond_resched(); |
@@ -1367,7 +1367,7 @@ next_mm: | |||
1367 | static void ksm_do_scan(unsigned int scan_npages) | 1367 | static void ksm_do_scan(unsigned int scan_npages) |
1368 | { | 1368 | { |
1369 | struct rmap_item *rmap_item; | 1369 | struct rmap_item *rmap_item; |
1370 | struct page *page; | 1370 | struct page *uninitialized_var(page); |
1371 | 1371 | ||
1372 | while (scan_npages--) { | 1372 | while (scan_npages--) { |
1373 | cond_resched(); | 1373 | cond_resched(); |