Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	12
1 files changed, 6 insertions, 6 deletions
@@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 	do {
 		cond_resched();
 		page = follow_page(vma, addr, FOLL_GET);
-		if (!page)
+		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
 			ret = handle_mm_fault(vma->vm_mm, vma, addr,
@@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
 		goto out;
 
 	page = follow_page(vma, addr, FOLL_GET);
-	if (!page)
+	if (IS_ERR_OR_NULL(page))
 		goto out;
 	if (PageAnon(page)) {
 		flush_anon_page(vma, page, addr);
@@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
 		cond_resched();
 		tree_rmap_item = rb_entry(*new, struct rmap_item, node);
 		tree_page = get_mergeable_page(tree_rmap_item);
-		if (!tree_page)
+		if (IS_ERR_OR_NULL(tree_page))
 			return NULL;
 
 		/*
@@ -1294,7 +1294,7 @@ next_mm:
 			if (ksm_test_exit(mm))
 				break;
 			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
-			if (*page && PageAnon(*page)) {
+			if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
 				flush_anon_page(vma, *page, ksm_scan.address);
 				flush_dcache_page(*page);
 				rmap_item = get_next_rmap_item(slot,
@@ -1308,7 +1308,7 @@ next_mm:
 				up_read(&mm->mmap_sem);
 				return rmap_item;
 			}
-			if (*page)
+			if (!IS_ERR_OR_NULL(*page))
 				put_page(*page);
 			ksm_scan.address += PAGE_SIZE;
 			cond_resched();
@@ -1367,7 +1367,7 @@ next_mm:
 static void ksm_do_scan(unsigned int scan_npages)
 {
 	struct rmap_item *rmap_item;
-	struct page *page;
+	struct page *uninitialized_var(page);
 
 	while (scan_npages--) {
 		cond_resched();
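
Every hunk applies the same idiom: follow_page() may return either NULL or an ERR_PTR()-encoded error, so callers test the result with IS_ERR_OR_NULL() (from <linux/err.h>) before dereferencing the page or dropping its reference. A minimal sketch of that pattern follows; the helper name and surrounding context are illustrative, not taken from the patch.

#include <linux/err.h>
#include <linux/mm.h>

/*
 * Illustrative helper only: take a reference on the page backing @addr.
 * Both NULL and ERR_PTR() returns from follow_page() are treated as
 * "no usable page"; on success the caller must put_page() the result.
 */
static struct page *grab_page_at(struct vm_area_struct *vma,
				 unsigned long addr)
{
	struct page *page;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		return NULL;
	return page;
}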