aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2010-08-09 20:19:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-08-09 23:45:02 -0400
commit15748048991e801a2d18ce5da4e0d528852bc106 (patch)
treee31dcdf36bbcfdd1c78546637d59faa963597bac
parent7ee92255470daa0edb93866aec6e27534cd9a177 (diff)
vmscan: avoid subtraction of unsigned types
'slab_reclaimable' and 'nr_pages' are unsigned. Subtraction is unsafe because a negative result would wrap around to a huge positive value and be misinterpreted. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Rik van Riel <riel@redhat.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/vmscan.c15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1c3d960de9d2..1b4e4a597caa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2600,7 +2600,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 	};
-	unsigned long slab_reclaimable;
+	unsigned long nr_slab_pages0, nr_slab_pages1;
 
 	cond_resched();
 	/*
@@ -2625,8 +2625,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
 	}
 
-	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
-	if (slab_reclaimable > zone->min_slab_pages) {
+	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
 		 * many pages were freed in this zone. So we take the current
@@ -2638,16 +2638,17 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * take a long time.
 		 */
 		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-				slab_reclaimable - nr_pages)
+		       (zone_page_state(zone, NR_SLAB_RECLAIMABLE) + nr_pages >
+				nr_slab_pages0))
 			;
 
 		/*
 		 * Update nr_reclaimed by the number of slab pages we
 		 * reclaimed from this zone.
 		 */
-		sc.nr_reclaimed += slab_reclaimable -
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+		if (nr_slab_pages1 < nr_slab_pages0)
+			sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1;
 	}
 
 	p->reclaim_state = NULL;