author		Johannes Weiner <hannes@cmpxchg.org>	2009-09-21 20:02:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:35 -0400
commit		b7c46d151cb82856a429709d1227ba1648028232 (patch)
tree		b93012b4bb2ffd603c020b38d8f1f6f3b4714ff7
parent		bba78819548a59a52e60f0b259997bbd011164ae (diff)
mm: drop unneeded double negations
Remove double negations where the operand is already boolean.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/memcontrol.c	2
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/vmscan.c	8
3 files changed, 6 insertions(+), 6 deletions(-)
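For context on why the negations are unneeded: `!!x` collapses any nonzero integer into exactly 1, which matters when the value is later used as an array index or folded into index arithmetic. When the operand is already strictly 0 or 1 (a flag like `file` or `active`, or a boolean-returning predicate, as in the hunks below), `!!` changes nothing. A minimal userspace sketch of the idiom, with a hypothetical is_file_flag() helper standing in for the kernel's boolean-returning predicates (not kernel code):

	#include <stdio.h>

	/* Hypothetical helper: returns strictly 0 or 1, like the kernel's
	 * boolean-style predicates. */
	static int is_file_flag(int lru)
	{
		return lru == 2 || lru == 3;
	}

	int main(void)
	{
		int flags = 0x40;	/* arbitrary nonzero value */

		/* !! is useful to collapse "any nonzero" into exactly 1 ... */
		printf("!!0x40 = %d\n", !!flags);

		/* ... but on an already-boolean value it is a no-op. */
		int file = is_file_flag(3);
		printf("file = %d, !!file = %d\n", file, !!file);
		return 0;
	}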
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fd4529d86de5..9b10d8753784 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -648,7 +648,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 	int nid = z->zone_pgdat->node_id;
 	int zid = zone_idx(z);
 	struct mem_cgroup_per_zone *mz;
-	int lru = LRU_FILE * !!file + !!active;
+	int lru = LRU_FILE * file + active;
 	int ret;
 
 	BUG_ON(!mem_cont);
diff --git a/mm/memory.c b/mm/memory.c
index 05feaa11d87c..3cbeaaba5642 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -599,7 +599,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (page) {
 		get_page(page);
 		page_dup_rmap(page);
-		rss[!!PageAnon(page)]++;
+		rss[PageAnon(page)]++;
 	}
 
 out_set_pte:
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e219b47fc50b..cad5d528a6f0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -971,7 +971,7 @@ static unsigned long isolate_pages_global(unsigned long nr,
 	if (file)
 		lru += LRU_FILE;
 	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
-					mode, !!file);
+					mode, file);
 }
 
 /*
@@ -1209,7 +1209,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		lru = page_lru(page);
 		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
-			int file = !!is_file_lru(lru);
+			int file = is_file_lru(lru);
 			reclaim_stat->recent_rotated[file]++;
 		}
 		if (!pagevec_add(&pvec, page)) {
@@ -1319,7 +1319,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	if (scanning_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
 	}
-	reclaim_stat->recent_scanned[!!file] += nr_taken;
+	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	if (file)
@@ -1372,7 +1372,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * helps balance scan pressure between file and anonymous pages in
 	 * get_scan_ratio.
 	 */
-	reclaim_stat->recent_rotated[!!file] += nr_rotated;
+	reclaim_stat->recent_rotated[file] += nr_rotated;
 
 	move_active_pages_to_lru(zone, &l_active,
 					LRU_ACTIVE + file * LRU_FILE);
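The change is safe only because `file` and `active` are guaranteed to be 0 or 1 where they index recent_scanned[]/recent_rotated[] and feed the `LRU_FILE * file + active` arithmetic. A rough standalone sketch of that indexing scheme, assuming the usual LRU constants (LRU_BASE 0, LRU_ACTIVE 1, LRU_FILE 2); the real definitions live in include/linux/mmzone.h and may differ in detail:

	#include <stdio.h>

	/* Stand-ins mirroring the assumed LRU index layout: the four lists
	 * are inactive-anon 0, active-anon 1, inactive-file 2, active-file 3. */
	enum { LRU_BASE = 0, LRU_ACTIVE = 1, LRU_FILE = 2 };

	int main(void)
	{
		/* Because file and active are strictly 0 or 1, the raw
		 * arithmetic already selects the correct list; the old
		 * !!file and !!active were redundant. */
		for (int file = 0; file <= 1; file++)
			for (int active = 0; active <= 1; active++)
				printf("file=%d active=%d -> lru index %d\n",
				       file, active,
				       LRU_BASE + LRU_FILE * file + LRU_ACTIVE * active);
		return 0;
	}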