author		Mel Gorman <mgorman@suse.de>			2014-06-04 19:10:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-04 19:54:09 -0400
commit		b745bc85f21ea707e4ea1a91948055fa3e72c77b (patch)
tree		e4d4e8b52ca84acf64b2f333485ecb6edcab8738 /mm/page_alloc.c
parent		7aeb09f9104b760fc53c98cb7d20d06640baf9e6 (diff)
mm: page_alloc: convert hot/cold parameter and immediate callers to bool
cold is a bool, so make the parameter one.  Make the likely case the "if"
part of the block instead of the else, since according to the optimisation
manual this is preferred.
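
For illustration only, a minimal userspace sketch of the pattern this patch applies; likely() and the two list helpers are simplified stand-ins for the kernel's <linux/compiler.h> and <linux/list.h> versions, and add_page() is a hypothetical wrapper rather than a function from this patch. The point is that the parameter is a bool and that the common (hot) path sits in the "if" arm:

/*
 * Illustration only -- not kernel code.  likely() and the list helpers
 * below are simplified stand-ins for the kernel's implementations.
 */
#include <stdbool.h>
#include <stdio.h>

#define likely(x)  __builtin_expect(!!(x), 1)

struct node { int val; struct node *prev, *next; };

/* Insert at the head: hot pages, likely to be reused while still cache-warm. */
static void list_add(struct node *new, struct node *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Insert at the tail: cold pages, handed out last. */
static void list_add_tail(struct node *new, struct node *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Hypothetical wrapper: the likely (hot) case is the "if" arm, as in rmqueue_bulk(). */
static void add_page(struct node *page, struct node *list, bool cold)
{
	if (likely(!cold))
		list_add(page, list);
	else
		list_add_tail(page, list);
}

int main(void)
{
	struct node list = { 0, &list, &list };
	struct node hot = { 1, NULL, NULL }, cold_pg = { 2, NULL, NULL };

	add_page(&hot, &list, false);    /* hot page goes to the front */
	add_page(&cold_pg, &list, true); /* cold page goes to the back */

	for (struct node *p = list.next; p != &list; p = p->next)
		printf("%d\n", p->val);  /* prints 1 then 2 */
	return 0;
}
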
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jan Kara <jack@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 37ef1b87f1f3..09345ab7fb63 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1199,7 +1199,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
-			int migratetype, int cold)
+			int migratetype, bool cold)
 {
 	int i;
 
@@ -1218,7 +1218,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * merge IO requests if the physical pages are ordered
 		 * properly.
 		 */
-		if (likely(cold == 0))
+		if (likely(!cold))
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
@@ -1379,9 +1379,9 @@ void mark_free_pages(struct zone *zone)
 
 /*
  * Free a 0-order page
- * cold == 1 ? free a cold page : free a hot page
+ * cold == true ? free a cold page : free a hot page
  */
-void free_hot_cold_page(struct page *page, int cold)
+void free_hot_cold_page(struct page *page, bool cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -1413,10 +1413,10 @@ void free_hot_cold_page(struct page *page, int cold)
 	}
 
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	if (cold)
-		list_add_tail(&page->lru, &pcp->lists[migratetype]);
-	else
+	if (!cold)
 		list_add(&page->lru, &pcp->lists[migratetype]);
+	else
+		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		unsigned long batch = ACCESS_ONCE(pcp->batch);
@@ -1431,7 +1431,7 @@ out:
 /*
  * Free a list of 0-order pages
  */
-void free_hot_cold_page_list(struct list_head *list, int cold)
+void free_hot_cold_page_list(struct list_head *list, bool cold)
 {
 	struct page *page, *next;
 
@@ -1548,7 +1548,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 {
 	unsigned long flags;
 	struct page *page;
-	int cold = !!(gfp_flags & __GFP_COLD);
+	bool cold = ((gfp_flags & __GFP_COLD) != 0);
 
 again:
 	if (likely(order == 0)) {
@@ -2823,7 +2823,7 @@ void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
-			free_hot_cold_page(page, 0);
+			free_hot_cold_page(page, false);
 		else
 			__free_pages_ok(page, order);
 	}
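
For reference, a small standalone sketch of the flag-to-bool conversion seen in the buffered_rmqueue() hunk above; __GFP_COLD_DEMO is a made-up value standing in for the real __GFP_COLD, and both the old !! form and the new explicit comparison yield the same truth value, the new one simply reading more naturally with a bool destination:

#include <stdbool.h>
#include <stdio.h>

#define __GFP_COLD_DEMO 0x100u	/* stand-in value, not the real kernel flag */

int main(void)
{
	unsigned int gfp_flags = 0x107u;	/* some request with the cold bit set */

	int  cold_old = !!(gfp_flags & __GFP_COLD_DEMO);	/* old style: 0 or 1 in an int */
	bool cold_new = ((gfp_flags & __GFP_COLD_DEMO) != 0);	/* new style: true/false in a bool */

	printf("%d %d\n", cold_old, (int)cold_new);	/* both print 1 */
	return 0;
}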